diff --git a/.gitattributes b/.gitattributes index e98601997668f313b1d6b5a29dfba6712c3e2326..87f697b11c58374bfc030fb9f0239f4f73a35a96 100644 --- a/.gitattributes +++ b/.gitattributes @@ -77,3 +77,4 @@ MLPY/Library/bin/omptarget.rtl.opencl.dll filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/PyWin32.chm filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/h5py/hdf5.dll filter=lfs diff=lfs merge=lfs -text diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/INSTALLER b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/LICENSE.md b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..6249d60ce6f8a9a2a48a0314150cf0997a4a9962 --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/LICENSE.md @@ -0,0 +1,30 @@ +BSD 3-Clause License + +Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/METADATA b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..516d18d6ed07306adc1279010798a57f36cbd5b0 --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/METADATA @@ -0,0 +1,146 @@ +Metadata-Version: 2.1 +Name: Markdown +Version: 3.6 +Summary: Python implementation of John Gruber's Markdown. 
+Author: Manfred Stienstra, Yuri Takhteyev +Author-email: Waylan limberg +Maintainer: Isaac Muse +Maintainer-email: Waylan Limberg +License: BSD 3-Clause License + + Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later) + Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) + Copyright 2004 Manfred Stienstra (the original version) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Project-URL: Homepage, https://Python-Markdown.github.io/ +Project-URL: Documentation, https://Python-Markdown.github.io/ +Project-URL: Repository, https://github.com/Python-Markdown/markdown +Project-URL: Issue Tracker, https://github.com/Python-Markdown/markdown/issues +Project-URL: Changelog, https://python-markdown.github.io/changelog/ +Keywords: markdown,markdown-parser,python-markdown,markdown-to-html +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Communications :: Email :: Filters +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries +Classifier: Topic :: Internet :: WWW/HTTP :: Site Management +Classifier: Topic :: Software Development :: Documentation +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Filters +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Topic :: Text Processing :: Markup :: Markdown +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE.md +Requires-Dist: importlib-metadata >=4.4 ; python_version < "3.10" +Provides-Extra: docs +Requires-Dist: mkdocs >=1.5 ; extra == 'docs' +Requires-Dist: mkdocs-nature >=0.6 ; extra == 'docs' +Requires-Dist: mdx-gh-links >=0.2 ; extra == 'docs' +Requires-Dist: mkdocstrings[python] ; extra == 'docs' +Requires-Dist: mkdocs-gen-files ; extra == 'docs' +Requires-Dist: mkdocs-section-index ; extra == 'docs' +Requires-Dist: mkdocs-literate-nav ; extra == 'docs' +Provides-Extra: testing +Requires-Dist: coverage ; extra == 'testing' +Requires-Dist: pyyaml ; extra == 'testing' + +[Python-Markdown][] +=================== + +[![Build Status][build-button]][build] +[![Coverage Status][codecov-button]][codecov] +[![Latest Version][mdversion-button]][md-pypi] +[![Python Versions][pyversion-button]][md-pypi] +[![BSD License][bsdlicense-button]][bsdlicense] +[![Code of Conduct][codeofconduct-button]][Code of Conduct] + +[build-button]: https://github.com/Python-Markdown/markdown/workflows/CI/badge.svg?event=push +[build]: https://github.com/Python-Markdown/markdown/actions?query=workflow%3ACI+event%3Apush +[codecov-button]: https://codecov.io/gh/Python-Markdown/markdown/branch/master/graph/badge.svg +[codecov]: https://codecov.io/gh/Python-Markdown/markdown +[mdversion-button]: https://img.shields.io/pypi/v/Markdown.svg +[md-pypi]: https://pypi.org/project/Markdown/ +[pyversion-button]: https://img.shields.io/pypi/pyversions/Markdown.svg +[bsdlicense-button]: https://img.shields.io/badge/license-BSD-yellow.svg +[bsdlicense]: https://opensource.org/licenses/BSD-3-Clause +[codeofconduct-button]: https://img.shields.io/badge/code%20of%20conduct-contributor%20covenant-green.svg?style=flat-square +[Code of Conduct]: https://github.com/Python-Markdown/markdown/blob/master/CODE_OF_CONDUCT.md + +This is a Python implementation of 
John Gruber's [Markdown][]. +It is almost completely compliant with the reference implementation, +though there are a few known issues. See [Features][] for information +on what exactly is supported and what is not. Additional features are +supported by the [Available Extensions][]. + +[Python-Markdown]: https://Python-Markdown.github.io/ +[Markdown]: https://daringfireball.net/projects/markdown/ +[Features]: https://Python-Markdown.github.io#Features +[Available Extensions]: https://Python-Markdown.github.io/extensions + +Documentation +------------- + +```bash +pip install markdown +``` +```python +import markdown +html = markdown.markdown(your_text_string) +``` + +For more advanced [installation] and [usage] documentation, see the `docs/` directory +of the distribution or the project website at <https://Python-Markdown.github.io/>. + +[installation]: https://python-markdown.github.io/install/ +[usage]: https://python-markdown.github.io/reference/ + +See the change log at <https://python-markdown.github.io/changelog/>. + +Support +------- + +You may report bugs, ask for help, and discuss various other issues on the [bug tracker][]. + +[bug tracker]: https://github.com/Python-Markdown/markdown/issues + +Code of Conduct +--------------- + +Everyone interacting in the Python-Markdown project's code bases, issue trackers, +and mailing lists is expected to follow the [Code of Conduct]. diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/RECORD b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b461d77a73baeb3c467e7910633600e33f5a9f19 --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/RECORD @@ -0,0 +1,74 @@ +../../Scripts/markdown_py.exe,sha256=gjsXkYARbEnNiU7AO0QnizX91GeRgvRNt-gE-xGyTy4,108386 +Markdown-3.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Markdown-3.6.dist-info/LICENSE.md,sha256=e6TrbRCzKy0R3OE4ITQDUc27swuozMZ4Qdsv_Ybnmso,1650 +Markdown-3.6.dist-info/METADATA,sha256=8_ETqzTxcOemQXj7ujUabMFcDBDGtsRrccFDr1-XWvc,7040 +Markdown-3.6.dist-info/RECORD,, +Markdown-3.6.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +Markdown-3.6.dist-info/entry_points.txt,sha256=lMEyiiA_ZZyfPCBlDviBl-SiU0cfoeuEKpwxw361sKQ,1102 +Markdown-3.6.dist-info/top_level.txt,sha256=IAxs8x618RXoH1uCqeLLxXsDefJvE_mIibr_M4sOlyk,9 +markdown/__init__.py,sha256=dfzwwdpG9L8QLEPBpLFPIHx_BN056aZXp9xZifTxYIU,1777 +markdown/__main__.py,sha256=innFBxRqwPBNxG1zhKktJji4bnRKtVyYYd30ID13Tcw,5859 +markdown/__meta__.py,sha256=DqtqnYYLznrkvI1G4JalBc4WpgOp48naNoG9zlMWZas,1712 +markdown/__pycache__/__init__.cpython-39.pyc,, +markdown/__pycache__/__main__.cpython-39.pyc,, +markdown/__pycache__/__meta__.cpython-39.pyc,, +markdown/__pycache__/blockparser.cpython-39.pyc,, +markdown/__pycache__/blockprocessors.cpython-39.pyc,, +markdown/__pycache__/core.cpython-39.pyc,, +markdown/__pycache__/htmlparser.cpython-39.pyc,, +markdown/__pycache__/inlinepatterns.cpython-39.pyc,, +markdown/__pycache__/postprocessors.cpython-39.pyc,, +markdown/__pycache__/preprocessors.cpython-39.pyc,, +markdown/__pycache__/serializers.cpython-39.pyc,, +markdown/__pycache__/test_tools.cpython-39.pyc,, +markdown/__pycache__/treeprocessors.cpython-39.pyc,, +markdown/__pycache__/util.cpython-39.pyc,, +markdown/blockparser.py,sha256=j4CQImVpiq7g9pz8wCxvzT61X_T2iSAjXupHJk8P3eA,5728 +markdown/blockprocessors.py,sha256=koY5rq8DixzBCHcquvZJp6x2JYyBGjrwxMWNZhd6D2U,27013 +markdown/core.py,sha256=DyyzDsmd-KcuEp8ZWUKJAeUCt7B7G3J3NeqZqp3LphI,21335
+markdown/extensions/__init__.py,sha256=9z1khsdKCVrmrJ_2GfxtPAdjD3FyMe5vhC7wmM4O9m0,4822 +markdown/extensions/__pycache__/__init__.cpython-39.pyc,, +markdown/extensions/__pycache__/abbr.cpython-39.pyc,, +markdown/extensions/__pycache__/admonition.cpython-39.pyc,, +markdown/extensions/__pycache__/attr_list.cpython-39.pyc,, +markdown/extensions/__pycache__/codehilite.cpython-39.pyc,, +markdown/extensions/__pycache__/def_list.cpython-39.pyc,, +markdown/extensions/__pycache__/extra.cpython-39.pyc,, +markdown/extensions/__pycache__/fenced_code.cpython-39.pyc,, +markdown/extensions/__pycache__/footnotes.cpython-39.pyc,, +markdown/extensions/__pycache__/legacy_attrs.cpython-39.pyc,, +markdown/extensions/__pycache__/legacy_em.cpython-39.pyc,, +markdown/extensions/__pycache__/md_in_html.cpython-39.pyc,, +markdown/extensions/__pycache__/meta.cpython-39.pyc,, +markdown/extensions/__pycache__/nl2br.cpython-39.pyc,, +markdown/extensions/__pycache__/sane_lists.cpython-39.pyc,, +markdown/extensions/__pycache__/smarty.cpython-39.pyc,, +markdown/extensions/__pycache__/tables.cpython-39.pyc,, +markdown/extensions/__pycache__/toc.cpython-39.pyc,, +markdown/extensions/__pycache__/wikilinks.cpython-39.pyc,, +markdown/extensions/abbr.py,sha256=JqFOfU7JlhIFY06-nZnSU0wDqneFKKWMe95eXB-iLtc,3250 +markdown/extensions/admonition.py,sha256=Hqcn3I8JG0i-OPWdoqI189TmlQRgH6bs5PmpCANyLlg,6547 +markdown/extensions/attr_list.py,sha256=t3PrgAr5Ebldnq3nJNbteBt79bN0ccXS5RemmQfUZ9g,7820 +markdown/extensions/codehilite.py,sha256=ChlmpM6S--j-UK7t82859UpYjm8EftdiLqmgDnknyes,13503 +markdown/extensions/def_list.py,sha256=J3NVa6CllfZPsboJCEycPyRhtjBHnOn8ET6omEvVlDo,4029 +markdown/extensions/extra.py,sha256=1vleT284kued4HQBtF83IjSumJVo0q3ng6MjTkVNfNQ,2163 +markdown/extensions/fenced_code.py,sha256=-fYSmRZ9DTYQ8HO9b_78i47kVyVu6mcVJlqVTMdzvo4,8300 +markdown/extensions/footnotes.py,sha256=bRFlmIBOKDI5efG1jZfDkMoV2osfqWip1rN1j2P-mMg,16710 +markdown/extensions/legacy_attrs.py,sha256=oWcyNrfP0F6zsBoBOaD5NiwrJyy4kCpgQLl12HA7JGU,2788 +markdown/extensions/legacy_em.py,sha256=-Z_w4PEGSS-Xg-2-BtGAnXwwy5g5GDgv2tngASnPgxg,1693 +markdown/extensions/md_in_html.py,sha256=y4HEWEnkvfih22fojcaJeAmjx1AtF8N-a_jb6IDFfts,16546 +markdown/extensions/meta.py,sha256=v_4Uq7nbcQ76V1YAvqVPiNLbRLIQHJsnfsk-tN70RmY,2600 +markdown/extensions/nl2br.py,sha256=9KKcrPs62c3ENNnmOJZs0rrXXqUtTCfd43j1_OPpmgU,1090 +markdown/extensions/sane_lists.py,sha256=ogAKcm7gEpcXV7fSTf8JZH5YdKAssPCEOUzdGM3C9Tw,2150 +markdown/extensions/smarty.py,sha256=yqT0OiE2AqYrqqZtcUFFmp2eJsQHomiKzgyG2JFb9rI,11048 +markdown/extensions/tables.py,sha256=oTDvGD1qp9xjVWPGYNgDBWe9NqsX5gS6UU5wUsQ1bC8,8741 +markdown/extensions/toc.py,sha256=PGg-EqbBubm3n0b633r8Xa9kc6JIdbo20HGAOZ6GEl8,18322 +markdown/extensions/wikilinks.py,sha256=j7D2sozica6sqXOUa_GuAXqIzxp-7Hi60bfXymiuma8,3285 +markdown/htmlparser.py,sha256=dEr6IE7i9b6Tc1gdCLZGeWw6g6-E-jK1Z4KPj8yGk8Q,14332 +markdown/inlinepatterns.py,sha256=7_HF5nTOyQag_CyBgU4wwmuI6aMjtadvGadyS9IP21w,38256 +markdown/postprocessors.py,sha256=eYi6eW0mGudmWpmsW45hduLwX66Zr8Bf44WyU9vKp-I,4807 +markdown/preprocessors.py,sha256=pq5NnHKkOSVQeIo-ajC-Yt44kvyMV97D04FBOQXctJM,3224 +markdown/serializers.py,sha256=YtAFYQoOdp_TAmYGow6nBo0eB6I-Sl4PTLdLDfQJHwQ,7174 +markdown/test_tools.py,sha256=MtN4cf3ZPDtb83wXLTol-3q3aIGRIkJ2zWr6fd-RgVE,8662 +markdown/treeprocessors.py,sha256=o4dnoZZsIeVV8qR45Njr8XgwKleWYDS5pv8dKQhJvv8,17651 +markdown/util.py,sha256=vJ1E0xjMzDAlTqLUSJWgdEvxdQfLXDEYUssOQMw9kPQ,13929 diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/WHEEL 
b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/entry_points.txt b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..be3bd8ff245a5124f9d988c2a63053d307b6d296 --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/entry_points.txt @@ -0,0 +1,22 @@ +[console_scripts] +markdown_py = markdown.__main__:run + +[markdown.extensions] +abbr = markdown.extensions.abbr:AbbrExtension +admonition = markdown.extensions.admonition:AdmonitionExtension +attr_list = markdown.extensions.attr_list:AttrListExtension +codehilite = markdown.extensions.codehilite:CodeHiliteExtension +def_list = markdown.extensions.def_list:DefListExtension +extra = markdown.extensions.extra:ExtraExtension +fenced_code = markdown.extensions.fenced_code:FencedCodeExtension +footnotes = markdown.extensions.footnotes:FootnoteExtension +legacy_attrs = markdown.extensions.legacy_attrs:LegacyAttrExtension +legacy_em = markdown.extensions.legacy_em:LegacyEmExtension +md_in_html = markdown.extensions.md_in_html:MarkdownInHtmlExtension +meta = markdown.extensions.meta:MetaExtension +nl2br = markdown.extensions.nl2br:Nl2BrExtension +sane_lists = markdown.extensions.sane_lists:SaneListExtension +smarty = markdown.extensions.smarty:SmartyExtension +tables = markdown.extensions.tables:TableExtension +toc = markdown.extensions.toc:TocExtension +wikilinks = markdown.extensions.wikilinks:WikiLinkExtension diff --git a/MLPY/Lib/site-packages/Markdown-3.6.dist-info/top_level.txt b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..0918c9768895c23af5ecf35282391d7b12ed301a --- /dev/null +++ b/MLPY/Lib/site-packages/Markdown-3.6.dist-info/top_level.txt @@ -0,0 +1 @@ +markdown diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4700f975c9f76ccf9dec953157a92c549f450cc --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2010 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7354b5a5f91fbb92cf6a126745f51252bae9f0ce --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA @@ -0,0 +1,93 @@ +Metadata-Version: 2.1 +Name: MarkupSafe +Version: 2.1.5 +Summary: Safely add untrusted strings to HTML/XML markup. +Home-page: https://palletsprojects.com/p/markupsafe/ +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://markupsafe.palletsprojects.com/ +Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/markupsafe/ +Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/ +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst + +MarkupSafe +========== + +MarkupSafe implements a text object that escapes characters so it is +safe to use in HTML and XML. Characters that have special meanings are +replaced so that they display as the actual characters. This mitigates +injection attacks, meaning untrusted user input can safely be displayed +on a page. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U MarkupSafe + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +Examples +-------- + +.. 
code-block:: pycon + + >>> from markupsafe import Markup, escape + + >>> # escape replaces special characters and wraps in Markup + >>> escape("<script>alert(document.cookie);</script>") + Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;') + + >>> # wrap in Markup to mark text "safe" and prevent escaping + >>> Markup("<strong>Hello</strong>") + Markup('<strong>hello</strong>') + + >>> escape(Markup("<strong>Hello</strong>")) + Markup('<strong>hello</strong>') + + >>> # Markup is a str subclass + >>> # methods and operators escape their arguments + >>> template = Markup("Hello <em>{name}</em>") + >>> template.format(name='"World"') + Markup('Hello <em>&#34;World&#34;</em>') + + +Donate +------ + +The Pallets organization develops and supports MarkupSafe and other +popular packages. In order to grow the community of contributors and +users, and allow the maintainers to devote more time to the projects, +`please donate today`_. + +.. _please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://markupsafe.palletsprojects.com/ +- Changes: https://markupsafe.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/MarkupSafe/ +- Source Code: https://github.com/pallets/markupsafe/ +- Issue Tracker: https://github.com/pallets/markupsafe/issues/ +- Chat: https://discord.gg/pallets diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ed4c4c583c289927d059381780de6278474bfa1b --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD @@ -0,0 +1,14 @@ +MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503 +MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096 +MarkupSafe-2.1.5.dist-info/RECORD,, +MarkupSafe-2.1.5.dist-info/WHEEL,sha256=GZFS91_ufm4WrNPBaFVPB9MvOXR6bMZQhPcZRRTN5YM,100 +MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 +markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290 +markupsafe/__pycache__/__init__.cpython-39.pyc,, +markupsafe/__pycache__/_native.cpython-39.pyc,, +markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776 +markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403 +markupsafe/_speedups.cp39-win_amd64.pyd,sha256=mxpVr1JPAspPHonOfxzkg7mQIKFTQuzl9v95Ejv5zks,15872 +markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238 +markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a314ad34c55629d9d130b421f7e95a9ebfe1c5ac --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: false +Tag: cp39-cp39-win_amd64 + diff --git a/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f --- /dev/null +++ b/MLPY/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/INSTALLER
b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/LICENSE.md b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..8e54864f83cd99231b50b6d9739089fc72b1019f --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/LICENSE.md @@ -0,0 +1,34 @@ +The MIT License + +Copyright (c) 2016 OpenAI (https://openai.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +# Mujoco models +This work is derived from [MuJuCo models](http://www.mujoco.org/forum/index.php?resources/) used under the following license: +``` +This file is part of MuJoCo. +Copyright 2009-2015 Roboti LLC. 
+Mujoco :: Advanced physics simulation engine +Source : www.roboti.us +Version : 1.31 +Released : 23Apr16 +Author :: Vikash Kumar +Contacts : kumar@roboti.us +``` diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/METADATA b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..d2a7cf6cf80cc5ca47ea4e1e1e956441e7446c9c --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/METADATA @@ -0,0 +1,74 @@ +Metadata-Version: 2.1 +Name: gym +Version: 0.26.2 +Summary: Gym: A universal API for reinforcement learning environments +Home-page: https://www.gymlibrary.dev/ +Author: Gym Community +Author-email: jkterry@umd.edu +License: MIT +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE.md +Requires-Dist: numpy >=1.18.0 +Requires-Dist: cloudpickle >=1.2.0 +Requires-Dist: gym-notices >=0.0.4 +Requires-Dist: importlib-metadata >=4.8.0 ; python_version < "3.10" +Requires-Dist: dataclasses ==0.8 ; python_version == "3.6" +Provides-Extra: accept-rom-license +Requires-Dist: autorom[accept-rom-license] ~=0.4.2 ; extra == 'accept-rom-license' +Provides-Extra: all +Requires-Dist: opencv-python >=3.0 ; extra == 'all' +Requires-Dist: pytest ==7.0.1 ; extra == 'all' +Requires-Dist: swig ==4.* ; extra == 'all' +Requires-Dist: lz4 >=3.1.0 ; extra == 'all' +Requires-Dist: moviepy >=1.0.0 ; extra == 'all' +Requires-Dist: matplotlib >=3.0 ; extra == 'all' +Requires-Dist: pygame ==2.1.0 ; extra == 'all' +Requires-Dist: mujoco ==2.2 ; extra == 'all' +Requires-Dist: ale-py ~=0.8.0 ; extra == 'all' +Requires-Dist: box2d-py ==2.3.5 ; extra == 'all' +Requires-Dist: imageio >=2.14.1 ; extra == 'all' +Requires-Dist: mujoco-py <2.2,>=2.1 ; extra == 'all' +Provides-Extra: atari +Requires-Dist: ale-py ~=0.8.0 ; extra == 'atari' +Provides-Extra: box2d +Requires-Dist: box2d-py ==2.3.5 ; extra == 'box2d' +Requires-Dist: pygame ==2.1.0 ; extra == 'box2d' +Requires-Dist: swig ==4.* ; extra == 'box2d' +Provides-Extra: classic_control +Requires-Dist: pygame ==2.1.0 ; extra == 'classic_control' +Provides-Extra: mujoco +Requires-Dist: mujoco ==2.2 ; extra == 'mujoco' +Requires-Dist: imageio >=2.14.1 ; extra == 'mujoco' +Provides-Extra: mujoco_py +Requires-Dist: mujoco-py <2.2,>=2.1 ; extra == 'mujoco_py' +Provides-Extra: other +Requires-Dist: lz4 >=3.1.0 ; extra == 'other' +Requires-Dist: opencv-python >=3.0 ; extra == 'other' +Requires-Dist: matplotlib >=3.0 ; extra == 'other' +Requires-Dist: moviepy >=1.0.0 ; extra == 'other' +Provides-Extra: testing +Requires-Dist: opencv-python >=3.0 ; extra == 'testing' +Requires-Dist: swig ==4.* ; extra == 'testing' +Requires-Dist: lz4 >=3.1.0 ; extra == 'testing' +Requires-Dist: moviepy >=1.0.0 ; extra == 'testing' +Requires-Dist: matplotlib >=3.0 ; extra == 'testing' +Requires-Dist: pygame ==2.1.0 ; extra == 'testing' +Requires-Dist: mujoco ==2.2 ; extra == 'testing' +Requires-Dist: box2d-py ==2.3.5 ; extra == 'testing' +Requires-Dist: mujoco-py <2.2,>=2.1 ; extra == 'testing' +Requires-Dist: imageio >=2.14.1 ; extra == 'testing' +Requires-Dist: pytest ==7.0.1 ; extra == 'testing' +Provides-Extra: toy_text +Requires-Dist: pygame ==2.1.0 ; extra == 'toy_text' + 
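The extras listed above only gate optional environment families (Atari, Box2D, MuJoCo, classic-control rendering, toy text); the base dependencies are enough to exercise the 0.26-style API. A minimal sketch against this vendored gym 0.26.2 — the environment id and seed are illustrative, and rendering would additionally need the `classic_control` extra for pygame:

```python
import gym

# gym 0.26 API: reset() returns (observation, info); step() returns a
# 5-tuple with separate `terminated` and `truncated` flags.
env = gym.make("CartPole-v1")  # no render_mode set, so pygame is not required
obs, info = env.reset(seed=0)

done = False
while not done:
    action = env.action_space.sample()  # random policy, purely for illustration
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated

env.close()
```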
+[![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://pre-commit.com/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +## Important notice + diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/RECORD b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..5e38f3a9db969b6624915260b69ee736f0ae2190 --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/RECORD @@ -0,0 +1,328 @@ +gym-0.26.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +gym-0.26.2.dist-info/LICENSE.md,sha256=xnd64SHov7cV01-85qrl79uo07_1HDEji2uf-OXUF8I,1464 +gym-0.26.2.dist-info/METADATA,sha256=wTE3y-hh_8NEG3piHEnJelZ3mi7Wo3dbQqDYvUu5V8s,3323 +gym-0.26.2.dist-info/RECORD,, +gym-0.26.2.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91 +gym-0.26.2.dist-info/top_level.txt,sha256=i54cWDVQvqCNl9_WjpZD2D79fQxBvJ_-CUdBhqnRHak,4 +gym/__init__.py,sha256=QendnMqH8FTKkvDt5MCIkufcDLTKqRVdQFsz8iiTzC4,1177 +gym/__pycache__/__init__.cpython-39.pyc,, +gym/__pycache__/core.cpython-39.pyc,, +gym/__pycache__/error.cpython-39.pyc,, +gym/__pycache__/logger.cpython-39.pyc,, +gym/__pycache__/version.cpython-39.pyc,, +gym/core.py,sha256=sHXyb5Y3c6-f9a_IfQzoRhOHwsaIFLg0HUz4gxEz1Kk,20620 +gym/envs/__init__.py,sha256=u2xPiKpKAF3bZod2JlwI_iDMGE-Te_-VJcrbLT4KSqg,6961 +gym/envs/__pycache__/__init__.cpython-39.pyc,, +gym/envs/__pycache__/registration.cpython-39.pyc,, +gym/envs/box2d/__init__.py,sha256=6rsZ1qtQ25TtoYZs9zgiiWvUCLXUC1e80_DAa160V5g,202 +gym/envs/box2d/__pycache__/__init__.cpython-39.pyc,, +gym/envs/box2d/__pycache__/bipedal_walker.cpython-39.pyc,, +gym/envs/box2d/__pycache__/car_dynamics.cpython-39.pyc,, +gym/envs/box2d/__pycache__/car_racing.cpython-39.pyc,, +gym/envs/box2d/__pycache__/lunar_lander.cpython-39.pyc,, +gym/envs/box2d/bipedal_walker.py,sha256=Uq3fLdrijY6bYBAsUD2D5abn-PML-eDv8y_-WhCsDI0,31175 +gym/envs/box2d/car_dynamics.py,sha256=SnmhPoftWe0c9HrptbvoGyXfE6sUnqiDqkyvixn-FQE,12147 +gym/envs/box2d/car_racing.py,sha256=n4YJuAh9PZVcRgN4eRoE-XSllf3QP98gHrSYT2zPX_k,28932 +gym/envs/box2d/lunar_lander.py,sha256=z2jZsvIpf9Iix7ORwlxvMEsgnJIcwFZACwbPdA78Ncg,29802 +gym/envs/classic_control/__init__.py,sha256=HQCp0k_nfha3LIe1aUM2OZVc_SfY1RoWIfhxOPtSXak,324 +gym/envs/classic_control/__pycache__/__init__.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/acrobot.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/cartpole.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/continuous_mountain_car.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/mountain_car.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/pendulum.cpython-39.pyc,, +gym/envs/classic_control/__pycache__/utils.cpython-39.pyc,, +gym/envs/classic_control/acrobot.py,sha256=nlBII8tK35P0Or49zn3II3XKuuWRR-wcw3WeklZdZeU,16814 +gym/envs/classic_control/assets/clockwise.png,sha256=l7UO2thl-1ZMdjTMJvtXEogKFocPn4uJtS2J8DdKyak,6992 +gym/envs/classic_control/cartpole.py,sha256=wDqrRXZLeADnZ7JDED3SfZ1h8ZLVMFQTPBhCsE3y8JY,11579 +gym/envs/classic_control/continuous_mountain_car.py,sha256=msyASk4FuU7HglmB08JzVEbyLv8FE636ic0MWPX9g9g,10546 +gym/envs/classic_control/mountain_car.py,sha256=7CM3Pt3aDmga-3TH5dH4zAmWZfgFBoQOhpkbfLU4Cr8,9826 +gym/envs/classic_control/pendulum.py,sha256=qMJNKWmX5iQwPO4v-o6stI7GtoNVgnP943xvSVR7Cx0,9519 +gym/envs/classic_control/utils.py,sha256=QeUG8DfTPe-hlErmdUHiOvlfKTN9xKjsube27dWkNP8,1415 
+gym/envs/mujoco/__init__.py,sha256=Qbkvr_i_zV-2BI6V3oNMdyVu9eLjFMG5zUugCjGNWSc,662 +gym/envs/mujoco/__pycache__/__init__.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/ant.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/ant_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/ant_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/half_cheetah.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/half_cheetah_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/half_cheetah_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/hopper.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/hopper_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/hopper_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/humanoid.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/humanoid_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/humanoid_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/humanoidstandup.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/humanoidstandup_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/inverted_double_pendulum.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/inverted_double_pendulum_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/inverted_pendulum.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/inverted_pendulum_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/mujoco_env.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/mujoco_rendering.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/pusher.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/pusher_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/reacher.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/reacher_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/swimmer.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/swimmer_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/swimmer_v4.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/walker2d.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/walker2d_v3.cpython-39.pyc,, +gym/envs/mujoco/__pycache__/walker2d_v4.cpython-39.pyc,, +gym/envs/mujoco/ant.py,sha256=as96YZYAeF_EFZ5rQ8X7Z1yIZOVT18qApyM9jDpxyAw,2400 +gym/envs/mujoco/ant_v3.py,sha256=gZds_ggLP-3bwGSiVfcz25dGmymiRcSTrWHKK8GdppI,5705 +gym/envs/mujoco/ant_v4.py,sha256=p1xIoza6zFFubxepPc5R4aO2-bLnvr_y8-sKOj355IU,19981 +gym/envs/mujoco/assets/ant.xml,sha256=zV-D7w6jWwlp5l02DFus1bdMyu9rJ-RDO1FoxgXj4r4,4934 +gym/envs/mujoco/assets/half_cheetah.xml,sha256=EXl6XWnorJVeicpv3ToAh_HJkAlP2kAaxRQg3htsVJQ,5616 +gym/envs/mujoco/assets/hopper.xml,sha256=7tMN-dmkURkR9Y8--qeu_C0X3newO4ouTiHJYPN4SGI,3028 +gym/envs/mujoco/assets/humanoid.xml,sha256=KN3ODO_OVdnX8aVOtKHyKf2h1GMIKKlYceiuYg42fn8,8866 +gym/envs/mujoco/assets/humanoidstandup.xml,sha256=wTXtvwKntZqDcbokTownpyCqTYz1AvaBV1mOCxEGyzM,8854 +gym/envs/mujoco/assets/inverted_double_pendulum.xml,sha256=V9ElSowPUwDmECsIY5bFpu9MMBktcu18DPrh9rcYe-Y,1953 +gym/envs/mujoco/assets/inverted_pendulum.xml,sha256=LF1UEhH-f0pIfxEwHytMHNwKiODovjtx0FFc2l5yEdQ,1378 +gym/envs/mujoco/assets/point.xml,sha256=0PcXVKCMgvtEIYAcVqvqtF98GGqlX4iAmQmori_1wk8,1815 +gym/envs/mujoco/assets/pusher.xml,sha256=6CKY30405sQk8iDCCvYkey8ehsiCDB72LoG10Rsr_m4,5368 +gym/envs/mujoco/assets/reacher.xml,sha256=jTzTRP2W2xL5OXFuR6hLtnECo0n248DbUZwRJb92_bw,2362 +gym/envs/mujoco/assets/swimmer.xml,sha256=i5uvAEGqBmW2Ldzc3TMt-fl0VJzSoX2KEy7HCYqcP7s,2361 +gym/envs/mujoco/assets/walker2d.xml,sha256=g7NfRgulFEVMA1XptNVDh7CKVKNOghc_XhbcZESXRjU,4285 +gym/envs/mujoco/half_cheetah.py,sha256=o483CgdCkJaGvhER-bEQd8YHa4-wzVr4QKu_PkP98GA,1840 +gym/envs/mujoco/half_cheetah_v3.py,sha256=2932ajJuCkHaFRH0SioL4Z0aUbXuYC6rVh3EtjV-U48,3669 +gym/envs/mujoco/half_cheetah_v4.py,sha256=-fWjBYxi9ZVC-oVVDFV7-kbTuiH4bPIPVEi1Vmvj4Yw,13252 
+gym/envs/mujoco/hopper.py,sha256=ncWQPbjJDpGUXBzSJLdVwQDCLtLADrL2NQjxv6Dlq1Y,2026 +gym/envs/mujoco/hopper_v3.py,sha256=SZEkTZmQq2VXx16EEz1ahHP6bqsm-EkiPX4wzayb1Hg,5316 +gym/envs/mujoco/hopper_v4.py,sha256=ixwn-J2tsAUrtInDEMeCijv5m4Yt2nw0QUKR25zKmLg,16025 +gym/envs/mujoco/humanoid.py,sha256=QPQYMhZoosn-oQ4doPv8epN9fmX8ashz6_W4awD-ecg,2800 +gym/envs/mujoco/humanoid_v3.py,sha256=wo6gcsHFbIgT6n6kkKW9Y02kbVQTL-h2VwJhtE_bCJY,6299 +gym/envs/mujoco/humanoid_v4.py,sha256=CxJU0jijOHkL7sMEBzVOlOk1dA4Z44dYvZX6LjYFxE8,27951 +gym/envs/mujoco/humanoidstandup.py,sha256=rk47BuzpgEsTmh6g_ryp6Z_EiTBdOWwDC6QUmWqghwM,2449 +gym/envs/mujoco/humanoidstandup_v4.py,sha256=Mtsl6ogfccrIPOCY7rLb0puMis2gzmmcU-_qlwookKA,21674 +gym/envs/mujoco/inverted_double_pendulum.py,sha256=97d9GOmkxlhgZ-zsrp0y70uGRuBATmAJD_yekUpPhx8,2125 +gym/envs/mujoco/inverted_double_pendulum_v4.py,sha256=mEutmX3hPy5bWUw2RHGwnfgQ_hD5aF8rQZMGLflQo6k,9332 +gym/envs/mujoco/inverted_pendulum.py,sha256=DCmiFbMZe3cAdlCeYdHt-4DKleklhlRzqC-VpCDkDFE,1596 +gym/envs/mujoco/inverted_pendulum_v4.py,sha256=Kd7ZqqtGTWajTyUncs3jMV3MhbfWYZJXT9hJesCC4dg,5736 +gym/envs/mujoco/mujoco_env.py,sha256=478iRgVE0UHaUYUnqb_CelwI9eOcwzajQB7xXBS0Ybo,14450 +gym/envs/mujoco/mujoco_rendering.py,sha256=D-VEgKH5jQ6pqrdFqHJEAtHzIWHTLDH0Sp2miOwdVf8,19629 +gym/envs/mujoco/pusher.py,sha256=34enKGElH1oAJZOTPScZpFhr1vNIjHwKPvOYEMvosfM,2504 +gym/envs/mujoco/pusher_v4.py,sha256=4DxF8fK_zljVzQiFqbtohhdnkbVO-4KW6_kHEd6l7Zc,12160 +gym/envs/mujoco/reacher.py,sha256=vF-WLCP7ZhJV3IPM9-QHPZ2WhkI6FALEbESo4aCYv14,2190 +gym/envs/mujoco/reacher_v4.py,sha256=JIOKZhv7c_zu2-m8CHUNtgpaHW3FnKI6_2_CFjxjXcs,10039 +gym/envs/mujoco/swimmer.py,sha256=9Ewgh-WNiXCLfMIcJc1y0E_yoP7Q6RuhMKIv30iejd4,1676 +gym/envs/mujoco/swimmer_v3.py,sha256=HaJ6_ULgPK7BLv7ogrIo27oVsA9yddB_nau4Fbwtjr0,3877 +gym/envs/mujoco/swimmer_v4.py,sha256=BJJrXhr5OzxHnBS3834PNuPlpODMPVI7nNeyOKwYWcU,11896 +gym/envs/mujoco/walker2d.py,sha256=oB15bkYydBNRiH6W2vVTqqTmUmlZj_R5zO46qKDKrBI,1897 +gym/envs/mujoco/walker2d_v3.py,sha256=QJNcddYeInkiytg1m0wSmA71DzGV7lbc2MIptSU7hKw,4942 +gym/envs/mujoco/walker2d_v4.py,sha256=A24fx0kkRd6Kd31d4CrQxUdHH5ycidGdX4SoM4vBSQE,16500 +gym/envs/registration.py,sha256=e7SRL0MzqGHKELuDWCJ7YNhr8SPPR9a-nStHdhgW_Lw,26208 +gym/envs/toy_text/__init__.py,sha256=_7NggA5dylU3gw97yOr3W3oy_hhKV32obsZNiFfMhsE,211 +gym/envs/toy_text/__pycache__/__init__.cpython-39.pyc,, +gym/envs/toy_text/__pycache__/blackjack.cpython-39.pyc,, +gym/envs/toy_text/__pycache__/cliffwalking.cpython-39.pyc,, +gym/envs/toy_text/__pycache__/frozen_lake.cpython-39.pyc,, +gym/envs/toy_text/__pycache__/taxi.cpython-39.pyc,, +gym/envs/toy_text/__pycache__/utils.cpython-39.pyc,, +gym/envs/toy_text/blackjack.py,sha256=g2VSaI-rlLgrGaqV-je3ya1gfI8SuXaMvObgX6YI9V4,10806 +gym/envs/toy_text/cliffwalking.py,sha256=j2VeUTiA0MEAEFdrD8ejwblrr-lizPt7iqXyx-uB4k8,10940 +gym/envs/toy_text/font/Minecraft.ttf,sha256=vUcxTTAeUP9NEJv_KN_PY3y36xOUVIAlmHi4SIdazGU,14488 +gym/envs/toy_text/frozen_lake.py,sha256=gXZIsM9qIO4YPXHjAvACO1nygyz9rb4rqWs94FRN0yE,13715 +gym/envs/toy_text/img/C2.png,sha256=9wyYJEGGtI5mEUCDjj5NTBWy4aNsnv9my3DQFS9BH_E,6275 +gym/envs/toy_text/img/C3.png,sha256=1J_1vBm4VAyoj3rrEEGj9akKeZuiva10e074bXq7J48,6947 +gym/envs/toy_text/img/C4.png,sha256=zBRpVV6lC82w0dg2Y6sE7zrbzb96sMEvZzenPMC1xGM,7115 +gym/envs/toy_text/img/C5.png,sha256=R7lJ0Q5Lpt4SlP28qRTL6My3AniX6SUlLYZLZuBHsio,7948 +gym/envs/toy_text/img/C6.png,sha256=C71sLmQ6vNb54LCFPeQje_2npwMLnzB7APA2hYchHe8,8488 +gym/envs/toy_text/img/C7.png,sha256=b3wKICB8Lz9SV8mZ8k2sp5YJQNqLyMPja6ZxEpDeXPs,8704 
+gym/envs/toy_text/img/C8.png,sha256=g-lUCFiaB1nbMjJSZDdgscjl6Uy9biYBtPeOcMjqeUE,9807 +gym/envs/toy_text/img/C9.png,sha256=CmMb56YlmBt6Qd5cN8oq1RXTRN4dlq0a0TTKlcRrSRw,10427 +gym/envs/toy_text/img/CA.png,sha256=N1OXD5VuQkXngVqPRjHHhM2BuCiYfnqlvUeCgb6KDuk,6621 +gym/envs/toy_text/img/CJ.png,sha256=skFNLPORyMEPUSnwMfNJbSg4BsyHHEKutAe-t3vtc1M,18635 +gym/envs/toy_text/img/CK.png,sha256=vSJH7pzBiL4AlVuO6vGmRwt5MPImA-mKT2QkNlXlmC8,19498 +gym/envs/toy_text/img/CQ.png,sha256=r62ij9z2ODsZ3HvUpCkW1cOW7CnrZuK1Q0gApOuI3Fc,18772 +gym/envs/toy_text/img/CT.png,sha256=RRlaCFWc2J6iKGVpV2ATVDqFVZKnYGqQ15NwWE0L2rE,10454 +gym/envs/toy_text/img/Card.png,sha256=0Vzl3zX6Xr978OiIlJlZ2PabHET-f0hts5TiGMh78wE,44172 +gym/envs/toy_text/img/D2.png,sha256=da-t8bL5Wk8Xp1Jqo19SDbFgvsbjNyICk29UthL12M0,5905 +gym/envs/toy_text/img/D3.png,sha256=VMpHKcypvOZ7m5-4QGbQB7HzQV_DzRNH08dqtuSUBro,6501 +gym/envs/toy_text/img/D4.png,sha256=HYc-U5b4g7AwxQvaRaEUV6LJp5557qzasuZDOTKmjJI,6660 +gym/envs/toy_text/img/D5.png,sha256=q7oTybgDKzOPr3BmacExBv1JVRcnrI9EvtGippADcns,7451 +gym/envs/toy_text/img/D6.png,sha256=7F3CFwhoqUEd0ScvLMsN4CCnnmpQVR2VyDMLo4L2SCE,7931 +gym/envs/toy_text/img/D7.png,sha256=-wvxMG10ahY2cMQL5tsy5DmAWPX3oUZPg2JXHoJEv2g,8138 +gym/envs/toy_text/img/D8.png,sha256=FYFL9Xu1Z55tk2JdytO-_kznx7LDVktsZ7veSQ9LKRI,9027 +gym/envs/toy_text/img/D9.png,sha256=2PKQFm6Kl-eIZjGP-sIxED0V6AZxc5nX1Byf3riKInE,9529 +gym/envs/toy_text/img/DA.png,sha256=ya4Iv8Kiim-xl1vDDP2crGbo-cm015MNnexKo5kr1NU,6252 +gym/envs/toy_text/img/DJ.png,sha256=IpNYdsTQvkugVOvPh75vvGUyG8xp8J9k24keW7gRoe4,18468 +gym/envs/toy_text/img/DK.png,sha256=4_N60pVqvEwIs5vdY6AqE1aQVwIl-bTrzQTCDAK39Bo,19118 +gym/envs/toy_text/img/DQ.png,sha256=nBBmACf8H9aP11-sR5QB4JhJwjLWRosYOu2fOuBk9PE,18375 +gym/envs/toy_text/img/DT.png,sha256=IxIqoSXibft44nu1MUhEy3o-5DwG6UX20DvkEwEMkxw,9546 +gym/envs/toy_text/img/H2.png,sha256=TFvU5-70jkxsp79hGYvJu5b0MXGtjOIIWhAZvd-iubY,5898 +gym/envs/toy_text/img/H3.png,sha256=yxw4ifaB_5tGYWRt00bROCI2eSWchpMUyOmFnYwlHYI,6466 +gym/envs/toy_text/img/H4.png,sha256=NSYYoorjwtXSb3MfWlUb-cNf1AZo72w1lNaO_T5Hks0,6623 +gym/envs/toy_text/img/H5.png,sha256=cYjjSvPwNg2NZbKKm2jRC30j3Qg5ZMTYsFZEu3sc16I,7384 +gym/envs/toy_text/img/H6.png,sha256=fVVYlm3RUg_2IMMP2kg1AG3e2pvcdGjlIXgcX-YoqrI,7884 +gym/envs/toy_text/img/H7.png,sha256=vFUeTuAm4d-r7PR_8rJSY9IejPYorWtoeRUH6tPk0vQ,8041 +gym/envs/toy_text/img/H8.png,sha256=sx-YQj--5cE1CqfTpux3ajT18oturlTBph5tW_e_M2I,8882 +gym/envs/toy_text/img/H9.png,sha256=1CMKYuAqtecDyQBZpFT1FKRBB-UzSYFtoijsSurfonk,9402 +gym/envs/toy_text/img/HA.png,sha256=QRsYoxkuQvYkNcC3EmDty6lu3id7HWjnyfhH0Z9RmoQ,6870 +gym/envs/toy_text/img/HJ.png,sha256=K35x9y0sd6wDtT9gcSU7McSu5zDn1qMugwh05sa-voU,18394 +gym/envs/toy_text/img/HK.png,sha256=3yxO7n3vcTQgNW5tpdoYRMnny5P6HpdZAaEUEM3VK-A,19561 +gym/envs/toy_text/img/HQ.png,sha256=1YCQpN90NFgHDVQ1qjIyZLE7NcmCpIB941gkgPu44Nk,20159 +gym/envs/toy_text/img/HT.png,sha256=95B-xA3mdAtOWnsG8bcFMqD9MIwV3gX_7tmpDtQmxow,9366 +gym/envs/toy_text/img/S2.png,sha256=yg-eXnvu91_6bC9v6PYjQyk0P5oyAzAWWsM5G_sWaAc,6205 +gym/envs/toy_text/img/S3.png,sha256=590aYJVJ7YG2xM2xO_SzISlfaAF9PjVkBRuhFW1qVUI,6812 +gym/envs/toy_text/img/S4.png,sha256=V4XDRXe_P3oOPu_SCaEnblxRoJOI7n4tJJs1e75wGnE,7012 +gym/envs/toy_text/img/S5.png,sha256=XhFXC9Ip0JaZOhiHXLvAJBDbv55nUmd5xSPsU3he0xY,7809 +gym/envs/toy_text/img/S6.png,sha256=uqTGuNwQZWI8TaWhbMBvOYnjh3A9TelZejUzEeD8xm4,8370 +gym/envs/toy_text/img/S7.png,sha256=OTq2KFJYSxxo2-mGKckX05YZi58LYKJrhRUPysIbJoo,8585 +gym/envs/toy_text/img/S8.png,sha256=MHF4V1ZGJExUJc48uEiai_JMxFTY-AF9TKNffWkx55c,9527 
+gym/envs/toy_text/img/S9.png,sha256=3KYaz70QNUlcFXQTFF3YWzKXMuOi7ZMndZh7zx8Z_xI,9931 +gym/envs/toy_text/img/SA.png,sha256=uoiL6MbUk3XwwSZoI5dIiX_BV2iPWPhyWTF2xB0yNhY,6630 +gym/envs/toy_text/img/SJ.png,sha256=TDHnSTRu46hFdCND_DsNSKg6dUyjYvbcLkYOTLly1Qo,18175 +gym/envs/toy_text/img/SK.png,sha256=HzhL8Geb74zF9nHCB9jjJVqGuwhM2aUuuaEci_b7UcQ,19256 +gym/envs/toy_text/img/SQ.png,sha256=Cx4M4lhNkrN32_6rGxaTEORRJQXunGHut_drFNv_16I,19809 +gym/envs/toy_text/img/ST.png,sha256=WUxZ-MvkENMViermMSSZe_IL6tzt131s5IrPlKpXT6k,9816 +gym/envs/toy_text/img/cab_front.png,sha256=0Nrwn6zWHq9Mh7aLAXc6ki9gTUvLdFxig8NzASQ9cE4,891 +gym/envs/toy_text/img/cab_left.png,sha256=YYuXMjckXYVlDUioW77Hk0xgvMVnkzX25VqmJtXtRRE,955 +gym/envs/toy_text/img/cab_rear.png,sha256=6pdRkEyWbII-f-BAKVlhVRmGfY-in6xAnFB3nbjyjjU,892 +gym/envs/toy_text/img/cab_right.png,sha256=YrPjBpt_sWKOiIhSg-mIkehvqRjdgHvcN4aJh5eehYQ,970 +gym/envs/toy_text/img/cookie.png,sha256=iOr9fCE0CYYFxl_PG-VDifsnMo2R0yqwDri9ohdwQCo,3455 +gym/envs/toy_text/img/cracked_hole.png,sha256=s0he1Aoh-UkbycmJCznTIShV16GLShtuutFAa_iHJQo,706 +gym/envs/toy_text/img/elf_down.png,sha256=Ul7368dRAfEEoFAfjAO3bOo-KFvXp2SsVdld_j6UFwI,935 +gym/envs/toy_text/img/elf_left.png,sha256=OwgKcM3Yr8vKeQegWx9cUgGOZaX0RfXCGQLTrE_Orjo,872 +gym/envs/toy_text/img/elf_right.png,sha256=OUmLYwS5-0Go1YdjiMg8nKqC8Fu4AgPrdCuqkg2wN5I,858 +gym/envs/toy_text/img/elf_up.png,sha256=QvMfIK9izBYDeaSs3cERmEEAeoBYdKMj4fN4rIlz3A8,842 +gym/envs/toy_text/img/goal.png,sha256=OK-Obzy173i5Y0OyMtBzvfEPt8cpzDFnumxJPPB5myw,526 +gym/envs/toy_text/img/gridworld_median_bottom.png,sha256=j0P5HIaWjH3DZeR8HY9CU0pIaTs7faNyJ0fMomn88J8,2799 +gym/envs/toy_text/img/gridworld_median_horiz.png,sha256=hQsTdO3JzQydz-eda2vZmroqvPdNWizR-xcg7L7ZDZw,2352 +gym/envs/toy_text/img/gridworld_median_left.png,sha256=M455ykrkviOH7nVGC97NSPRVrg5Q35ZPyhtncJybZyM,2561 +gym/envs/toy_text/img/gridworld_median_right.png,sha256=ji2xG3ViKwPIEJyNhBD7aZJKzUV7Z78fpW0_RvgBqR8,2520 +gym/envs/toy_text/img/gridworld_median_top.png,sha256=C1U2nxpzBjGo4pWbLodGPTU-lz47eR8UqKzl5ybWgIQ,2844 +gym/envs/toy_text/img/gridworld_median_vert.png,sha256=Xx_VjzqZb3KBIGVe_O30nk7aadNjnFm4ONFBJiTmeo4,2594 +gym/envs/toy_text/img/hole.png,sha256=IUOb4ppEe7MbfQ9lneZnDOEJNxEuLfTmQCqsvoYThLI,676 +gym/envs/toy_text/img/hotel.png,sha256=8CXlzHSJvtxAswWTT9Mto7-cVvx9jgFRoJJbmoTEki4,21682 +gym/envs/toy_text/img/ice.png,sha256=v7H1hP_PrdEjYKGZnS-wwQusMsmifcWintCLrBA-vPs,494 +gym/envs/toy_text/img/mountain_bg1.png,sha256=NW5ZK9eAH2w0hMdYNCoZkKdAME_tuA9S-cGqrWm9EuI,651 +gym/envs/toy_text/img/mountain_bg2.png,sha256=9fIylx8LX3q6UMEV8iD1LN8PIDhQh3kjyEnVZreAgxI,643 +gym/envs/toy_text/img/mountain_cliff.png,sha256=7A1AUt9dS7oJHRe5ppuBtGrYFw-OgvpjbYKW4-aQGTk,442 +gym/envs/toy_text/img/mountain_near-cliff1.png,sha256=v2Sa6deBSTv8EOzZ90U3NDS112Lcl-7G6KUMHdSYonw,706 +gym/envs/toy_text/img/mountain_near-cliff2.png,sha256=dzNY-rgAPpXv0eSjKDUsKON_cLHI0FVno-ZnZqgbsOI,704 +gym/envs/toy_text/img/passenger.png,sha256=WBZJcLAoD7_G9CiipYSWta_85HOLu5vI9HMi9bbtLEo,817 +gym/envs/toy_text/img/stool.png,sha256=G00fTpkZhwO8usgSwaSi0cF8XxRyazxnX4TdiByp2Pc,651 +gym/envs/toy_text/img/taxi_background.png,sha256=J8-TimmcWzlCmd6CalW-C_bP26JCPYsgGBtKd94VIfo,2331 +gym/envs/toy_text/taxi.py,sha256=razOQvAB-WQs9Lo8WJ-qWTylvDJFQ9PyWbYOT508Xjw,18318 +gym/envs/toy_text/utils.py,sha256=Nzlmf-De1pYkWZqea40zBd5wqPNnXkfGpai4vBqvzyI,295 +gym/error.py,sha256=Gw3GAKbO6Xb66D9xjGCjOEybNP36W3c2m9CnVWrPA8c,5545 +gym/logger.py,sha256=cx_xMC9-NyzkBtQun7fV6qdHbRFJ7wfpVZQUqqVhHTs,1774 
+gym/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +gym/spaces/__init__.py,sha256=-cNhl-TZPMM11GHUgUhfHwbEewSLtBLJuvherQqxhzI,1266 +gym/spaces/__pycache__/__init__.cpython-39.pyc,, +gym/spaces/__pycache__/box.cpython-39.pyc,, +gym/spaces/__pycache__/dict.cpython-39.pyc,, +gym/spaces/__pycache__/discrete.cpython-39.pyc,, +gym/spaces/__pycache__/graph.cpython-39.pyc,, +gym/spaces/__pycache__/multi_binary.cpython-39.pyc,, +gym/spaces/__pycache__/multi_discrete.cpython-39.pyc,, +gym/spaces/__pycache__/sequence.cpython-39.pyc,, +gym/spaces/__pycache__/space.cpython-39.pyc,, +gym/spaces/__pycache__/text.cpython-39.pyc,, +gym/spaces/__pycache__/tuple.cpython-39.pyc,, +gym/spaces/__pycache__/utils.cpython-39.pyc,, +gym/spaces/box.py,sha256=purpTWrFDn2bdXkTqNQi-N2OBFulzubc9gr19uUAf2c,12732 +gym/spaces/dict.py,sha256=9SRWfPHOOgsfvr0iOS-uQhumJMEEWkNnswzan9ot3ZY,10427 +gym/spaces/discrete.py,sha256=j67NZaJ2tJSrxKbiijzm357YpAhutJ3t-sSOx3g6r0A,4476 +gym/spaces/graph.py,sha256=01Je_E66XzYz9Ka0o_sXHhpbbh5RPYKn5U0QUv85L60,9771 +gym/spaces/multi_binary.py,sha256=I8fBfUyUgRR9Z0GsyU1mSAcLZygnG6sDy9lxbCb5KNI,4569 +gym/spaces/multi_discrete.py,sha256=vu2kH_tctNhcj87GcQ3_lR9DQhgR7l7CW6CCIjVcHDE,7519 +gym/spaces/sequence.py,sha256=6J5mM55l2L3nttMKaDI5WFMcCe-JpqAw2-FDKjw-g68,5487 +gym/spaces/space.py,sha256=Bojz1M7xEdwK2bb4x8ipM53dutFlNpvkKf7QKihAUlk,5653 +gym/spaces/text.py,sha256=hEGjmy0or82Nc7V0wUeimHFngnS--vjrTpWssVdVCYQ,7660 +gym/spaces/tuple.py,sha256=O06QB2854c6LR9gU9kHPZmGti1uJm8eb70hqix5Yg34,6382 +gym/spaces/utils.py,sha256=gxp9y0fXAEuMeeFC2iRBJQ8luaMlvxArSxwBjOyGtrc,14940 +gym/utils/__init__.py,sha256=EaNeZtCP0UFa30qZEXUbpisyzgoVDC6ZBuO9yXO1qs4,420 +gym/utils/__pycache__/__init__.cpython-39.pyc,, +gym/utils/__pycache__/colorize.cpython-39.pyc,, +gym/utils/__pycache__/env_checker.cpython-39.pyc,, +gym/utils/__pycache__/ezpickle.cpython-39.pyc,, +gym/utils/__pycache__/passive_env_checker.cpython-39.pyc,, +gym/utils/__pycache__/play.cpython-39.pyc,, +gym/utils/__pycache__/save_video.cpython-39.pyc,, +gym/utils/__pycache__/seeding.cpython-39.pyc,, +gym/utils/__pycache__/step_api_compatibility.cpython-39.pyc,, +gym/utils/colorize.py,sha256=WoTr-TVI1JsFF5mBLlCZXxHcJDvIW5sRsHx9wlIDGxw,974 +gym/utils/env_checker.py,sha256=CP4QA779h3_B5Rs9WMi5Lh1uuWEGwhnWkavhPA6mBEY,12606 +gym/utils/ezpickle.py,sha256=WX6c9Bh8VSaIjpE3UnGlQJPiB-E20yA77leAEqt4eTo,1354 +gym/utils/passive_env_checker.py,sha256=9JkI4FyhA_8_MJIeL58zfyp2TOz5casCxNH9xNkfUzM,15046 +gym/utils/play.py,sha256=YUsnkfT7hKtaT1PRvaYqAtexg10MdUSAoHjRYasUr-w,15289 +gym/utils/save_video.py,sha256=_HYDr4ZDdL532Nt8l1V2eCq2xC2UOw9yrngDbXFVNJQ,4191 +gym/utils/seeding.py,sha256=VaoTZdBC0C0EyxzBxQJ3HlogWnzKjug1AwRpBrhLOuc,911 +gym/utils/step_api_compatibility.py,sha256=0VqS09ZFtZx2e8ckk9HXEVyy-fqfqOTJHYU5v0sGAQs,6522 +gym/vector/__init__.py,sha256=gSMxSEst5eEWI2WeolxjRn8QwEkU78PBhci6AsxvwQM,3023 +gym/vector/__pycache__/__init__.cpython-39.pyc,, +gym/vector/__pycache__/async_vector_env.cpython-39.pyc,, +gym/vector/__pycache__/sync_vector_env.cpython-39.pyc,, +gym/vector/__pycache__/vector_env.cpython-39.pyc,, +gym/vector/async_vector_env.py,sha256=__2MqiuJs2pu7rlNAeyYJKSsv6HE_tQLlaGJqMXpiXw,27608 +gym/vector/sync_vector_env.py,sha256=iwyVvOSOz3iOqzqCZG3Lch86Tbi-g8qkbkLMyfFKiJw,8760 +gym/vector/utils/__init__.py,sha256=NoUKGlOpyMEJkwotHhGazEG1D3yQWmqamvAjkGvoVBk,727 +gym/vector/utils/__pycache__/__init__.cpython-39.pyc,, +gym/vector/utils/__pycache__/misc.cpython-39.pyc,, +gym/vector/utils/__pycache__/numpy_utils.cpython-39.pyc,, 
+gym/vector/utils/__pycache__/shared_memory.cpython-39.pyc,, +gym/vector/utils/__pycache__/spaces.cpython-39.pyc,, +gym/vector/utils/misc.py,sha256=hVYMdlKhe7KZFFN5Xb7PdXlmbF6tNBnv9rF-p1O_5Y0,1587 +gym/vector/utils/numpy_utils.py,sha256=IjOI77MfwCnKq4mZR3mIqum722at89F05jYRw91150k,4477 +gym/vector/utils/shared_memory.py,sha256=evhgy_tqytAgkg2vkpglFZ9ao0SGy9HP168WEggIXwA,6522 +gym/vector/utils/spaces.py,sha256=QbPIQCaD6yXCO3yOwd8jVlB_uL5hbnHL0cMclWNZL7g,6434 +gym/vector/vector_env.py,sha256=SsP-9dqNRSGw9BdUcC_5WLd7RV_h9rCrYwY0Ec_rilE,11487 +gym/version.py,sha256=b9WD5a0aSxu_AnX_gU8K_lhu0F71oDanIfC9gAc0zME,19 +gym/wrappers/__init__.py,sha256=y4QmYHEWl-_Kopd9Hrk-7_xeZFfiZtQHz108p96hv0U,1356 +gym/wrappers/__pycache__/__init__.cpython-39.pyc,, +gym/wrappers/__pycache__/atari_preprocessing.cpython-39.pyc,, +gym/wrappers/__pycache__/autoreset.cpython-39.pyc,, +gym/wrappers/__pycache__/clip_action.cpython-39.pyc,, +gym/wrappers/__pycache__/compatibility.cpython-39.pyc,, +gym/wrappers/__pycache__/env_checker.cpython-39.pyc,, +gym/wrappers/__pycache__/filter_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/flatten_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/frame_stack.cpython-39.pyc,, +gym/wrappers/__pycache__/gray_scale_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/human_rendering.cpython-39.pyc,, +gym/wrappers/__pycache__/normalize.cpython-39.pyc,, +gym/wrappers/__pycache__/order_enforcing.cpython-39.pyc,, +gym/wrappers/__pycache__/pixel_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/record_episode_statistics.cpython-39.pyc,, +gym/wrappers/__pycache__/record_video.cpython-39.pyc,, +gym/wrappers/__pycache__/render_collection.cpython-39.pyc,, +gym/wrappers/__pycache__/rescale_action.cpython-39.pyc,, +gym/wrappers/__pycache__/resize_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/step_api_compatibility.cpython-39.pyc,, +gym/wrappers/__pycache__/time_aware_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/time_limit.cpython-39.pyc,, +gym/wrappers/__pycache__/transform_observation.cpython-39.pyc,, +gym/wrappers/__pycache__/transform_reward.cpython-39.pyc,, +gym/wrappers/__pycache__/vector_list_info.cpython-39.pyc,, +gym/wrappers/atari_preprocessing.py,sha256=z-8lsNaQsIdJ_ptymLMl4ADU3xbAoJpDGh28-6BWAr0,7860 +gym/wrappers/autoreset.py,sha256=s2kp5MUukJfQWgeeAMOjMKfN4vW0hP3sGPXOVBK7m3o,3131 +gym/wrappers/clip_action.py,sha256=H9qSsk9-tJR7H_Kj0io3evVpIEYrH5bNx2gc7CXA0Ek,1155 +gym/wrappers/compatibility.py,sha256=2COFfl32HA7-QhoyHnmpWCwW95jzpg6h8D-oQlOamEs,4288 +gym/wrappers/env_checker.py,sha256=OvQf8jKQKUSqmlBMSPinxrsP3t9hzjSqsPw_SyYLAJU,2306 +gym/wrappers/filter_observation.py,sha256=NO6xDswxPcXQlz3sd3ecrWzth2lAh4fiai7FvW8KWF4,3435 +gym/wrappers/flatten_observation.py,sha256=ZQypJt-mcElXJ623hsiJNOsCYakILuQFSaUyvyog77s,1092 +gym/wrappers/frame_stack.py,sha256=4NVm3OkT33GxUUyYYI5prnytdmt_vrwt8VYWpyeiaaY,6322 +gym/wrappers/gray_scale_observation.py,sha256=z7z9itOr9EAM9ZBlSJqpHXRi71R9RCvzQVkcC3lt7Oo,2079 +gym/wrappers/human_rendering.py,sha256=uLcPNKyB8Lrj2zllEzSS2R6tuAfloqjpaUCbhiBZ4uc,5051 +gym/wrappers/monitoring/__init__.py,sha256=6VeU4vURi21vtR8ZEHzFISkwREAxdkcNTzcaD-5n6K8,44 +gym/wrappers/monitoring/__pycache__/__init__.cpython-39.pyc,, +gym/wrappers/monitoring/__pycache__/video_recorder.cpython-39.pyc,, +gym/wrappers/monitoring/video_recorder.py,sha256=KC9pja5QT1bMgK87ZGL74JAlEChEAgl6YRKfl1RK7Qc,6362 +gym/wrappers/normalize.py,sha256=sU0HZzx98fAxycc4jwD2KA68URLrCtqnK6f_is_tSyM,5712 
+gym/wrappers/order_enforcing.py,sha256=2IexuLdgQGF_fYYCNfSAxGv6_uao_X9A2LjPvM8qv34,2158 +gym/wrappers/pixel_observation.py,sha256=nQN7yryqAwKSp-1YLPZX2KDrKK5esIRMLNCr4JiqKko,8049 +gym/wrappers/record_episode_statistics.py,sha256=ItmZCYs0Me506kVLTreRceVvwRtiyfmWtryROcYzYI8,5650 +gym/wrappers/record_video.py,sha256=LrExuNqOfEsg98TnzX9GMk9nF1clEf42euk-W7hzktM,8310 +gym/wrappers/render_collection.py,sha256=8zfSBRJvuVeXlpXUQ0eweKxPlBk6jppZkF5pCuRVbj0,1804 +gym/wrappers/rescale_action.py,sha256=2G8Vfj6dPK8JXo1LqYDQArjkXix1xXvLTj_IJ-XLkaw,3100 +gym/wrappers/resize_observation.py,sha256=_Z1ZddQZPM1vd7SbVn7WNz2udhqOz9tPU7BI2ykJbq8,2399 +gym/wrappers/step_api_compatibility.py,sha256=dwV-B2UqfN3dPRXbwN6DUYXQdJxGgf4bZUWly3xDl3o,2649 +gym/wrappers/time_aware_observation.py,sha256=mh7l_3253g_t_wrwfJL79TzM0l2qusMbxFZJDWHcwlY,2402 +gym/wrappers/time_limit.py,sha256=tCOfnouPIhdWwUAs0AiWGQpngQLw5XYEDqKHqqurrDY,2527 +gym/wrappers/transform_observation.py,sha256=PzuspuqGYYqVj3U4wDPcXyS3h8WfOxr13LVEhkb4BF0,1672 +gym/wrappers/transform_reward.py,sha256=v82_2Z6X8B47EKitj2-ONJvzEQJOvO0QmJRJfkYLcGU,1332 +gym/wrappers/vector_list_info.py,sha256=ED9NOju0-8qLtfNCKWuzVxZlQz2H1-ABld7X22FMDUI,3821 diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/WHEEL b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ecaf39f3c3df8b0075a2951da9b1a27fcb08a173 --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (71.1.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/gym-0.26.2.dist-info/top_level.txt b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..1e6c2dd43fd01404d4f7beb122961c70d52e4c2f --- /dev/null +++ b/MLPY/Lib/site-packages/gym-0.26.2.dist-info/top_level.txt @@ -0,0 +1 @@ +gym diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/INSTALLER b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/LICENSE.txt b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..335ea9d070ad1c319906aeff798584ded23c7387 --- /dev/null +++ b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2018 The Python Packaging Authority + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/METADATA b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..cafef6b612d942dd2ccc72e8b977680d967c61d3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/METADATA @@ -0,0 +1,16 @@ +Metadata-Version: 2.1 +Name: gym-notices +Version: 0.0.8 +Summary: Notices for gym +Home-page: https://github.com/Farama-Foundation/gym-notices +Author: Jordan Terry +Author-email: jkterry0@farama.org +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Description-Content-Type: text/markdown +License-File: LICENSE.txt + +# Gym Notices + +This repository hosts notices for Gym that may be displayed on import on internet-connected systems, in order to give notices if versions have major reproducibility issues, are very old and need to be upgraded (e.g. there have been issues with researchers using 4-year-old versions of Gym for no reason), or other similar issues. If you're using a current version of Gym and nothing extraordinary happens, you'll never see a message from this, but I want to start including the option to prevent future issues. By pulling the error messages from a public git repository, there's absolute transparency and versioning in the process. diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/RECORD b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b1507a1090523783d0364d028eb85a7f73517b58 --- /dev/null +++ b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/RECORD @@ -0,0 +1,10 @@ +gym_notices-0.0.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +gym_notices-0.0.8.dist-info/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073 +gym_notices-0.0.8.dist-info/METADATA,sha256=Iptgc7IOCtz58uh5cVp908nDruGm-SgPjpgYRx0JMeQ,1049 +gym_notices-0.0.8.dist-info/RECORD,, +gym_notices-0.0.8.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +gym_notices-0.0.8.dist-info/top_level.txt,sha256=KgY59gG5P771aCK2MTCI072fc7_zLeKVHfrIPeQ6Nuk,12 +gym_notices/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +gym_notices/__pycache__/__init__.cpython-39.pyc,, +gym_notices/__pycache__/notices.cpython-39.pyc,, +gym_notices/notices.py,sha256=U4ivMmQLy2sJnWMtDZDsyOVfT2-wLKnXsF_7hIT7zq4,630 diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/WHEEL b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/top_level.txt b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..fbeea1bdfbf5b1f816cd9b444baa68a4e0d14394 --- /dev/null +++ 
b/MLPY/Lib/site-packages/gym_notices-0.0.8.dist-info/top_level.txt @@ -0,0 +1 @@ +gym_notices diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/LICENSE b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..88904fa19777d29012cd84f560de8c735ecc0fe2 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2008 Andrew Collette and contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/METADATA b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e38473e67ca9c98732f38ec5e5a27fe66466b424 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/METADATA @@ -0,0 +1,51 @@ +Metadata-Version: 2.1 +Name: h5py +Version: 3.11.0 +Summary: Read and write HDF5 files from Python +Author-email: Andrew Collette +Maintainer-email: Thomas Kluyver , Thomas A Caswell +License: BSD-3-Clause +Project-URL: Homepage, https://www.h5py.org/ +Project-URL: Source, https://github.com/h5py/h5py +Project-URL: Documentation, https://docs.h5py.org/en/stable/ +Project-URL: Release notes, https://docs.h5py.org/en/stable/whatsnew/index.html +Project-URL: Discussion forum, https://forum.hdfgroup.org/c/hdf-tools/h5py +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Database +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: numpy >=1.17.3 + +The h5py package provides both a high- and low-level interface to the HDF5 +library from Python. The low-level interface is intended to be a complete +wrapping of the HDF5 API, while the high-level component supports access to +HDF5 files, datasets and groups using established Python and NumPy concepts. + +A strong emphasis on automatic conversion between Python (Numpy) datatypes and +data structures and their HDF5 equivalents vastly simplifies the process of +reading and writing data from Python. + +Wheels are provided for several popular platforms, with an included copy of +the HDF5 library (usually the latest version when h5py is released). + +You can also `build h5py from source +`_ +with any HDF5 stable release from version 1.10.4 onwards, although naturally new +HDF5 versions released after this version of h5py may not work. +Odd-numbered minor versions of HDF5 (e.g. 1.13) are experimental, and may not +be supported. 
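The package description above explains how h5py's high-level interface exposes HDF5 files, groups, datasets and attributes through ordinary Python and NumPy idioms. As a minimal sketch of that usage (the file name "example.h5", the "grid" dataset and the "units" attribute are purely illustrative, not anything shipped in this package):

    # Minimal illustrative sketch of the high-level h5py API described above;
    # all object names here are made up for the example.
    import numpy as np
    import h5py

    with h5py.File("example.h5", "w") as f:
        dset = f.create_dataset("grid", data=np.arange(12.0).reshape(3, 4))
        dset.attrs["units"] = "meters"      # attributes behave like a small dict
        f.create_group("runs")              # groups give the file a hierarchy

    with h5py.File("example.h5", "r") as f:
        block = f["grid"][1:, :2]           # slicing reads only the selected region
        units = f["grid"].attrs["units"]

Reads come back as NumPy arrays or scalars, so downstream code can treat HDF5-backed data much like in-memory arrays.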
diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/RECORD b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6666564b320ae4ced964f59fbb401a7b9ab69200 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/RECORD @@ -0,0 +1,150 @@ +h5py-3.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +h5py-3.11.0.dist-info/LICENSE,sha256=t3O8fPA4Y3p_2Ah2SMwo4F8PJNU3cNGCcUekCh0mirU,1550 +h5py-3.11.0.dist-info/METADATA,sha256=73yb6nTQ_Jj7V3gb5LtgY3t63hDIecokX0BVPQDDu4Y,2525 +h5py-3.11.0.dist-info/RECORD,, +h5py-3.11.0.dist-info/WHEEL,sha256=Z6c-bE0pUM47a70GvqO_SvH_XXU0lm62gEAKtoNJ08A,100 +h5py-3.11.0.dist-info/top_level.txt,sha256=fO7Bsaa0F3Nx6djErCCbSw4-E7rBFMWrBVTGLEMxUMg,5 +h5py/__init__.py,sha256=_rrHccfmxYOX14DlmFP9jyloa1IUTlg2FhuRIXN27ZE,3777 +h5py/__pycache__/__init__.cpython-39.pyc,, +h5py/__pycache__/h5py_warnings.cpython-39.pyc,, +h5py/__pycache__/ipy_completer.cpython-39.pyc,, +h5py/__pycache__/version.cpython-39.pyc,, +h5py/_conv.cp39-win_amd64.pyd,sha256=8gHwNboJkT27xGk5WYvbpJd4_4w1ed3xp5gYTSZbD1g,177664 +h5py/_errors.cp39-win_amd64.pyd,sha256=M3thJ-HQUdbszgxlR-66d6I4ZQrForAoouSXoLwFM4s,49664 +h5py/_hl/__init__.py,sha256=McVVGnMUXmMKA8GWPD9ie2jqe9qsgZ0rSYKmQIEN7C0,472 +h5py/_hl/__pycache__/__init__.cpython-39.pyc,, +h5py/_hl/__pycache__/attrs.cpython-39.pyc,, +h5py/_hl/__pycache__/base.cpython-39.pyc,, +h5py/_hl/__pycache__/compat.cpython-39.pyc,, +h5py/_hl/__pycache__/dataset.cpython-39.pyc,, +h5py/_hl/__pycache__/datatype.cpython-39.pyc,, +h5py/_hl/__pycache__/dims.cpython-39.pyc,, +h5py/_hl/__pycache__/files.cpython-39.pyc,, +h5py/_hl/__pycache__/filters.cpython-39.pyc,, +h5py/_hl/__pycache__/group.cpython-39.pyc,, +h5py/_hl/__pycache__/selections.cpython-39.pyc,, +h5py/_hl/__pycache__/selections2.cpython-39.pyc,, +h5py/_hl/__pycache__/vds.cpython-39.pyc,, +h5py/_hl/attrs.py,sha256=aKR8KCrno5KmUPifq1s_IxcpJGq0Ut02lEVGx90Ug2M,10499 +h5py/_hl/base.py,sha256=UMTAs1fS9eWeBrTQigPFGfWDgGphuiQlYS9XUR5ealU,16316 +h5py/_hl/compat.py,sha256=cS7DL_7c9YH1bSReQvp05nnm4pRYXalIIMZclczrls8,1417 +h5py/_hl/dataset.py,sha256=ReICJ6OfHfBOXX2TL_rfxuM2Qfqddj0muxxtoYCrL9M,42715 +h5py/_hl/datatype.py,sha256=CKWF_DmX_A9afqf3anXcs0b-yC2p8_mcPIqS3alZB5U,1603 +h5py/_hl/dims.py,sha256=aBBk4muIacpLhl6ra97wZx0v9U75KMBC9yqjm8Ajzq4,5290 +h5py/_hl/files.py,sha256=R1Jp2v88WaAb4Fv6M0lspNt-zY2FNqNrexxy-RrFfp4,24196 +h5py/_hl/filters.py,sha256=R7oVlmlYw5dktSNCn1oLAcXdO1wOxqzkHWX-gm1QkFE,14324 +h5py/_hl/group.py,sha256=GVS1pfX69f3oGV8ziyk79HG0Ze9lJejkRS_xeXBTiBA,31157 +h5py/_hl/selections.py,sha256=oLobyA7ITnNs7-EMAnuFcvJRlddPdWBoL86VDYhHmtA,14910 +h5py/_hl/selections2.py,sha256=BSuUwex1Qfy1wN2Wb3dAD2vpnb8KXEoJRvLKG0GoAaI,2826 +h5py/_hl/vds.py,sha256=9gM-jnGP-Ew4PkChaFUxhuLd9M78sIAbP6o6WEtekmU,9629 +h5py/_objects.cp39-win_amd64.pyd,sha256=4zMktt3J-WG7ux2a6C7SB7v75q5fHgSProeKvpBlyUk,111104 +h5py/_proxy.cp39-win_amd64.pyd,sha256=adj1kSqkU0VIM6KDouNjAGrCFoB59k89iWeLRj6U_hc,37376 +h5py/_selector.cp39-win_amd64.pyd,sha256=OEmTtZhXooxPAdDav6fu_9z5BbkPubX3QRWTg8fEWPU,136704 +h5py/defs.cp39-win_amd64.pyd,sha256=_4uMLxUW8mEqdKiktYdrk2Tzh7DE5CjXNVODePqOXEs,219648 +h5py/h5.cp39-win_amd64.pyd,sha256=J2HmI_p7dKph5trQZurWHhfLMU4W1jGQTQWRC_B7Bs8,91136 +h5py/h5a.cp39-win_amd64.pyd,sha256=J7UhI_BAElQwOJoNUJU3Md7EvawiQelTn5e3KGJhsQA,126976 +h5py/h5ac.cp39-win_amd64.pyd,sha256=YShT4Snr1AtFMhSoPJkkVTgjt3KKgQR1M_DNxwq-8vE,54784 +h5py/h5d.cp39-win_amd64.pyd,sha256=A0z1iGvp70U6_HRZ8iKu3IQBrdBxOiTkOcl4T_tE4WM,248832 
+h5py/h5ds.cp39-win_amd64.pyd,sha256=ZXMdctAygHTDonZUnodhRmH9r-PnrLY1qf0r1MLfRp4,79872 +h5py/h5f.cp39-win_amd64.pyd,sha256=PL74nMXgRNrXIb6Xv9v955CBzPKRUpJPjnovzjlQ9I4,140288 +h5py/h5fd.cp39-win_amd64.pyd,sha256=t8vssorafQfN5u5H0s8OeRLopCKIetyZ7IuEA89rhMM,146944 +h5py/h5g.cp39-win_amd64.pyd,sha256=fRzqdQ44ZqQhjPYAI4yk3gVqVw8SkpTjBQZp7fuqKQU,153600 +h5py/h5i.cp39-win_amd64.pyd,sha256=ilaE0ACHzI6GR5l83KuPoO1HgGdRDUPoqWKGfba23PA,57856 +h5py/h5l.cp39-win_amd64.pyd,sha256=RzxwMpI1U9g6DhZSco_RqvhxDg7lXymU2ZzBkIru2KI,106496 +h5py/h5o.cp39-win_amd64.pyd,sha256=ZB1q4328C6db3pCLJQ1L8efUTFPv3E53jnbn06jodfU,125440 +h5py/h5p.cp39-win_amd64.pyd,sha256=UiYb17ZYh0oGgkbne1ikOH2njIopOHdx3W4Fpgzxf2A,400384 +h5py/h5pl.cp39-win_amd64.pyd,sha256=D_SluW0nyvrw5_P0kp9ThD_wdUbmIF-1aavfRbQVvlQ,47104 +h5py/h5py_warnings.py,sha256=YR2DLMumy_qgchAg5_MLJniloT90Dil5_OHftF7MmD8,544 +h5py/h5r.cp39-win_amd64.pyd,sha256=mWxAqep5CphH4N3ldT9-zk9tyzOVgiTTiOq-YABgCRo,69120 +h5py/h5s.cp39-win_amd64.pyd,sha256=d93gbeUw9iqhDTKucqTv4wG14G6cBOFzkeysFw6lxWQ,122368 +h5py/h5t.cp39-win_amd64.pyd,sha256=mkAt0byWBM-nez5MQ_Z5T7Jqg2qP45AsgXdVcwsPVuo,367104 +h5py/h5z.cp39-win_amd64.pyd,sha256=BrYzvfT5j2PkADI3WzJUHN7KC6yzy56vu826Hi4x2fU,51712 +h5py/hdf5.dll,sha256=oBy_qGExAHJujZJ3S4tB3JXaDBQUi-z3v98uYw6-WAo,3458560 +h5py/hdf5_hl.dll,sha256=IcaP_p4xa7xIVv1gj9xk9QBDWIcr7rBDFy4mydYtOsQ,117760 +h5py/ipy_completer.py,sha256=iOKbnKizsvTlbsVU4ArkZHLWLqJfbjsrKMfLKYgQSGo,3889 +h5py/tests/__init__.py,sha256=fLCW7Dm0BboDAVY-KDWdK_GRPPJv-KW7Z31kiKOaCtI,691 +h5py/tests/__pycache__/__init__.cpython-39.pyc,, +h5py/tests/__pycache__/common.cpython-39.pyc,, +h5py/tests/__pycache__/conftest.cpython-39.pyc,, +h5py/tests/__pycache__/test_attribute_create.cpython-39.pyc,, +h5py/tests/__pycache__/test_attrs.cpython-39.pyc,, +h5py/tests/__pycache__/test_attrs_data.cpython-39.pyc,, +h5py/tests/__pycache__/test_base.cpython-39.pyc,, +h5py/tests/__pycache__/test_big_endian_file.cpython-39.pyc,, +h5py/tests/__pycache__/test_completions.cpython-39.pyc,, +h5py/tests/__pycache__/test_dataset.cpython-39.pyc,, +h5py/tests/__pycache__/test_dataset_getitem.cpython-39.pyc,, +h5py/tests/__pycache__/test_dataset_swmr.cpython-39.pyc,, +h5py/tests/__pycache__/test_datatype.cpython-39.pyc,, +h5py/tests/__pycache__/test_dimension_scales.cpython-39.pyc,, +h5py/tests/__pycache__/test_dims_dimensionproxy.cpython-39.pyc,, +h5py/tests/__pycache__/test_dtype.cpython-39.pyc,, +h5py/tests/__pycache__/test_errors.cpython-39.pyc,, +h5py/tests/__pycache__/test_file.cpython-39.pyc,, +h5py/tests/__pycache__/test_file2.cpython-39.pyc,, +h5py/tests/__pycache__/test_file_alignment.cpython-39.pyc,, +h5py/tests/__pycache__/test_file_image.cpython-39.pyc,, +h5py/tests/__pycache__/test_filters.cpython-39.pyc,, +h5py/tests/__pycache__/test_group.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5d_direct_chunk.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5f.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5o.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5p.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5pl.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5t.cpython-39.pyc,, +h5py/tests/__pycache__/test_h5z.cpython-39.pyc,, +h5py/tests/__pycache__/test_objects.cpython-39.pyc,, +h5py/tests/__pycache__/test_ros3.cpython-39.pyc,, +h5py/tests/__pycache__/test_selections.cpython-39.pyc,, +h5py/tests/__pycache__/test_slicing.cpython-39.pyc,, +h5py/tests/common.py,sha256=exoIAp_l0nZhZLXZgO9wGTsN5ys23gpj1QffpREwXIg,8006 
+h5py/tests/conftest.py,sha256=OiQZ2XuYVfeikYVoHGIuOkgXgsaKPpbTpfB6rY07eoA,569 +h5py/tests/data_files/__init__.py,sha256=mqVluyvrAZ6HEZH0QAX8D8q-ErxJG_b3roDk1UMh_RU,200 +h5py/tests/data_files/__pycache__/__init__.cpython-39.pyc,, +h5py/tests/data_files/vlen_string_dset.h5,sha256=kA-LrxnT2MTRGTrBrGAZ7nd0AF6FwDCvROZ1ezjSl5M,6304 +h5py/tests/data_files/vlen_string_dset_utc.h5,sha256=hbcoOCuDPB2mFie5ozTiKCLVwcs1n-O6byUmKvRTL2M,169904 +h5py/tests/data_files/vlen_string_s390x.h5,sha256=6pkMaOA3G6-rSWb3UvoTVbO59kNgBm9tna_48bfnTKU,9008 +h5py/tests/test_attribute_create.py,sha256=liDSJwp2td9I6jsEV0xXc6YNdpK4U2NU4zh8cDfQHYA,3085 +h5py/tests/test_attrs.py,sha256=cG5mwHFUwIkIMeJ_AMt3P7eAeJkox2cNQYP1pEdxRWs,9878 +h5py/tests/test_attrs_data.py,sha256=gSUOwHyBwCTmROTP6jiBdUEnssipWindpfyO378unBc,10079 +h5py/tests/test_base.py,sha256=QTxwpyHIJaT1NXEV95ApbEikEtzh5ZqMfD6bSNTUuSw,3962 +h5py/tests/test_big_endian_file.py,sha256=oPoetdkfWeGLSAMpEGagpRQC7VbIZplkU9sLKyxR0OM,1497 +h5py/tests/test_completions.py,sha256=jA2OuWaYhcPr4fAi04j6D3DjgCdY-BIzJquCQHa2Hvo,1525 +h5py/tests/test_dataset.py,sha256=0XdT7wKMPvqk4IXaWBv4a_ohebDfRfNJlE94GRmOKr8,76459 +h5py/tests/test_dataset_getitem.py,sha256=FLWDesUVsY-SsQsnKOY_3gaIhP0znUJbUryHXkgiGU0,19339 +h5py/tests/test_dataset_swmr.py,sha256=XDzSDzCKIXZnoGkRUnin3hjU4nPfIiyJuBBIBj1bMxk,4093 +h5py/tests/test_datatype.py,sha256=Ed8zcEv-0UahPxM4fbF3SVKHdZbrUscOTOM1MVb0Fgw,1047 +h5py/tests/test_dimension_scales.py,sha256=ydkP35UlA6RAjvuqZQe2utP1weWZcTvlnwNn4rm3Rec,8334 +h5py/tests/test_dims_dimensionproxy.py,sha256=mZ008MxVRhtf3WCR_pwLn527VU2NVo-i1WDYAGTF5BI,625 +h5py/tests/test_dtype.py,sha256=3wqa4nYXoKm4fNakkA6WOgeUBSnBYnekkfCAMq4VQiM,18442 +h5py/tests/test_errors.py,sha256=eA68-lYML0_CJNHNB6uH6OuZeITEOXUQ7JYoZ9DmVCI,2331 +h5py/tests/test_file.py,sha256=o1KzdaL1Izi3EX-doGH7BC2d1ckAlzsoKnhBEbDEj9Y,33502 +h5py/tests/test_file2.py,sha256=ejALn_-2rbyEPOo5yqVryY9jPLgw8C_GF2neCBtdbfI,10655 +h5py/tests/test_file_alignment.py,sha256=aA5fB1dzJwyzA5aEagJq8NzxnUBJGvG4grc2JUc_xEo,4509 +h5py/tests/test_file_image.py,sha256=3lmRRQ8Mex358uYSgMVVY-5V8JhTAPZLVbKrRSVRYBo,1451 +h5py/tests/test_filters.py,sha256=SJiYJi9wxN0C34WlQXvEEbxzv_Dxf1Dj9lGOmU0hyt0,3161 +h5py/tests/test_group.py,sha256=QPBihoi5TgM0XnPYmU5DPyhM-gmYOe2JfS9PdgdhLoQ,37660 +h5py/tests/test_h5.py,sha256=tUVpBOgKSVHLlDpu3urVcmaBPMt0vxAr46nIEgL4an4,1261 +h5py/tests/test_h5d_direct_chunk.py,sha256=Bg-C_r5sEzm1P-OfsfE431LlLtcDWdwYFCE6AdiMEns,8073 +h5py/tests/test_h5f.py,sha256=iQGz7YI01xUDBAsFC7ZwZmx6hb7_Q97Zf0cfdnKjZZ4,4127 +h5py/tests/test_h5o.py,sha256=j5cc1FGgSPoQgpxXC-ETSzQ32nEFuZDmKfovzjuk7Mc,529 +h5py/tests/test_h5p.py,sha256=dcpP0FPpsXzTXJNm8nMoWw647MPVwExlgoDzQVlQaOM,7042 +h5py/tests/test_h5pl.py,sha256=qtcjqJy99yo3GWmrRcJUFf5Pl_2rPNNpPkl-SrfnuEM,1859 +h5py/tests/test_h5t.py,sha256=Uhg1w6WfOkPWH0iUBfO5Onlw2O61sUiYvsL0MixSwrs,6770 +h5py/tests/test_h5z.py,sha256=2w_IFh8o2B2kIlXXMq4uc9aEppWUwkAGJX80ogs0Rbs,2004 +h5py/tests/test_objects.py,sha256=MM_-y3vTepZTael-TSNMcvENb0RldXJoj1oztQ48BPM,947 +h5py/tests/test_ros3.py,sha256=ttNSNiiZTvv66Fi6rdTP6c4YVlqxfPj2Njs-806hWE0,2234 +h5py/tests/test_selections.py,sha256=O4ZY5XX5oJDzQayTtgD-f3Hn7kMlZZKUFRVX4RDU42s,4995 +h5py/tests/test_slicing.py,sha256=8OyyTYkOAycRJbJsCjJ5_GDbdDKT4kVDjdwocD66nZY,14290 +h5py/tests/test_vds/__init__.py,sha256=I2uUocf1v8DxK1ox18-rp4bt1rWkCFXLL32K6rtGZcU,107 +h5py/tests/test_vds/__pycache__/__init__.cpython-39.pyc,, +h5py/tests/test_vds/__pycache__/test_highlevel_vds.cpython-39.pyc,, +h5py/tests/test_vds/__pycache__/test_lowlevel_vds.cpython-39.pyc,, 
+h5py/tests/test_vds/__pycache__/test_virtual_source.cpython-39.pyc,, +h5py/tests/test_vds/test_highlevel_vds.py,sha256=Z1NRsL8M0EkIDu7bcXip8zF6y1Fc84bSBjrsbFDa0ZE,17727 +h5py/tests/test_vds/test_lowlevel_vds.py,sha256=Z5M8YCAIUyu6-cPcsiOL65UT_oD7zuMVuFgBhiFqSmo,12163 +h5py/tests/test_vds/test_virtual_source.py,sha256=YF-uoptLTweL0gAKQaWhBNAgt2DVWH9MetPTgC6Bedw,6199 +h5py/utils.cp39-win_amd64.pyd,sha256=r3bdInG0FhafkHpPtG44XAfLgWLExsOAIlHCAqWvoXg,56832 +h5py/version.py,sha256=SUsE3HYDlhAPzewelasTL_PvVsOQqRnFD2f1PqD604Y,1976 +h5py/zlib.dll,sha256=jgKgB_0sD64jXgwuQF1YO9srBZFi6hotwpe2kLt8N8I,86016 diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/WHEEL b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..b7132af6d27aab726a7499fc58ccd63c206a0a33 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: false +Tag: cp39-cp39-win_amd64 + diff --git a/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c5a4eac431789a52d7f7d521a84937511cdc400c --- /dev/null +++ b/MLPY/Lib/site-packages/h5py-3.11.0.dist-info/top_level.txt @@ -0,0 +1 @@ +h5py diff --git a/MLPY/Lib/site-packages/h5py/__init__.py b/MLPY/Lib/site-packages/h5py/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f049f0ed2054acbdf105145f8513b1e7f620835 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/__init__.py @@ -0,0 +1,115 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + This is the h5py package, a Python interface to the HDF5 + scientific data format. +""" + +from warnings import warn as _warn +import atexit + + +# --- Library setup ----------------------------------------------------------- + +# When importing from the root of the unpacked tarball or git checkout, +# Python sees the "h5py" source directory and tries to load it, which fails. +# We tried working around this by using "package_dir" but that breaks Cython. +try: + from . import _errors +except ImportError: + import os.path as _op + if _op.exists(_op.join(_op.dirname(__file__), '..', 'setup.py')): + raise ImportError("You cannot import h5py from inside the install directory.\nChange to another directory first.") + else: + raise + +from . import version + +if version.hdf5_version_tuple != version.hdf5_built_version_tuple: + _warn(("h5py is running against HDF5 {0} when it was built against {1}, " + "this may cause problems").format( + '{0}.{1}.{2}'.format(*version.hdf5_version_tuple), + '{0}.{1}.{2}'.format(*version.hdf5_built_version_tuple) + )) + + +_errors.silence_errors() + +from ._conv import register_converters as _register_converters, \ + unregister_converters as _unregister_converters +_register_converters() +atexit.register(_unregister_converters) + +from .h5z import _register_lzf +_register_lzf() + + +# --- Public API -------------------------------------------------------------- + +from . 
import h5a, h5d, h5ds, h5f, h5fd, h5g, h5r, h5s, h5t, h5p, h5z, h5pl + +from ._hl import filters +from ._hl.base import is_hdf5, HLObject, Empty +from ._hl.files import ( + File, + register_driver, + unregister_driver, + registered_drivers, +) +from ._hl.group import Group, SoftLink, ExternalLink, HardLink +from ._hl.dataset import Dataset +from ._hl.datatype import Datatype +from ._hl.attrs import AttributeManager +from ._hl.vds import VirtualSource, VirtualLayout + +from ._selector import MultiBlockSlice +from .h5 import get_config +from .h5r import Reference, RegionReference +from .h5t import (special_dtype, check_dtype, + vlen_dtype, string_dtype, enum_dtype, ref_dtype, regionref_dtype, + opaque_dtype, + check_vlen_dtype, check_string_dtype, check_enum_dtype, check_ref_dtype, + check_opaque_dtype, +) +from .h5s import UNLIMITED + +from .version import version as __version__ + + +def run_tests(args=''): + """Run tests with pytest and returns the exit status as an int. + """ + # Lazy-loading of tests package to avoid strong dependency on test + # requirements, e.g. pytest + from .tests import run_tests + return run_tests(args) + + +def enable_ipython_completer(): + """ Call this from an interactive IPython session to enable tab-completion + of group and attribute names. + """ + import sys + if 'IPython' in sys.modules: + ip_running = False + try: + from IPython.core.interactiveshell import InteractiveShell + ip_running = InteractiveShell.initialized() + except ImportError: + # support .attrs. +""" + +import numpy +import uuid + +from .. import h5, h5s, h5t, h5a, h5p +from . import base +from .base import phil, with_phil, Empty, is_empty_dataspace, product +from .datatype import Datatype + + +class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject): + + """ + Allows dictionary-style access to an HDF5 object's attributes. + + These are created exclusively by the library and are available as + a Python attribute at .attrs + + Like Group objects, attributes provide a minimal dictionary- + style interface. Anything which can be reasonably converted to a + Numpy array or Numpy scalar can be stored. + + Attributes are automatically created on assignment with the + syntax .attrs[name] = value, with the HDF5 type automatically + deduced from the value. Existing attributes are overwritten. + + To modify an existing attribute while preserving its type, use the + method modify(). To specify an attribute of a particular type and + shape, use create(). + """ + + def __init__(self, parent): + """ Private constructor. + """ + self._id = parent.id + + @with_phil + def __getitem__(self, name): + """ Read the value of an attribute. + """ + attr = h5a.open(self._id, self._e(name)) + shape = attr.shape + + # shape is None for empty dataspaces + if shape is None: + return Empty(attr.dtype) + + dtype = attr.dtype + + # Do this first, as we'll be fiddling with the dtype for top-level + # array types + htype = h5t.py_create(dtype) + + # NumPy doesn't support top-level array types, so we have to "fake" + # the correct type and shape for the array. For example, consider + # attr.shape == (5,) and attr.dtype == '(3,)f'. 
Then: + if dtype.subdtype is not None: + subdtype, subshape = dtype.subdtype + shape = attr.shape + subshape # (5, 3) + dtype = subdtype # 'f' + + arr = numpy.zeros(shape, dtype=dtype, order='C') + attr.read(arr, mtype=htype) + + string_info = h5t.check_string_dtype(dtype) + if string_info and (string_info.length is None): + # Vlen strings: convert bytes to Python str + arr = numpy.array([ + b.decode('utf-8', 'surrogateescape') for b in arr.flat + ], dtype=dtype).reshape(arr.shape) + + if arr.ndim == 0: + return arr[()] + return arr + + def get_id(self, name): + """Get a low-level AttrID object for the named attribute. + """ + return h5a.open(self._id, self._e(name)) + + @with_phil + def __setitem__(self, name, value): + """ Set a new attribute, overwriting any existing attribute. + + The type and shape of the attribute are determined from the data. To + use a specific type or shape, or to preserve the type of an attribute, + use the methods create() and modify(). + """ + self.create(name, data=value) + + @with_phil + def __delitem__(self, name): + """ Delete an attribute (which must already exist). """ + h5a.delete(self._id, self._e(name)) + + def create(self, name, data, shape=None, dtype=None): + """ Create a new attribute, overwriting any existing attribute. + + name + Name of the new attribute (required) + data + An array to initialize the attribute (required) + shape + Shape of the attribute. Overrides data.shape if both are + given, in which case the total number of points must be unchanged. + dtype + Data type of the attribute. Overrides data.dtype if both + are given. + """ + name = self._e(name) + + with phil: + # First, make sure we have a NumPy array. We leave the data type + # conversion for HDF5 to perform. + if not isinstance(data, Empty): + data = base.array_for_new_object(data, specified_dtype=dtype) + + if shape is None: + shape = data.shape + elif isinstance(shape, int): + shape = (shape,) + + use_htype = None # If a committed type is given, we must use it + # in the call to h5a.create. + + if isinstance(dtype, Datatype): + use_htype = dtype.id + dtype = dtype.dtype + elif dtype is None: + dtype = data.dtype + else: + dtype = numpy.dtype(dtype) # In case a string, e.g. 'i8' is passed + + original_dtype = dtype # We'll need this for top-level array types + + # Where a top-level array type is requested, we have to do some + # fiddling around to present the data as a smaller array of + # subarrays. + if dtype.subdtype is not None: + + subdtype, subshape = dtype.subdtype + + # Make sure the subshape matches the last N axes' sizes. + if shape[-len(subshape):] != subshape: + raise ValueError("Array dtype shape %s is incompatible with data shape %s" % (subshape, shape)) + + # New "advertised" shape and dtype + shape = shape[0:len(shape)-len(subshape)] + dtype = subdtype + + # Not an array type; make sure to check the number of elements + # is compatible, and reshape if needed. + else: + + if shape is not None and product(shape) != product(data.shape): + raise ValueError("Shape of new attribute conflicts with shape of data") + + if shape != data.shape: + data = data.reshape(shape) + + # We need this to handle special string types. 
+ if not isinstance(data, Empty): + data = numpy.asarray(data, dtype=dtype) + + # Make HDF5 datatype and dataspace for the H5A calls + if use_htype is None: + htype = h5t.py_create(original_dtype, logical=True) + htype2 = h5t.py_create(original_dtype) # Must be bit-for-bit representation rather than logical + else: + htype = use_htype + htype2 = None + + if isinstance(data, Empty): + space = h5s.create(h5s.NULL) + else: + space = h5s.create_simple(shape) + + # For a long time, h5py would create attributes with a random name + # and then rename them, imitating how you can atomically replace + # a file in a filesystem. But HDF5 does not offer atomic replacement + # (you have to delete the existing attribute first), and renaming + # exposes some bugs - see https://github.com/h5py/h5py/issues/1385 + # So we've gone back to the simpler delete & recreate model. + if h5a.exists(self._id, name): + h5a.delete(self._id, name) + + attr = h5a.create(self._id, name, htype, space) + try: + if not isinstance(data, Empty): + attr.write(data, mtype=htype2) + except: + attr.close() + h5a.delete(self._id, name) + raise + attr.close() + + def modify(self, name, value): + """ Change the value of an attribute while preserving its type. + + Differs from __setitem__ in that if the attribute already exists, its + type is preserved. This can be very useful for interacting with + externally generated files. + + If the attribute doesn't exist, it will be automatically created. + """ + with phil: + if not name in self: + self[name] = value + else: + attr = h5a.open(self._id, self._e(name)) + + if is_empty_dataspace(attr): + raise OSError("Empty attributes can't be modified") + + # If the input data is already an array, let HDF5 do the conversion. + # If it's a list or similar, don't make numpy guess a dtype for it. + dt = None if isinstance(value, numpy.ndarray) else attr.dtype + value = numpy.asarray(value, order='C', dtype=dt) + + # Allow the case of () <-> (1,) + if (value.shape != attr.shape) and not \ + (value.size == 1 and product(attr.shape) == 1): + raise TypeError("Shape of data is incompatible with existing attribute") + attr.write(value) + + @with_phil + def __len__(self): + """ Number of attributes attached to the object. """ + # I expect we will not have more than 2**32 attributes + return h5a.get_num_attrs(self._id) + + def __iter__(self): + """ Iterate over the names of attributes. """ + with phil: + + attrlist = [] + def iter_cb(name, *args): + """ Callback to gather attribute names """ + attrlist.append(self._d(name)) + + cpl = self._id.get_create_plist() + crt_order = cpl.get_attr_creation_order() + cpl.close() + if crt_order & h5p.CRT_ORDER_TRACKED: + idx_type = h5.INDEX_CRT_ORDER + else: + idx_type = h5.INDEX_NAME + + h5a.iterate(self._id, iter_cb, index_type=idx_type) + + for name in attrlist: + yield name + + @with_phil + def __contains__(self, name): + """ Determine if an attribute exists, by name. """ + return h5a.exists(self._id, self._e(name)) + + @with_phil + def __repr__(self): + if not self._id: + return "<Attributes of closed HDF5 object>" + return "<Attributes of HDF5 object at %s>" % id(self._id) diff --git a/MLPY/Lib/site-packages/h5py/_hl/base.py b/MLPY/Lib/site-packages/h5py/_hl/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b15536b96a29614a7d04adde3e4c572a3cdd7500 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/base.py @@ -0,0 +1,537 @@ +# This file is part of h5py, a Python interface to the HDF5 library. 
+# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements operations common to all high-level objects (File, etc.). +""" + +from collections.abc import ( + Mapping, MutableMapping, KeysView, ValuesView, ItemsView +) +import os +import posixpath + +import numpy as np + +# The high-level interface is serialized; every public API function & method +# is wrapped in a lock. We reuse the low-level lock because (1) it's fast, +# and (2) it eliminates the possibility of deadlocks due to out-of-order +# lock acquisition. +from .._objects import phil, with_phil +from .. import h5d, h5i, h5r, h5p, h5f, h5t, h5s +from .compat import fspath, filename_encode + + +def is_hdf5(fname): + """ Determine if a file is valid HDF5 (False if it doesn't exist). """ + with phil: + fname = os.path.abspath(fspath(fname)) + + if os.path.isfile(fname): + return h5f.is_hdf5(filename_encode(fname)) + return False + + +def find_item_type(data): + """Find the item type of a simple object or collection of objects. + + E.g. [[['a']]] -> str + + The focus is on collections where all items have the same type; we'll return + None if that's not the case. + + The aim is to treat numpy arrays of Python objects like normal Python + collections, while treating arrays with specific dtypes differently. + We're also only interested in array-like collections - lists and tuples, + possibly nested - not things like sets or dicts. + """ + if isinstance(data, np.ndarray): + if ( + data.dtype.kind == 'O' + and not h5t.check_string_dtype(data.dtype) + and not h5t.check_vlen_dtype(data.dtype) + ): + item_types = {type(e) for e in data.flat} + else: + return None + elif isinstance(data, (list, tuple)): + item_types = {find_item_type(e) for e in data} + else: + return type(data) + + if len(item_types) != 1: + return None + return item_types.pop() + + +def guess_dtype(data): + """ Attempt to guess an appropriate dtype for the object, returning None + if nothing is appropriate (or if it should be left up the the array + constructor to figure out) + """ + with phil: + if isinstance(data, h5r.RegionReference): + return h5t.regionref_dtype + if isinstance(data, h5r.Reference): + return h5t.ref_dtype + + item_type = find_item_type(data) + + if item_type is bytes: + return h5t.string_dtype(encoding='ascii') + if item_type is str: + return h5t.string_dtype() + + return None + + +def is_float16_dtype(dt): + if dt is None: + return False + + dt = np.dtype(dt) # normalize strings -> np.dtype objects + return dt.kind == 'f' and dt.itemsize == 2 + + +def array_for_new_object(data, specified_dtype=None): + """Prepare an array from data used to create a new dataset or attribute""" + + # We mostly let HDF5 convert data as necessary when it's written. + # But if we are going to a float16 datatype, pre-convert in python + # to workaround a bug in the conversion. + # https://github.com/h5py/h5py/issues/819 + if is_float16_dtype(specified_dtype): + as_dtype = specified_dtype + elif not isinstance(data, np.ndarray) and (specified_dtype is not None): + # If we need to convert e.g. a list to an array, don't leave numpy + # to guess a dtype we already know. + as_dtype = specified_dtype + else: + as_dtype = guess_dtype(data) + + data = np.asarray(data, order="C", dtype=as_dtype) + + # In most cases, this does nothing. But if data was already an array, + # and as_dtype is a tagged h5py dtype (e.g. 
for an object array of strings), + # asarray() doesn't replace its dtype object. This gives it the tagged dtype: + if as_dtype is not None: + data = data.view(dtype=as_dtype) + + return data + + +def default_lapl(): + """ Default link access property list """ + lapl = h5p.create(h5p.LINK_ACCESS) + fapl = h5p.create(h5p.FILE_ACCESS) + fapl.set_fclose_degree(h5f.CLOSE_STRONG) + lapl.set_elink_fapl(fapl) + return lapl + + +def default_lcpl(): + """ Default link creation property list """ + lcpl = h5p.create(h5p.LINK_CREATE) + lcpl.set_create_intermediate_group(True) + return lcpl + +dlapl = default_lapl() +dlcpl = default_lcpl() + + +def is_empty_dataspace(obj): + """ Check if an object's dataspace is empty """ + if obj.get_space().get_simple_extent_type() == h5s.NULL: + return True + return False + + +class CommonStateObject: + + """ + Mixin class that allows sharing information between objects which + reside in the same HDF5 file. Requires that the host class have + a ".id" attribute which returns a low-level ObjectID subclass. + + Also implements Unicode operations. + """ + + @property + def _lapl(self): + """ Fetch the link access property list appropriate for this object + """ + return dlapl + + @property + def _lcpl(self): + """ Fetch the link creation property list appropriate for this object + """ + return dlcpl + + def _e(self, name, lcpl=None): + """ Encode a name according to the current file settings. + + Returns name, or 2-tuple (name, lcpl) if lcpl is True + + - Binary strings are always passed as-is, h5t.CSET_ASCII + - Unicode strings are encoded utf8, h5t.CSET_UTF8 + + If name is None, returns either None or (None, None) appropriately. + """ + def get_lcpl(coding): + """ Create an appropriate link creation property list """ + lcpl = self._lcpl.copy() + lcpl.set_char_encoding(coding) + return lcpl + + if name is None: + return (None, None) if lcpl else None + + if isinstance(name, bytes): + coding = h5t.CSET_ASCII + elif isinstance(name, str): + try: + name = name.encode('ascii') + coding = h5t.CSET_ASCII + except UnicodeEncodeError: + name = name.encode('utf8') + coding = h5t.CSET_UTF8 + else: + raise TypeError(f"A name should be string or bytes, not {type(name)}") + + if lcpl: + return name, get_lcpl(coding) + return name + + def _d(self, name): + """ Decode a name according to the current file settings. + + - Try to decode utf8 + - Failing that, return the byte string + + If name is None, returns None. + """ + if name is None: + return None + + try: + return name.decode('utf8') + except UnicodeDecodeError: + pass + return name + + +class _RegionProxy: + + """ + Proxy object which handles region references. + + To create a new region reference (datasets only), use slicing syntax: + + >>> newref = obj.regionref[0:10:2] + + To determine the target dataset shape from an existing reference: + + >>> shape = obj.regionref.shape(existingref) + + where <obj> may be any object in the file. To determine the shape of + the selection in use on the target dataset: + + >>> selection_shape = obj.regionref.selection(existingref) + """ + + def __init__(self, obj): + self.obj = obj + self.id = obj.id + + def __getitem__(self, args): + if not isinstance(self.id, h5d.DatasetID): + raise TypeError("Region references can only be made to datasets") + from . 
import selections + with phil: + selection = selections.select(self.id.shape, args, dataset=self.obj) + return h5r.create(self.id, b'.', h5r.DATASET_REGION, selection.id) + + def shape(self, ref): + """ Get the shape of the target dataspace referred to by *ref*. """ + with phil: + sid = h5r.get_region(ref, self.id) + return sid.shape + + def selection(self, ref): + """ Get the shape of the target dataspace selection referred to by *ref* + """ + from . import selections + with phil: + sid = h5r.get_region(ref, self.id) + return selections.guess_shape(sid) + + +class HLObject(CommonStateObject): + + """ + Base class for high-level interface objects. + """ + + @property + def file(self): + """ Return a File instance associated with this object """ + from . import files + with phil: + return files.File(self.id) + + @property + @with_phil + def name(self): + """ Return the full name of this object. None if anonymous. """ + return self._d(h5i.get_name(self.id)) + + @property + @with_phil + def parent(self): + """Return the parent group of this object. + + This is always equivalent to obj.file[posixpath.dirname(obj.name)]. + ValueError if this object is anonymous. + """ + if self.name is None: + raise ValueError("Parent of an anonymous object is undefined") + return self.file[posixpath.dirname(self.name)] + + @property + @with_phil + def id(self): + """ Low-level identifier appropriate for this object """ + return self._id + + @property + @with_phil + def ref(self): + """ An (opaque) HDF5 reference to this object """ + return h5r.create(self.id, b'.', h5r.OBJECT) + + @property + @with_phil + def regionref(self): + """Create a region reference (Datasets only). + + The syntax is regionref[]. For example, dset.regionref[...] + creates a region reference in which the whole dataset is selected. + + Can also be used to determine the shape of the referenced dataset + (via .shape property), or the shape of the selection (via the + .selection property). + """ + return _RegionProxy(self) + + @property + def attrs(self): + """ Attributes attached to this object """ + from . import attrs + with phil: + return attrs.AttributeManager(self) + + @with_phil + def __init__(self, oid): + """ Setup this object, given its low-level identifier """ + self._id = oid + + @with_phil + def __hash__(self): + return hash(self.id) + + @with_phil + def __eq__(self, other): + if hasattr(other, 'id'): + return self.id == other.id + return NotImplemented + + def __bool__(self): + with phil: + return bool(self.id) + __nonzero__ = __bool__ + + def __getnewargs__(self): + """Disable pickle. + + Handles for HDF5 objects can't be reliably deserialised, because the + recipient may not have access to the same files. So we do this to + fail early. + + If you really want to pickle h5py objects and can live with some + limitations, look at the h5pickle project on PyPI. + """ + raise TypeError("h5py objects cannot be pickled") + + def __getstate__(self): + # Pickle protocols 0 and 1 use this instead of __getnewargs__ + raise TypeError("h5py objects cannot be pickled") + +# --- Dictionary-style interface ---------------------------------------------- + +# To implement the dictionary-style interface from groups and attributes, +# we inherit from the appropriate abstract base classes in collections. +# +# All locking is taken care of by the subclasses. +# We have to override ValuesView and ItemsView here because Group and +# AttributeManager can only test for key names. 
+ + +class KeysViewHDF5(KeysView): + def __str__(self): + return "<KeysViewHDF5 {}>".format(list(self)) + + def __reversed__(self): + yield from reversed(self._mapping) + + __repr__ = __str__ + +class ValuesViewHDF5(ValuesView): + + """ + Wraps e.g. a Group or AttributeManager to provide a value view. + + Note that __contains__ will have poor performance as it has + to scan all the links or attributes. + """ + + def __contains__(self, value): + with phil: + for key in self._mapping: + if value == self._mapping.get(key): + return True + return False + + def __iter__(self): + with phil: + for key in self._mapping: + yield self._mapping.get(key) + + def __reversed__(self): + with phil: + for key in reversed(self._mapping): + yield self._mapping.get(key) + + +class ItemsViewHDF5(ItemsView): + + """ + Wraps e.g. a Group or AttributeManager to provide an items view. + """ + + def __contains__(self, item): + with phil: + key, val = item + if key in self._mapping: + return val == self._mapping.get(key) + return False + + def __iter__(self): + with phil: + for key in self._mapping: + yield (key, self._mapping.get(key)) + + def __reversed__(self): + with phil: + for key in reversed(self._mapping): + yield (key, self._mapping.get(key)) + + +class MappingHDF5(Mapping): + + """ + Wraps a Group, AttributeManager or DimensionManager object to provide + an immutable mapping interface. + + We don't inherit directly from MutableMapping because certain + subclasses, for example DimensionManager, are read-only. + """ + def keys(self): + """ Get a view object on member names """ + return KeysViewHDF5(self) + + def values(self): + """ Get a view object on member objects """ + return ValuesViewHDF5(self) + + def items(self): + """ Get a view object on member items """ + return ItemsViewHDF5(self) + + def _ipython_key_completions_(self): + """ Custom tab completions for __getitem__ in IPython >=5.0. """ + return sorted(self.keys()) + + +class MutableMappingHDF5(MappingHDF5, MutableMapping): + + """ + Wraps a Group or AttributeManager object to provide a mutable + mapping interface, in contrast to the read-only mapping of + MappingHDF5. + """ + + pass + + +class Empty: + + """ + Proxy object to represent empty/null dataspaces (a.k.a H5S_NULL). + + This can have an associated dtype, but has no shape or data. This is not + the same as an array with shape (0,). + """ + shape = None + size = None + + def __init__(self, dtype): + self.dtype = np.dtype(dtype) + + def __eq__(self, other): + if isinstance(other, Empty) and self.dtype == other.dtype: + return True + return False + + def __repr__(self): + return "Empty(dtype={0!r})".format(self.dtype) + + +def product(nums): + """Calculate a numeric product + + For small amounts of data (e.g. shape tuples), this simple code is much + faster than calling numpy.prod(). + """ + prod = 1 + for n in nums: + prod *= n + return prod + + +# Simple variant of cached_property: +# Unlike functools, this has no locking, so we don't have to worry about +# deadlocks with phil (see issue gh-2064). Unlike cached-property on PyPI, it +# doesn't try to import asyncio (which can be ~100 extra modules). +# Many projects seem to have similar variants of this, often without attribution, +# but to be cautious, this code comes from cached-property (Copyright (c) 2015, +# Daniel Greenfeld, BSD license), where it is attributed to bottle (Copyright +# (c) 2009-2022, Marcel Hellkamp, MIT license). 
+ +class cached_property: + def __init__(self, func): + self.__doc__ = getattr(func, "__doc__") + self.func = func + + def __get__(self, obj, cls): + if obj is None: + return self + + value = obj.__dict__[self.func.__name__] = self.func(obj) + return value diff --git a/MLPY/Lib/site-packages/h5py/_hl/compat.py b/MLPY/Lib/site-packages/h5py/_hl/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..60d704263c86bc601ee1d7b7eb5b9c504b0162e7 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/compat.py @@ -0,0 +1,42 @@ +""" +Compatibility module for high-level h5py +""" +import sys +from os import fspath, fsencode, fsdecode +from ..version import hdf5_built_version_tuple + +WINDOWS_ENCODING = "utf-8" if hdf5_built_version_tuple >= (1, 10, 6) else "mbcs" + + +def filename_encode(filename): + """ + Encode filename for use in the HDF5 library. + + Due to how HDF5 handles filenames on different systems, this should be + called on any filenames passed to the HDF5 library. See the documentation on + filenames in h5py for more information. + """ + filename = fspath(filename) + if sys.platform == "win32": + if isinstance(filename, str): + return filename.encode(WINDOWS_ENCODING, "strict") + return filename + return fsencode(filename) + + +def filename_decode(filename): + """ + Decode filename used by HDF5 library. + + Due to how HDF5 handles filenames on different systems, this should be + called on any filenames passed from the HDF5 library. See the documentation + on filenames in h5py for more information. + """ + if sys.platform == "win32": + if isinstance(filename, bytes): + return filename.decode(WINDOWS_ENCODING, "strict") + elif isinstance(filename, str): + return filename + else: + raise TypeError("expect bytes or str, not %s" % type(filename).__name__) + return fsdecode(filename) diff --git a/MLPY/Lib/site-packages/h5py/_hl/dataset.py b/MLPY/Lib/site-packages/h5py/_hl/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3855a4d8097669ee6981d8c979b3101bc502442e --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/dataset.py @@ -0,0 +1,1142 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2020 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements support for high-level dataset access. +""" + +import posixpath as pp +import sys + +import numpy + +from .. import h5, h5s, h5t, h5r, h5d, h5p, h5fd, h5ds, _selector +from .base import ( + array_for_new_object, cached_property, Empty, find_item_type, HLObject, + phil, product, with_phil, +) +from . import filters +from . import selections as sel +from . 
import selections2 as sel2 +from .datatype import Datatype +from .compat import filename_decode +from .vds import VDSmap, vds_support + +_LEGACY_GZIP_COMPRESSION_VALS = frozenset(range(10)) +MPI = h5.get_config().mpi + + +def make_new_dset(parent, shape=None, dtype=None, data=None, name=None, + chunks=None, compression=None, shuffle=None, + fletcher32=None, maxshape=None, compression_opts=None, + fillvalue=None, scaleoffset=None, track_times=False, + external=None, track_order=None, dcpl=None, dapl=None, + efile_prefix=None, virtual_prefix=None, allow_unknown_filter=False, + rdcc_nslots=None, rdcc_nbytes=None, rdcc_w0=None): + """ Return a new low-level dataset identifier """ + + # Convert data to a C-contiguous ndarray + if data is not None and not isinstance(data, Empty): + data = array_for_new_object(data, specified_dtype=dtype) + + # Validate shape + if shape is None: + if data is None: + if dtype is None: + raise TypeError("One of data, shape or dtype must be specified") + data = Empty(dtype) + shape = data.shape + else: + shape = (shape,) if isinstance(shape, int) else tuple(shape) + if data is not None and (product(shape) != product(data.shape)): + raise ValueError("Shape tuple is incompatible with data") + + if isinstance(maxshape, int): + maxshape = (maxshape,) + tmp_shape = maxshape if maxshape is not None else shape + + # Validate chunk shape + if isinstance(chunks, int) and not isinstance(chunks, bool): + chunks = (chunks,) + if isinstance(chunks, tuple) and any( + chunk > dim for dim, chunk in zip(tmp_shape, chunks) if dim is not None + ): + errmsg = "Chunk shape must not be greater than data shape in any dimension. "\ + "{} is not compatible with {}".format(chunks, shape) + raise ValueError(errmsg) + + if isinstance(dtype, Datatype): + # Named types are used as-is + tid = dtype.id + dtype = tid.dtype # Following code needs this + else: + # Validate dtype + if dtype is None and data is None: + dtype = numpy.dtype("=f4") + elif dtype is None and data is not None: + dtype = data.dtype + else: + dtype = numpy.dtype(dtype) + tid = h5t.py_create(dtype, logical=1) + + # Legacy + if any((compression, shuffle, fletcher32, maxshape, scaleoffset)) and chunks is False: + raise ValueError("Chunked format required for given storage options") + + # Legacy + if compression is True: + if compression_opts is None: + compression_opts = 4 + compression = 'gzip' + + # Legacy + if compression in _LEGACY_GZIP_COMPRESSION_VALS: + if compression_opts is not None: + raise TypeError("Conflict in compression options") + compression_opts = compression + compression = 'gzip' + dcpl = filters.fill_dcpl( + dcpl or h5p.create(h5p.DATASET_CREATE), shape, dtype, + chunks, compression, compression_opts, shuffle, fletcher32, + maxshape, scaleoffset, external, allow_unknown_filter) + + if fillvalue is not None: + # prepare string-type dtypes for fillvalue + string_info = h5t.check_string_dtype(dtype) + if string_info is not None: + # fake vlen dtype for fixed len string fillvalue + # to not trigger unwanted encoding + dtype = h5t.string_dtype(string_info.encoding) + fillvalue = numpy.array(fillvalue, dtype=dtype) + else: + fillvalue = numpy.array(fillvalue) + dcpl.set_fill_value(fillvalue) + + if track_times is None: + # In case someone explicitly passes None for the default + track_times = False + if track_times in (True, False): + dcpl.set_obj_track_times(track_times) + else: + raise TypeError("track_times must be either True or False") + if track_order is True: + dcpl.set_attr_creation_order( + 
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) + elif track_order is False: + dcpl.set_attr_creation_order(0) + elif track_order is not None: + raise TypeError("track_order must be either True or False") + + if maxshape is not None: + maxshape = tuple(m if m is not None else h5s.UNLIMITED for m in maxshape) + + if any([efile_prefix, virtual_prefix, rdcc_nbytes, rdcc_nslots, rdcc_w0]): + dapl = dapl or h5p.create(h5p.DATASET_ACCESS) + + if efile_prefix is not None: + dapl.set_efile_prefix(efile_prefix) + + if virtual_prefix is not None: + dapl.set_virtual_prefix(virtual_prefix) + + if rdcc_nbytes or rdcc_nslots or rdcc_w0: + cache_settings = list(dapl.get_chunk_cache()) + if rdcc_nslots is not None: + cache_settings[0] = rdcc_nslots + if rdcc_nbytes is not None: + cache_settings[1] = rdcc_nbytes + if rdcc_w0 is not None: + cache_settings[2] = rdcc_w0 + dapl.set_chunk_cache(*cache_settings) + + if isinstance(data, Empty): + sid = h5s.create(h5s.NULL) + else: + sid = h5s.create_simple(shape, maxshape) + + dset_id = h5d.create(parent.id, name, tid, sid, dcpl=dcpl, dapl=dapl) + + if (data is not None) and (not isinstance(data, Empty)): + dset_id.write(h5s.ALL, h5s.ALL, data) + + return dset_id + + +def open_dset(parent, name, dapl=None, efile_prefix=None, virtual_prefix=None, + rdcc_nslots=None, rdcc_nbytes=None, rdcc_w0=None, **kwds): + """ Return an existing low-level dataset identifier """ + + if any([efile_prefix, virtual_prefix, rdcc_nbytes, rdcc_nslots, rdcc_w0]): + dapl = dapl or h5p.create(h5p.DATASET_ACCESS) + + if efile_prefix is not None: + dapl.set_efile_prefix(efile_prefix) + + if virtual_prefix is not None: + dapl.set_virtual_prefix(virtual_prefix) + + if rdcc_nbytes or rdcc_nslots or rdcc_w0: + cache_settings = list(dapl.get_chunk_cache()) + if rdcc_nslots is not None: + cache_settings[0] = rdcc_nslots + if rdcc_nbytes is not None: + cache_settings[1] = rdcc_nbytes + if rdcc_w0 is not None: + cache_settings[2] = rdcc_w0 + dapl.set_chunk_cache(*cache_settings) + + dset_id = h5d.open(parent.id, name, dapl=dapl) + + return dset_id + + +class AstypeWrapper: + """Wrapper to convert data on reading from a dataset. + """ + def __init__(self, dset, dtype): + self._dset = dset + self._dtype = numpy.dtype(dtype) + + def __getitem__(self, args): + return self._dset.__getitem__(args, new_dtype=self._dtype) + + def __len__(self): + """ Get the length of the underlying dataset + + >>> length = len(dataset.astype('f8')) + """ + return len(self._dset) + + def __array__(self, dtype=None): + data = self[:] + if dtype is not None: + data = data.astype(dtype) + return data + + +class AsStrWrapper: + """Wrapper to decode strings on reading the dataset""" + def __init__(self, dset, encoding, errors='strict'): + self._dset = dset + if encoding is None: + encoding = h5t.check_string_dtype(dset.dtype).encoding + self.encoding = encoding + self.errors = errors + + def __getitem__(self, args): + bytes_arr = self._dset[args] + # numpy.char.decode() seems like the obvious thing to use. But it only + # accepts numpy string arrays, not object arrays of bytes (which we + # return from HDF5 variable-length strings). And the numpy + # implementation is not faster than doing it with a loop; in fact, by + # not converting the result to a numpy unicode array, the + # naive way can be faster! 
(Comparing with numpy 1.18.4, June 2020) + if numpy.isscalar(bytes_arr): + return bytes_arr.decode(self.encoding, self.errors) + + return numpy.array([ + b.decode(self.encoding, self.errors) for b in bytes_arr.flat + ], dtype=object).reshape(bytes_arr.shape) + + def __len__(self): + """ Get the length of the underlying dataset + + >>> length = len(dataset.asstr()) + """ + return len(self._dset) + + def __array__(self): + return numpy.array([ + b.decode(self.encoding, self.errors) for b in self._dset + ], dtype=object).reshape(self._dset.shape) + + +class FieldsWrapper: + """Wrapper to extract named fields from a dataset with a struct dtype""" + extract_field = None + + def __init__(self, dset, prior_dtype, names): + self._dset = dset + if isinstance(names, str): + self.extract_field = names + names = [names] + self.read_dtype = readtime_dtype(prior_dtype, names) + + def __array__(self, dtype=None): + data = self[:] + if dtype is not None: + data = data.astype(dtype) + return data + + def __getitem__(self, args): + data = self._dset.__getitem__(args, new_dtype=self.read_dtype) + if self.extract_field is not None: + data = data[self.extract_field] + return data + + def __len__(self): + """ Get the length of the underlying dataset + + >>> length = len(dataset.fields(['x', 'y'])) + """ + return len(self._dset) + + +def readtime_dtype(basetype, names): + """Make a NumPy compound dtype with a subset of available fields""" + if basetype.names is None: # Names provided, but not compound + raise ValueError("Field names only allowed for compound types") + + for name in names: # Check all names are legal + if name not in basetype.names: + raise ValueError("Field %s does not appear in this type." % name) + + return numpy.dtype([(name, basetype.fields[name][0]) for name in names]) + + +if MPI: + class CollectiveContext: + + """ Manages collective I/O in MPI mode """ + + # We don't bother with _local as threads are forbidden in MPI mode + + def __init__(self, dset): + self._dset = dset + + def __enter__(self): + # pylint: disable=protected-access + self._dset._dxpl.set_dxpl_mpio(h5fd.MPIO_COLLECTIVE) + + def __exit__(self, *args): + # pylint: disable=protected-access + self._dset._dxpl.set_dxpl_mpio(h5fd.MPIO_INDEPENDENT) + + +class ChunkIterator: + """ + Class to iterate through list of chunks of a given dataset + """ + def __init__(self, dset, source_sel=None): + self._shape = dset.shape + rank = len(dset.shape) + + if not dset.chunks: + # can only use with chunked datasets + raise TypeError("Chunked dataset required") + + self._layout = dset.chunks + if source_sel is None: + # select over entire dataset + self._sel = tuple( + slice(0, self._shape[dim]) + for dim in range(rank) + ) + else: + if isinstance(source_sel, slice): + self._sel = (source_sel,) + else: + self._sel = source_sel + if len(self._sel) != rank: + raise ValueError("Invalid selection - selection region must have same rank as dataset") + self._chunk_index = [] + for dim in range(rank): + s = self._sel[dim] + if s.start < 0 or s.stop > self._shape[dim] or s.stop <= s.start: + raise ValueError("Invalid selection - selection region must be within dataset space") + index = s.start // self._layout[dim] + self._chunk_index.append(index) + + def __iter__(self): + return self + + def __next__(self): + rank = len(self._shape) + slices = [] + if rank == 0 or self._chunk_index[0] * self._layout[0] >= self._sel[0].stop: + # ran past the last chunk, end iteration + raise StopIteration() + + for dim in range(rank): + s = self._sel[dim] + start = 
self._chunk_index[dim] * self._layout[dim] + stop = (self._chunk_index[dim] + 1) * self._layout[dim] + # adjust the start if this is an edge chunk + if start < s.start: + start = s.start + if stop > s.stop: + stop = s.stop # trim to end of the selection + s = slice(start, stop, 1) + slices.append(s) + + # bump up the last index and carry forward if we run outside the selection + dim = rank - 1 + while dim >= 0: + s = self._sel[dim] + self._chunk_index[dim] += 1 + + chunk_end = self._chunk_index[dim] * self._layout[dim] + if chunk_end < s.stop: + # we still have room to extend along this dimensions + return tuple(slices) + + if dim > 0: + # reset to the start and continue iterating with higher dimension + self._chunk_index[dim] = s.start // self._layout[dim] + dim -= 1 + return tuple(slices) + + +class Dataset(HLObject): + + """ + Represents an HDF5 dataset + """ + + def astype(self, dtype): + """ Get a wrapper allowing you to perform reads to a + different destination type, e.g.: + + >>> double_precision = dataset.astype('f8')[0:100:2] + """ + return AstypeWrapper(self, dtype) + + def asstr(self, encoding=None, errors='strict'): + """Get a wrapper to read string data as Python strings: + + >>> str_array = dataset.asstr()[:] + + The parameters have the same meaning as in ``bytes.decode()``. + If ``encoding`` is unspecified, it will use the encoding in the HDF5 + datatype (either ascii or utf-8). + """ + string_info = h5t.check_string_dtype(self.dtype) + if string_info is None: + raise TypeError( + "dset.asstr() can only be used on datasets with " + "an HDF5 string datatype" + ) + if encoding is None: + encoding = string_info.encoding + return AsStrWrapper(self, encoding, errors=errors) + + def fields(self, names, *, _prior_dtype=None): + """Get a wrapper to read a subset of fields from a compound data type: + + >>> 2d_coords = dataset.fields(['x', 'y'])[:] + + If names is a string, a single field is extracted, and the resulting + arrays will have that dtype. Otherwise, it should be an iterable, + and the read data will have a compound dtype. + """ + if _prior_dtype is None: + _prior_dtype = self.dtype + return FieldsWrapper(self, _prior_dtype, names) + + if MPI: + @property + @with_phil + def collective(self): + """ Context manager for MPI collective reads & writes """ + return CollectiveContext(self) + + @property + def dims(self): + """ Access dimension scales attached to this dataset. """ + from .dims import DimensionManager + with phil: + return DimensionManager(self) + + @property + @with_phil + def ndim(self): + """Numpy-style attribute giving the number of dimensions""" + return self.id.rank + + @property + def shape(self): + """Numpy-style shape tuple giving dataset dimensions""" + if 'shape' in self._cache_props: + return self._cache_props['shape'] + + with phil: + shape = self.id.shape + + # If the file is read-only, cache the shape to speed-up future uses. + # This cache is invalidated by .refresh() when using SWMR. + if self._readonly: + self._cache_props['shape'] = shape + return shape + + @shape.setter + @with_phil + def shape(self, shape): + # pylint: disable=missing-docstring + self.resize(shape) + + @property + def size(self): + """Numpy-style attribute giving the total dataset size""" + if 'size' in self._cache_props: + return self._cache_props['size'] + + if self._is_empty: + size = None + else: + size = product(self.shape) + + # If the file is read-only, cache the size to speed-up future uses. + # This cache is invalidated by .refresh() when using SWMR. 
+ if self._readonly: + self._cache_props['size'] = size + return size + + @property + def nbytes(self): + """Numpy-style attribute giving the raw dataset size as the number of bytes""" + size = self.size + if size is None: # if we are an empty 0-D array, then there are no bytes in the dataset + return 0 + return self.dtype.itemsize * size + + @property + def _selector(self): + """Internal object for optimised selection of data""" + if '_selector' in self._cache_props: + return self._cache_props['_selector'] + + slr = _selector.Selector(self.id.get_space()) + + # If the file is read-only, cache the reader to speed up future uses. + # This cache is invalidated by .refresh() when using SWMR. + if self._readonly: + self._cache_props['_selector'] = slr + return slr + + @property + def _fast_reader(self): + """Internal object for optimised reading of data""" + if '_fast_reader' in self._cache_props: + return self._cache_props['_fast_reader'] + + rdr = _selector.Reader(self.id) + + # If the file is read-only, cache the reader to speed up future uses. + # This cache is invalidated by .refresh() when using SWMR. + if self._readonly: + self._cache_props['_fast_reader'] = rdr + return rdr + + @property + @with_phil + def dtype(self): + """Numpy dtype representing the datatype""" + return self.id.dtype + + @property + @with_phil + def chunks(self): + """Dataset chunks (or None)""" + dcpl = self._dcpl + if dcpl.get_layout() == h5d.CHUNKED: + return dcpl.get_chunk() + return None + + @property + @with_phil + def compression(self): + """Compression strategy (or None)""" + for x in ('gzip','lzf','szip'): + if x in self._filters: + return x + return None + + @property + @with_phil + def compression_opts(self): + """ Compression setting. Int(0-9) for gzip, 2-tuple for szip. """ + return self._filters.get(self.compression, None) + + @property + @with_phil + def shuffle(self): + """Shuffle filter present (T/F)""" + return 'shuffle' in self._filters + + @property + @with_phil + def fletcher32(self): + """Fletcher32 filter is present (T/F)""" + return 'fletcher32' in self._filters + + @property + @with_phil + def scaleoffset(self): + """Scale/offset filter settings. For integer data types, this is + the number of bits stored, or 0 for auto-detected. For floating + point data types, this is the number of decimal places retained. + If the scale/offset filter is not in use, this is None.""" + try: + return self._filters['scaleoffset'][1] + except KeyError: + return None + + @property + @with_phil + def external(self): + """External file settings. Returns a list of tuples of + (name, offset, size) for each external file entry, or returns None + if no external files are used.""" + count = self._dcpl.get_external_count() + if count<=0: + return None + ext_list = list() + for x in range(count): + (name, offset, size) = self._dcpl.get_external(x) + ext_list.append( (filename_decode(name), offset, size) ) + return ext_list + + @property + @with_phil + def maxshape(self): + """Shape up to which this dataset can be resized. Axes with value + None have no resize limit. 
""" + space = self.id.get_space() + dims = space.get_simple_extent_dims(True) + if dims is None: + return None + + return tuple(x if x != h5s.UNLIMITED else None for x in dims) + + @property + @with_phil + def fillvalue(self): + """Fill value for this dataset (0 by default)""" + arr = numpy.zeros((1,), dtype=self.dtype) + self._dcpl.get_fill_value(arr) + return arr[0] + + @cached_property + @with_phil + def _extent_type(self): + """Get extent type for this dataset - SIMPLE, SCALAR or NULL""" + return self.id.get_space().get_simple_extent_type() + + @cached_property + def _is_empty(self): + """Check if extent type is empty""" + return self._extent_type == h5s.NULL + + @with_phil + def __init__(self, bind, *, readonly=False): + """ Create a new Dataset object by binding to a low-level DatasetID. + """ + if not isinstance(bind, h5d.DatasetID): + raise ValueError("%s is not a DatasetID" % bind) + super().__init__(bind) + + self._dcpl = self.id.get_create_plist() + self._dxpl = h5p.create(h5p.DATASET_XFER) + self._filters = filters.get_filters(self._dcpl) + self._readonly = readonly + self._cache_props = {} + + def resize(self, size, axis=None): + """ Resize the dataset, or the specified axis. + + The dataset must be stored in chunked format; it can be resized up to + the "maximum shape" (keyword maxshape) specified at creation time. + The rank of the dataset cannot be changed. + + "Size" should be a shape tuple, or if an axis is specified, an integer. + + BEWARE: This functions differently than the NumPy resize() method! + The data is not "reshuffled" to fit in the new shape; each axis is + grown or shrunk independently. The coordinates of existing data are + fixed. + """ + with phil: + if self.chunks is None: + raise TypeError("Only chunked datasets can be resized") + + if axis is not None: + if not (axis >=0 and axis < self.id.rank): + raise ValueError("Invalid axis (0 to %s allowed)" % (self.id.rank-1)) + try: + newlen = int(size) + except TypeError: + raise TypeError("Argument must be a single int if axis is specified") + size = list(self.shape) + size[axis] = newlen + + size = tuple(size) + self.id.set_extent(size) + #h5f.flush(self.id) # THG recommends + + @with_phil + def __len__(self): + """ The size of the first axis. TypeError if scalar. + + Limited to 2**32 on 32-bit systems; Dataset.len() is preferred. + """ + size = self.len() + if size > sys.maxsize: + raise OverflowError("Value too big for Python's __len__; use Dataset.len() instead.") + return size + + def len(self): + """ The size of the first axis. TypeError if scalar. + + Use of this method is preferred to len(dset), as Python's built-in + len() cannot handle values greater then 2**32 on 32-bit systems. + """ + with phil: + shape = self.shape + if len(shape) == 0: + raise TypeError("Attempt to take len() of scalar dataset") + return shape[0] + + @with_phil + def __iter__(self): + """ Iterate over the first axis. TypeError if scalar. + + BEWARE: Modifications to the yielded data are *NOT* written to file. + """ + shape = self.shape + if len(shape) == 0: + raise TypeError("Can't iterate over a scalar dataset") + for i in range(shape[0]): + yield self[i] + + @with_phil + def iter_chunks(self, sel=None): + """ Return chunk iterator. If set, the sel argument is a slice or + tuple of slices that defines the region to be used. If not set, the + entire dataspace will be used for the iterator. 
+ + For each chunk within the given region, the iterator yields a tuple of + slices that gives the intersection of the given chunk with the + selection area. + + A TypeError will be raised if the dataset is not chunked. + + A ValueError will be raised if the selection region is invalid. + + """ + return ChunkIterator(self, sel) + + @cached_property + def _fast_read_ok(self): + """Is this dataset suitable for simple reading""" + return ( + self._extent_type == h5s.SIMPLE + and isinstance(self.id.get_type(), (h5t.TypeIntegerID, h5t.TypeFloatID)) + ) + + @with_phil + def __getitem__(self, args, new_dtype=None): + """ Read a slice from the HDF5 dataset. + + Takes slices and recarray-style field names (more than one is + allowed!) in any order. Obeys basic NumPy rules, including + broadcasting. + + Also supports: + + * Boolean "mask" array indexing + """ + args = args if isinstance(args, tuple) else (args,) + + if self._fast_read_ok and (new_dtype is None): + try: + return self._fast_reader.read(args) + except TypeError: + pass # Fall back to Python read pathway below + + if self._is_empty: + # Check 'is Ellipsis' to avoid equality comparison with an array: + # array equality returns an array, not a boolean. + if args == () or (len(args) == 1 and args[0] is Ellipsis): + return Empty(self.dtype) + raise ValueError("Empty datasets cannot be sliced") + + # Sort field names from the rest of the args. + names = tuple(x for x in args if isinstance(x, str)) + + if names: + # Read a subset of the fields in this structured dtype + if len(names) == 1: + names = names[0] # Read with simpler dtype of this field + args = tuple(x for x in args if not isinstance(x, str)) + return self.fields(names, _prior_dtype=new_dtype)[args] + + if new_dtype is None: + new_dtype = self.dtype + mtype = h5t.py_create(new_dtype) + + # === Special-case region references ==== + + if len(args) == 1 and isinstance(args[0], h5r.RegionReference): + + obj = h5r.dereference(args[0], self.id) + if obj != self.id: + raise ValueError("Region reference must point to this dataset") + + sid = h5r.get_region(args[0], self.id) + mshape = sel.guess_shape(sid) + if mshape is None: + # 0D with no data (NULL or deselected SCALAR) + return Empty(new_dtype) + out = numpy.zeros(mshape, dtype=new_dtype) + if out.size == 0: + return out + + sid_out = h5s.create_simple(mshape) + sid_out.select_all() + self.id.read(sid_out, sid, out, mtype) + return out + + # === Check for zero-sized datasets ===== + + if self.size == 0: + # Check 'is Ellipsis' to avoid equality comparison with an array: + # array equality returns an array, not a boolean. + if args == () or (len(args) == 1 and args[0] is Ellipsis): + return numpy.zeros(self.shape, dtype=new_dtype) + + # === Scalar dataspaces ================= + + if self.shape == (): + fspace = self.id.get_space() + selection = sel2.select_read(fspace, args) + if selection.mshape is None: + arr = numpy.zeros((), dtype=new_dtype) + else: + arr = numpy.zeros(selection.mshape, dtype=new_dtype) + for mspace, fspace in selection: + self.id.read(mspace, fspace, arr, mtype) + if selection.mshape is None: + return arr[()] + return arr + + # === Everything else =================== + + # Perform the dataspace selection. 
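+ # (sel.select interprets the numpy-style indexing arguments and returns a
+ # Selection object carrying the selected file dataspace along with the
+ # shape of the array needed to hold the result)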
+ selection = sel.select(self.shape, args, dataset=self) + + if selection.nselect == 0: + return numpy.zeros(selection.array_shape, dtype=new_dtype) + + arr = numpy.zeros(selection.array_shape, new_dtype, order='C') + + # Perform the actual read + mspace = h5s.create_simple(selection.mshape) + fspace = selection.id + self.id.read(mspace, fspace, arr, mtype, dxpl=self._dxpl) + + # Patch up the output for NumPy + if arr.shape == (): + return arr[()] # 0 dim array -> numpy scalar + return arr + + @with_phil + def __setitem__(self, args, val): + """ Write to the HDF5 dataset from a Numpy array. + + NumPy's broadcasting rules are honored, for "simple" indexing + (slices and integers). For advanced indexing, the shapes must + match. + """ + args = args if isinstance(args, tuple) else (args,) + + # Sort field indices from the slicing + names = tuple(x for x in args if isinstance(x, str)) + args = tuple(x for x in args if not isinstance(x, str)) + + # Generally we try to avoid converting the arrays on the Python + # side. However, for compound literals this is unavoidable. + vlen = h5t.check_vlen_dtype(self.dtype) + if vlen is not None and vlen not in (bytes, str): + try: + val = numpy.asarray(val, dtype=vlen) + except (ValueError, TypeError): + try: + val = numpy.array([numpy.array(x, dtype=vlen) + for x in val], dtype=self.dtype) + except (ValueError, TypeError): + pass + if vlen == val.dtype: + if val.ndim > 1: + tmp = numpy.empty(shape=val.shape[:-1], dtype=object) + tmp.ravel()[:] = [i for i in val.reshape( + (product(val.shape[:-1]), val.shape[-1]) + )] + else: + tmp = numpy.array([None], dtype=object) + tmp[0] = val + val = tmp + elif self.dtype.kind == "O" or \ + (self.dtype.kind == 'V' and \ + (not isinstance(val, numpy.ndarray) or val.dtype.kind != 'V') and \ + (self.dtype.subdtype is None)): + if len(names) == 1 and self.dtype.fields is not None: + # Single field selected for write, from a non-array source + if not names[0] in self.dtype.fields: + raise ValueError("No such field for indexing: %s" % names[0]) + dtype = self.dtype.fields[names[0]][0] + cast_compound = True + else: + dtype = self.dtype + cast_compound = False + + val = numpy.asarray(val, dtype=dtype.base, order='C') + if cast_compound: + val = val.view(numpy.dtype([(names[0], dtype)])) + val = val.reshape(val.shape[:len(val.shape) - len(dtype.shape)]) + elif (self.dtype.kind == 'S' + and (h5t.check_string_dtype(self.dtype).encoding == 'utf-8') + and (find_item_type(val) is str) + ): + # Writing str objects to a fixed-length UTF-8 string dataset. + # Numpy's normal conversion only handles ASCII characters, but + # when the destination is UTF-8, we want to allow any unicode. + # This *doesn't* handle numpy fixed-length unicode data ('U' dtype), + # as HDF5 has no equivalent, and converting fixed length UTF-32 + # to variable length UTF-8 would obscure what's going on. + str_array = numpy.asarray(val, order='C', dtype=object) + val = numpy.array([ + s.encode('utf-8') for s in str_array.flat + ], dtype=self.dtype).reshape(str_array.shape) + else: + # If the input data is already an array, let HDF5 do the conversion. + # If it's a list or similar, don't make numpy guess a dtype for it. 
+ dt = None if isinstance(val, numpy.ndarray) else self.dtype.base + val = numpy.asarray(val, order='C', dtype=dt) + + # Check for array dtype compatibility and convert + if self.dtype.subdtype is not None: + shp = self.dtype.subdtype[1] + valshp = val.shape[-len(shp):] + if valshp != shp: # Last dimension has to match + raise TypeError("When writing to array types, last N dimensions have to match (got %s, but should be %s)" % (valshp, shp,)) + mtype = h5t.py_create(numpy.dtype((val.dtype, shp))) + mshape = val.shape[0:len(val.shape)-len(shp)] + + # Make a compound memory type if field-name slicing is required + elif len(names) != 0: + + mshape = val.shape + + # Catch common errors + if self.dtype.fields is None: + raise TypeError("Illegal slicing argument (not a compound dataset)") + mismatch = [x for x in names if x not in self.dtype.fields] + if len(mismatch) != 0: + mismatch = ", ".join('"%s"'%x for x in mismatch) + raise ValueError("Illegal slicing argument (fields %s not in dataset type)" % mismatch) + + # Write non-compound source into a single dataset field + if len(names) == 1 and val.dtype.fields is None: + subtype = h5t.py_create(val.dtype) + mtype = h5t.create(h5t.COMPOUND, subtype.get_size()) + mtype.insert(self._e(names[0]), 0, subtype) + + # Make a new source type keeping only the requested fields + else: + fieldnames = [x for x in val.dtype.names if x in names] # Keep source order + mtype = h5t.create(h5t.COMPOUND, val.dtype.itemsize) + for fieldname in fieldnames: + subtype = h5t.py_create(val.dtype.fields[fieldname][0]) + offset = val.dtype.fields[fieldname][1] + mtype.insert(self._e(fieldname), offset, subtype) + + # Use mtype derived from array (let DatasetID.write figure it out) + else: + mshape = val.shape + mtype = None + + # Perform the dataspace selection + selection = sel.select(self.shape, args, dataset=self) + + if selection.nselect == 0: + return + + # Broadcast scalars if necessary. + # In order to avoid slow broadcasting filling the destination by + # the scalar value, we create an intermediate array of the same + # size as the destination buffer provided that size is reasonable. + # We assume as reasonable a size smaller or equal as the used dataset + # chunk size if any. + # In case of dealing with a non-chunked destination dataset or with + # a selection whose size is larger than the dataset chunk size we fall + # back to using an intermediate array of size equal to the last dimension + # of the destination buffer. + # The reasoning behind is that it makes sense to assume the creator of + # the dataset used an appropriate chunk size according the available + # memory. In any case, if we cannot afford to create an intermediate + # array of the same size as the dataset chunk size, the user program has + # little hope to go much further. Solves h5py issue #1067 + if mshape == () and selection.array_shape != (): + if self.dtype.subdtype is not None: + raise TypeError("Scalar broadcasting is not supported for array dtypes") + if self.chunks and (product(self.chunks) >= product(selection.array_shape)): + val2 = numpy.empty(selection.array_shape, dtype=val.dtype) + else: + val2 = numpy.empty(selection.array_shape[-1], dtype=val.dtype) + val2[...] 
= val + val = val2 + mshape = val.shape + + # Perform the write, with broadcasting + mspace = h5s.create_simple(selection.expand_shape(mshape)) + for fspace in selection.broadcast(mshape): + self.id.write(mspace, fspace, val, mtype, dxpl=self._dxpl) + + def read_direct(self, dest, source_sel=None, dest_sel=None): + """ Read data directly from HDF5 into an existing NumPy array. + + The destination array must be C-contiguous and writable. + Selections must be the output of numpy.s_[]. + + Broadcasting is supported for simple indexing. + """ + with phil: + if self._is_empty: + raise TypeError("Empty datasets have no numpy representation") + if source_sel is None: + source_sel = sel.SimpleSelection(self.shape) + else: + source_sel = sel.select(self.shape, source_sel, self) # for numpy.s_ + fspace = source_sel.id + + if dest_sel is None: + dest_sel = sel.SimpleSelection(dest.shape) + else: + dest_sel = sel.select(dest.shape, dest_sel) + + for mspace in dest_sel.broadcast(source_sel.array_shape): + self.id.read(mspace, fspace, dest, dxpl=self._dxpl) + + def write_direct(self, source, source_sel=None, dest_sel=None): + """ Write data directly to HDF5 from a NumPy array. + + The source array must be C-contiguous. Selections must be + the output of numpy.s_[]. + + Broadcasting is supported for simple indexing. + """ + with phil: + if self._is_empty: + raise TypeError("Empty datasets cannot be written to") + if source_sel is None: + source_sel = sel.SimpleSelection(source.shape) + else: + source_sel = sel.select(source.shape, source_sel) # for numpy.s_ + mspace = source_sel.id + + if dest_sel is None: + dest_sel = sel.SimpleSelection(self.shape) + else: + dest_sel = sel.select(self.shape, dest_sel, self) + + for fspace in dest_sel.broadcast(source_sel.array_shape): + self.id.write(mspace, fspace, source, dxpl=self._dxpl) + + @with_phil + def __array__(self, dtype=None): + """ Create a Numpy array containing the whole dataset. DON'T THINK + THIS MEANS DATASETS ARE INTERCHANGEABLE WITH ARRAYS. For one thing, + you have to read the whole dataset every time this method is called. + """ + arr = numpy.zeros(self.shape, dtype=self.dtype if dtype is None else dtype) + + # Special case for (0,)*-shape datasets + if self.size == 0: + return arr + + self.read_direct(arr) + return arr + + @with_phil + def __repr__(self): + if not self: + r = '' + else: + if self.name is None: + namestr = '("anonymous")' + else: + name = pp.basename(pp.normpath(self.name)) + namestr = '"%s"' % (name if name != '' else '/') + r = '' % ( + namestr, self.shape, self.dtype.str + ) + return r + + if hasattr(h5d.DatasetID, "refresh"): + @with_phil + def refresh(self): + """ Refresh the dataset metadata by reloading from the file. + + This is part of the SWMR features and only exist when the HDF5 + library version >=1.9.178 + """ + self._id.refresh() + self._cache_props.clear() + + if hasattr(h5d.DatasetID, "flush"): + @with_phil + def flush(self): + """ Flush the dataset data and metadata to the file. + If the dataset is chunked, raw data chunks are written to the file. 
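+
+ A writer-side sketch (this assumes the containing file has been switched
+ to SWMR write mode with ``f.swmr_mode = True``)::
+
+ >>> dset[0] = 42
+ >>> dset.flush()   # readers of the file can now see the new data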
+ + This is part of the SWMR features and only exist when the HDF5 + library version >=1.9.178 + """ + self._id.flush() + + if vds_support: + @property + @with_phil + def is_virtual(self): + """Check if this is a virtual dataset""" + return self._dcpl.get_layout() == h5d.VIRTUAL + + @with_phil + def virtual_sources(self): + """Get a list of the data mappings for a virtual dataset""" + if not self.is_virtual: + raise RuntimeError("Not a virtual dataset") + dcpl = self._dcpl + return [ + VDSmap(dcpl.get_virtual_vspace(j), + dcpl.get_virtual_filename(j), + dcpl.get_virtual_dsetname(j), + dcpl.get_virtual_srcspace(j)) + for j in range(dcpl.get_virtual_count())] + + @with_phil + def make_scale(self, name=''): + """Make this dataset an HDF5 dimension scale. + + You can then attach it to dimensions of other datasets like this:: + + other_ds.dims[0].attach_scale(ds) + + You can optionally pass a name to associate with this scale. + """ + h5ds.set_scale(self._id, self._e(name)) + + @property + @with_phil + def is_scale(self): + """Return ``True`` if this dataset is also a dimension scale. + + Return ``False`` otherwise. + """ + return h5ds.is_scale(self._id) diff --git a/MLPY/Lib/site-packages/h5py/_hl/datatype.py b/MLPY/Lib/site-packages/h5py/_hl/datatype.py new file mode 100644 index 0000000000000000000000000000000000000000..2081266bf5e77ecf982350e5fe180f599468b308 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/datatype.py @@ -0,0 +1,55 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements high-level access to committed datatypes in the file. +""" + +import posixpath as pp + +from ..h5t import TypeID +from .base import HLObject, with_phil + +class Datatype(HLObject): + + """ + Represents an HDF5 named datatype stored in a file. + + To store a datatype, simply assign it to a name in a group: + + >>> MyGroup["name"] = numpy.dtype("f") + >>> named_type = MyGroup["name"] + >>> assert named_type.dtype == numpy.dtype("f") + """ + + @property + @with_phil + def dtype(self): + """Numpy dtype equivalent for this datatype""" + return self.id.dtype + + @with_phil + def __init__(self, bind): + """ Create a new Datatype object by binding to a low-level TypeID. + """ + if not isinstance(bind, TypeID): + raise ValueError("%s is not a TypeID" % bind) + super().__init__(bind) + + @with_phil + def __repr__(self): + if not self.id: + return "" + if self.name is None: + namestr = '("anonymous")' + else: + name = pp.basename(pp.normpath(self.name)) + namestr = '"%s"' % (name if name != '' else '/') + return '' % \ + (namestr, self.dtype.str) diff --git a/MLPY/Lib/site-packages/h5py/_hl/dims.py b/MLPY/Lib/site-packages/h5py/_hl/dims.py new file mode 100644 index 0000000000000000000000000000000000000000..ef688cd0b67885d9ad0ae811fac3774d39d569b1 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/dims.py @@ -0,0 +1,181 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements support for HDF5 dimension scales. +""" + +import warnings + +from .. import h5ds +from ..h5py_warnings import H5pyDeprecationWarning +from . 
import base +from .base import phil, with_phil +from .dataset import Dataset + + +class DimensionProxy(base.CommonStateObject): + + """ + Represents an HDF5 "dimension". + """ + + @property + @with_phil + def label(self): + """ Get or set the dimension scale label """ + return self._d(h5ds.get_label(self._id, self._dimension)) + + @label.setter + @with_phil + def label(self, val): + # pylint: disable=missing-docstring + h5ds.set_label(self._id, self._dimension, self._e(val)) + + @with_phil + def __init__(self, id_, dimension): + self._id = id_ + self._dimension = dimension + + @with_phil + def __hash__(self): + return hash((type(self), self._id, self._dimension)) + + @with_phil + def __eq__(self, other): + return hash(self) == hash(other) + + @with_phil + def __iter__(self): + yield from self.keys() + + @with_phil + def __len__(self): + return h5ds.get_num_scales(self._id, self._dimension) + + @with_phil + def __getitem__(self, item): + + if isinstance(item, int): + scales = [] + h5ds.iterate(self._id, self._dimension, scales.append, 0) + return Dataset(scales[item]) + + else: + def f(dsid): + """ Iterate over scales to find a matching name """ + if h5ds.get_scale_name(dsid) == self._e(item): + return dsid + + res = h5ds.iterate(self._id, self._dimension, f, 0) + if res is None: + raise KeyError(item) + return Dataset(res) + + def attach_scale(self, dset): + """ Attach a scale to this dimension. + + Provide the Dataset of the scale you would like to attach. + """ + with phil: + h5ds.attach_scale(self._id, dset.id, self._dimension) + + def detach_scale(self, dset): + """ Remove a scale from this dimension. + + Provide the Dataset of the scale you would like to remove. + """ + with phil: + h5ds.detach_scale(self._id, dset.id, self._dimension) + + def items(self): + """ Get a list of (name, Dataset) pairs with all scales on this + dimension. + """ + with phil: + scales = [] + + # H5DSiterate raises an error if there are no dimension scales, + # rather than iterating 0 times. See #483. + if len(self) > 0: + h5ds.iterate(self._id, self._dimension, scales.append, 0) + + return [ + (self._d(h5ds.get_scale_name(x)), Dataset(x)) + for x in scales + ] + + def keys(self): + """ Get a list of names for the scales on this dimension. """ + with phil: + return [key for (key, _) in self.items()] + + def values(self): + """ Get a list of Dataset for scales on this dimension. """ + with phil: + return [val for (_, val) in self.items()] + + @with_phil + def __repr__(self): + if not self._id: + return "" + return ('<"%s" dimension %d of HDF5 dataset at %s>' + % (self.label, self._dimension, id(self._id))) + + +class DimensionManager(base.CommonStateObject): + + """ + Represents a collection of dimension associated with a dataset. + + Like AttributeManager, an instance of this class is returned when + accessing the ".dims" property on a Dataset. + """ + + @with_phil + def __init__(self, parent): + """ Private constructor. + """ + self._id = parent.id + + @with_phil + def __getitem__(self, index): + """ Return a Dimension object + """ + if index > len(self) - 1: + raise IndexError('Index out of range') + return DimensionProxy(self._id, index) + + @with_phil + def __len__(self): + """ Number of dimensions associated with the dataset. """ + return self._id.rank + + @with_phil + def __iter__(self): + """ Iterate over the dimensions. 
""" + for i in range(len(self)): + yield self[i] + + @with_phil + def __repr__(self): + if not self._id: + return "" + return "" % id(self._id) + + def create_scale(self, dset, name=''): + """ Create a new dimension, from an initial scale. + + Provide the dataset and a name for the scale. + """ + warnings.warn("other_ds.dims.create_scale(ds, name) is deprecated. " + "Use ds.make_scale(name) instead.", + H5pyDeprecationWarning, stacklevel=2, + ) + dset.make_scale(name) diff --git a/MLPY/Lib/site-packages/h5py/_hl/files.py b/MLPY/Lib/site-packages/h5py/_hl/files.py new file mode 100644 index 0000000000000000000000000000000000000000..59a6d2ce295f0037bbe6ead1252d72950148c65c --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/files.py @@ -0,0 +1,613 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements high-level support for HDF5 file objects. +""" + +import sys +import os +from warnings import warn + +from .compat import filename_decode, filename_encode + +from .base import phil, with_phil +from .group import Group +from .. import h5, h5f, h5p, h5i, h5fd, _objects +from .. import version + +mpi = h5.get_config().mpi +ros3 = h5.get_config().ros3 +direct_vfd = h5.get_config().direct_vfd +hdf5_version = version.hdf5_version_tuple[0:3] + +swmr_support = True + + +libver_dict = {'earliest': h5f.LIBVER_EARLIEST, 'latest': h5f.LIBVER_LATEST, + 'v108': h5f.LIBVER_V18, 'v110': h5f.LIBVER_V110} +libver_dict_r = dict((y, x) for x, y in libver_dict.items()) + +if hdf5_version >= (1, 11, 4): + libver_dict.update({'v112': h5f.LIBVER_V112}) + libver_dict_r.update({h5f.LIBVER_V112: 'v112'}) + +if hdf5_version >= (1, 13, 0): + libver_dict.update({'v114': h5f.LIBVER_V114}) + libver_dict_r.update({h5f.LIBVER_V114: 'v114'}) + + +def _set_fapl_mpio(plist, **kwargs): + """Set file access property list for mpio driver""" + if not mpi: + raise ValueError("h5py was built without MPI support, can't use mpio driver") + + import mpi4py.MPI + kwargs.setdefault('info', mpi4py.MPI.Info()) + plist.set_fapl_mpio(**kwargs) + + +def _set_fapl_fileobj(plist, **kwargs): + """Set the Python file object driver in a file access property list""" + plist.set_fileobj_driver(h5fd.fileobj_driver, kwargs.get('fileobj')) + + +_drivers = { + 'sec2': lambda plist, **kwargs: plist.set_fapl_sec2(**kwargs), + 'stdio': lambda plist, **kwargs: plist.set_fapl_stdio(**kwargs), + 'core': lambda plist, **kwargs: plist.set_fapl_core(**kwargs), + 'family': lambda plist, **kwargs: plist.set_fapl_family( + memb_fapl=plist.copy(), + **kwargs + ), + 'mpio': _set_fapl_mpio, + 'fileobj': _set_fapl_fileobj, + 'split': lambda plist, **kwargs: plist.set_fapl_split(**kwargs), +} + +if ros3: + _drivers['ros3'] = lambda plist, **kwargs: plist.set_fapl_ros3(**kwargs) + +if direct_vfd: + _drivers['direct'] = lambda plist, **kwargs: plist.set_fapl_direct(**kwargs) # noqa + + +def register_driver(name, set_fapl): + """Register a custom driver. + + Parameters + ---------- + name : str + The name of the driver. + set_fapl : callable[PropFAID, **kwargs] -> NoneType + The function to set the fapl to use your custom driver. + """ + _drivers[name] = set_fapl + + +def unregister_driver(name): + """Unregister a custom driver. + + Parameters + ---------- + name : str + The name of the driver. 
+ """ + del _drivers[name] + + +def registered_drivers(): + """Return a frozenset of the names of all of the registered drivers. + """ + return frozenset(_drivers) + + +def make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0, locking, + page_buf_size, min_meta_keep, min_raw_keep, + alignment_threshold, alignment_interval, meta_block_size, + **kwds): + """ Set up a file access property list """ + plist = h5p.create(h5p.FILE_ACCESS) + + if libver is not None: + if libver in libver_dict: + low = libver_dict[libver] + high = h5f.LIBVER_LATEST + else: + low, high = (libver_dict[x] for x in libver) + else: + # we default to earliest + low, high = h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST + plist.set_libver_bounds(low, high) + plist.set_alignment(alignment_threshold, alignment_interval) + + cache_settings = list(plist.get_cache()) + if rdcc_nslots is not None: + cache_settings[1] = rdcc_nslots + if rdcc_nbytes is not None: + cache_settings[2] = rdcc_nbytes + if rdcc_w0 is not None: + cache_settings[3] = rdcc_w0 + plist.set_cache(*cache_settings) + + if page_buf_size: + plist.set_page_buffer_size(int(page_buf_size), int(min_meta_keep), + int(min_raw_keep)) + + if meta_block_size is not None: + plist.set_meta_block_size(int(meta_block_size)) + + if locking is not None: + if hdf5_version < (1, 12, 1) and (hdf5_version[:2] != (1, 10) or hdf5_version[2] < 7): + raise ValueError( + "HDF5 version >= 1.12.1 or 1.10.x >= 1.10.7 required for file locking.") + + if locking in ("false", False): + plist.set_file_locking(False, ignore_when_disabled=False) + elif locking in ("true", True): + plist.set_file_locking(True, ignore_when_disabled=False) + elif locking == "best-effort": + plist.set_file_locking(True, ignore_when_disabled=True) + else: + raise ValueError(f"Unsupported locking value: {locking}") + + if driver is None or (driver == 'windows' and sys.platform == 'win32'): + # Prevent swallowing unused key arguments + if kwds: + msg = "'{key}' is an invalid keyword argument for this function" \ + .format(key=next(iter(kwds))) + raise TypeError(msg) + return plist + + try: + set_fapl = _drivers[driver] + except KeyError: + raise ValueError('Unknown driver type "%s"' % driver) + else: + if driver == 'ros3': + token = kwds.pop('session_token', None) + set_fapl(plist, **kwds) + if token: + if hdf5_version < (1, 14, 2): + raise ValueError('HDF5 >= 1.14.2 required for AWS session token') + plist.set_fapl_ros3_token(token) + else: + set_fapl(plist, **kwds) + + return plist + + +def make_fcpl(track_order=False, fs_strategy=None, fs_persist=False, + fs_threshold=1, fs_page_size=None): + """ Set up a file creation property list """ + if track_order or fs_strategy: + plist = h5p.create(h5p.FILE_CREATE) + if track_order: + plist.set_link_creation_order( + h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) + plist.set_attr_creation_order( + h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) + if fs_strategy: + strategies = { + 'fsm': h5f.FSPACE_STRATEGY_FSM_AGGR, + 'page': h5f.FSPACE_STRATEGY_PAGE, + 'aggregate': h5f.FSPACE_STRATEGY_AGGR, + 'none': h5f.FSPACE_STRATEGY_NONE + } + fs_strat_num = strategies.get(fs_strategy, -1) + if fs_strat_num == -1: + raise ValueError("Invalid file space strategy type") + + plist.set_file_space_strategy(fs_strat_num, fs_persist, fs_threshold) + if fs_page_size and fs_strategy == 'page': + plist.set_file_space_page_size(int(fs_page_size)) + else: + plist = None + return plist + + +def make_fid(name, mode, userblock_size, fapl, fcpl=None, swmr=False): + """ Get a new FileID by opening or 
creating a file. + Also validates mode argument.""" + + if userblock_size is not None: + if mode in ('r', 'r+'): + raise ValueError("User block may only be specified " + "when creating a file") + try: + userblock_size = int(userblock_size) + except (TypeError, ValueError): + raise ValueError("User block size must be an integer") + if fcpl is None: + fcpl = h5p.create(h5p.FILE_CREATE) + fcpl.set_userblock(userblock_size) + + if mode == 'r': + flags = h5f.ACC_RDONLY + if swmr and swmr_support: + flags |= h5f.ACC_SWMR_READ + fid = h5f.open(name, flags, fapl=fapl) + elif mode == 'r+': + fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl) + elif mode in ['w-', 'x']: + fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl) + elif mode == 'w': + fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl) + elif mode == 'a': + # Open in append mode (read/write). + # If that fails, create a new file only if it won't clobber an + # existing one (ACC_EXCL) + try: + fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl) + # Not all drivers raise FileNotFoundError (commented those that do not) + except FileNotFoundError if fapl.get_driver() in ( + h5fd.SEC2, + h5fd.DIRECT if direct_vfd else -1, + # h5fd.STDIO, + # h5fd.CORE, + h5fd.FAMILY, + h5fd.WINDOWS, + # h5fd.MPIO, + # h5fd.MPIPOSIX, + h5fd.fileobj_driver, + h5fd.ROS3D if ros3 else -1, + ) else OSError: + fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl) + else: + raise ValueError("Invalid mode; must be one of r, r+, w, w-, x, a") + + try: + if userblock_size is not None: + existing_fcpl = fid.get_create_plist() + if existing_fcpl.get_userblock() != userblock_size: + raise ValueError("Requested userblock size (%d) does not match that of existing file (%d)" % (userblock_size, existing_fcpl.get_userblock())) + except Exception as e: + fid.close() + raise e + + return fid + + +class File(Group): + + """ + Represents an HDF5 file. + """ + + @property + def attrs(self): + """ Attributes attached to this object """ + # hdf5 complains that a file identifier is an invalid location for an + # attribute. Instead of self, pass the root group to AttributeManager: + from . 
import attrs + with phil: + return attrs.AttributeManager(self['/']) + + @property + @with_phil + def filename(self): + """File name on disk""" + return filename_decode(h5f.get_name(self.id)) + + @property + @with_phil + def driver(self): + """Low-level HDF5 file driver used to open file""" + drivers = {h5fd.SEC2: 'sec2', + h5fd.STDIO: 'stdio', + h5fd.CORE: 'core', + h5fd.FAMILY: 'family', + h5fd.WINDOWS: 'windows', + h5fd.MPIO: 'mpio', + h5fd.MPIPOSIX: 'mpiposix', + h5fd.fileobj_driver: 'fileobj'} + if ros3: + drivers[h5fd.ROS3D] = 'ros3' + if direct_vfd: + drivers[h5fd.DIRECT] = 'direct' + return drivers.get(self.id.get_access_plist().get_driver(), 'unknown') + + @property + @with_phil + def mode(self): + """ Python mode used to open file """ + write_intent = h5f.ACC_RDWR + if swmr_support: + write_intent |= h5f.ACC_SWMR_WRITE + return 'r+' if self.id.get_intent() & write_intent else 'r' + + @property + @with_phil + def libver(self): + """File format version bounds (2-tuple: low, high)""" + bounds = self.id.get_access_plist().get_libver_bounds() + return tuple(libver_dict_r[x] for x in bounds) + + @property + @with_phil + def userblock_size(self): + """ User block size (in bytes) """ + fcpl = self.id.get_create_plist() + return fcpl.get_userblock() + + @property + @with_phil + def meta_block_size(self): + """ Meta block size (in bytes) """ + fapl = self.id.get_access_plist() + return fapl.get_meta_block_size() + + if mpi: + + @property + @with_phil + def atomic(self): + """ Set/get MPI-IO atomic mode + """ + return self.id.get_mpi_atomicity() + + @atomic.setter + @with_phil + def atomic(self, value): + # pylint: disable=missing-docstring + self.id.set_mpi_atomicity(value) + + @property + @with_phil + def swmr_mode(self): + """ Controls single-writer multiple-reader mode """ + return swmr_support and bool(self.id.get_intent() & (h5f.ACC_SWMR_READ | h5f.ACC_SWMR_WRITE)) + + @swmr_mode.setter + @with_phil + def swmr_mode(self, value): + # pylint: disable=missing-docstring + if value: + self.id.start_swmr_write() + else: + raise ValueError("It is not possible to forcibly switch SWMR mode off.") + + def __init__(self, name, mode='r', driver=None, libver=None, userblock_size=None, swmr=False, + rdcc_nslots=None, rdcc_nbytes=None, rdcc_w0=None, track_order=None, + fs_strategy=None, fs_persist=False, fs_threshold=1, fs_page_size=None, + page_buf_size=None, min_meta_keep=0, min_raw_keep=0, locking=None, + alignment_threshold=1, alignment_interval=1, meta_block_size=None, **kwds): + """Create a new file object. + + See the h5py user guide for a detailed explanation of the options. + + name + Name of the file on disk, or file-like object. Note: for files + created with the 'core' driver, HDF5 still requires this be + non-empty. + mode + r Readonly, file must exist (default) + r+ Read/write, file must exist + w Create file, truncate if exists + w- or x Create file, fail if exists + a Read/write if exists, create otherwise + driver + Name of the driver to use. Legal values are None (default, + recommended), 'core', 'sec2', 'direct', 'stdio', 'mpio', 'ros3'. + libver + Library version bounds. Supported values: 'earliest', 'v108', + 'v110', 'v112' and 'latest'. The 'v108', 'v110' and 'v112' + options can only be specified with the HDF5 1.10.2 library or later. + userblock_size + Desired size of user block. Only allowed when creating a new + file (mode w, w- or x). + swmr + Open the file in SWMR read mode. Only used when mode = 'r'. + rdcc_nbytes + Total size of the dataset chunk cache in bytes. 
The default size + is 1024**2 (1 MiB) per dataset. Applies to all datasets unless individually changed. + rdcc_w0 + The chunk preemption policy for all datasets. This must be + between 0 and 1 inclusive and indicates the weighting according to + which chunks which have been fully read or written are penalized + when determining which chunks to flush from cache. A value of 0 + means fully read or written chunks are treated no differently than + other chunks (the preemption is strictly LRU) while a value of 1 + means fully read or written chunks are always preempted before + other chunks. If your application only reads or writes data once, + this can be safely set to 1. Otherwise, this should be set lower + depending on how often you re-read or re-write the same data. The + default value is 0.75. Applies to all datasets unless individually changed. + rdcc_nslots + The number of chunk slots in the raw data chunk cache for this + file. Increasing this value reduces the number of cache collisions, + but slightly increases the memory used. Due to the hashing + strategy, this value should ideally be a prime number. As a rule of + thumb, this value should be at least 10 times the number of chunks + that can fit in rdcc_nbytes bytes. For maximum performance, this + value should be set approximately 100 times that number of + chunks. The default value is 521. Applies to all datasets unless individually changed. + track_order + Track dataset/group/attribute creation order under root group + if True. If None use global default h5.get_config().track_order. + fs_strategy + The file space handling strategy to be used. Only allowed when + creating a new file (mode w, w- or x). Defined as: + "fsm" FSM, Aggregators, VFD + "page" Paged FSM, VFD + "aggregate" Aggregators, VFD + "none" VFD + If None use HDF5 defaults. + fs_page_size + File space page size in bytes. Only used when fs_strategy="page". If + None use the HDF5 default (4096 bytes). + fs_persist + A boolean value to indicate whether free space should be persistent + or not. Only allowed when creating a new file. The default value + is False. + fs_threshold + The smallest free-space section size that the free space manager + will track. Only allowed when creating a new file. The default + value is 1. + page_buf_size + Page buffer size in bytes. Only allowed for HDF5 files created with + fs_strategy="page". Must be a power of two value and greater or + equal than the file space page size when creating the file. It is + not used by default. + min_meta_keep + Minimum percentage of metadata to keep in the page buffer before + allowing pages containing metadata to be evicted. Applicable only if + page_buf_size is set. Default value is zero. + min_raw_keep + Minimum percentage of raw data to keep in the page buffer before + allowing pages containing raw data to be evicted. Applicable only if + page_buf_size is set. Default value is zero. + locking + The file locking behavior. Defined as: + + - False (or "false") -- Disable file locking + - True (or "true") -- Enable file locking + - "best-effort" -- Enable file locking but ignore some errors + - None -- Use HDF5 defaults + + .. warning:: + + The HDF5_USE_FILE_LOCKING environment variable can override + this parameter. + + Only available with HDF5 >= 1.12.1 or 1.10.x >= 1.10.7. 
+ + alignment_threshold + Together with ``alignment_interval``, this property ensures that + any file object greater than or equal in size to the alignment + threshold (in bytes) will be aligned on an address which is a + multiple of alignment interval. + + alignment_interval + This property should be used in conjunction with + ``alignment_threshold``. See the description above. For more + details, see + https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALIGNMENT + + meta_block_size + Set the current minimum size, in bytes, of new metadata block allocations. + See https://portal.hdfgroup.org/display/HDF5/H5P_SET_META_BLOCK_SIZE + + Additional keywords + Passed on to the selected file driver. + """ + if driver == 'ros3': + if ros3: + from urllib.parse import urlparse + url = urlparse(name) + if url.scheme == 's3': + aws_region = kwds.get('aws_region', b'').decode('ascii') + if len(aws_region) == 0: + raise ValueError('AWS region required for s3:// location') + name = f'https://s3.{aws_region}.amazonaws.com/{url.netloc}{url.path}' + elif url.scheme not in ('https', 'http'): + raise ValueError(f'{name}: S3 location must begin with ' + 'either "https://", "http://", or "s3://"') + else: + raise ValueError( + "h5py was built without ROS3 support, can't use ros3 driver") + + if locking is not None and hdf5_version < (1, 12, 1) and ( + hdf5_version[:2] != (1, 10) or hdf5_version[2] < 7): + raise ValueError("HDF5 version >= 1.12.1 or 1.10.x >= 1.10.7 required for file locking options.") + + if isinstance(name, _objects.ObjectID): + if fs_strategy: + raise ValueError("Unable to set file space strategy of an existing file") + + with phil: + fid = h5i.get_file_id(name) + else: + if hasattr(name, 'read') and hasattr(name, 'seek'): + if driver not in (None, 'fileobj'): + raise ValueError("Driver must be 'fileobj' for file-like object if specified.") + driver = 'fileobj' + if kwds.get('fileobj', name) != name: + raise ValueError("Invalid value of 'fileobj' argument; " + "must equal to file-like object if specified.") + kwds.update(fileobj=name) + name = repr(name).encode('ASCII', 'replace') + else: + name = filename_encode(name) + + if track_order is None: + track_order = h5.get_config().track_order + + if fs_strategy and mode not in ('w', 'w-', 'x'): + raise ValueError("Unable to set file space strategy of an existing file") + + if swmr and mode != 'r': + warn( + "swmr=True only affects read ('r') mode. For swmr write " + "mode, set f.swmr_mode = True after opening the file.", + stacklevel=2, + ) + + with phil: + fapl = make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0, + locking, page_buf_size, min_meta_keep, min_raw_keep, + alignment_threshold=alignment_threshold, + alignment_interval=alignment_interval, + meta_block_size=meta_block_size, + **kwds) + fcpl = make_fcpl(track_order=track_order, fs_strategy=fs_strategy, + fs_persist=fs_persist, fs_threshold=fs_threshold, + fs_page_size=fs_page_size) + fid = make_fid(name, mode, userblock_size, fapl, fcpl, swmr=swmr) + + if isinstance(libver, tuple): + self._libver = libver + else: + self._libver = (libver, 'latest') + + super().__init__(fid) + + def close(self): + """ Close the file. All open objects become invalid """ + with phil: + # Check that the file is still open, otherwise skip + if self.id.valid: + # We have to explicitly murder all open objects related to the file + + # Close file-resident objects first, then the files. + # Otherwise we get errors in MPI mode. 
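+ # OBJ_LOCAL restricts the sweep to objects opened through this file
+ # identifier; ~OBJ_FILE selects every object type except the file
+ # itself, which the second call below then closes.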
+ self.id._close_open_objects(h5f.OBJ_LOCAL | ~h5f.OBJ_FILE) + self.id._close_open_objects(h5f.OBJ_LOCAL | h5f.OBJ_FILE) + + self.id.close() + _objects.nonlocal_close() + + def flush(self): + """ Tell the HDF5 library to flush its buffers. + """ + with phil: + h5f.flush(self.id) + + @with_phil + def __enter__(self): + return self + + @with_phil + def __exit__(self, *args): + if self.id: + self.close() + + @with_phil + def __repr__(self): + if not self.id: + r = '' + else: + # Filename has to be forced to Unicode if it comes back bytes + # Mode is always a "native" string + filename = self.filename + if isinstance(filename, bytes): # Can't decode fname + filename = filename.decode('utf8', 'replace') + r = f'' + + return r diff --git a/MLPY/Lib/site-packages/h5py/_hl/filters.py b/MLPY/Lib/site-packages/h5py/_hl/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..9cc1c8c912680936154342c45a4ec55703d6bd1a --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/filters.py @@ -0,0 +1,394 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements support for HDF5 compression filters via the high-level + interface. The following types of filter are available: + + "gzip" + Standard DEFLATE-based compression, at integer levels from 0 to 9. + Built-in to all public versions of HDF5. Use this if you want a + decent-to-good ratio, good portability, and don't mind waiting. + + "lzf" + Custom compression filter for h5py. This filter is much, much faster + than gzip (roughly 10x in compression vs. gzip level 4, and 3x faster + in decompressing), but at the cost of a worse compression ratio. Use + this if you want cheap compression and portability is not a concern. + + "szip" + Access to the HDF5 SZIP encoder. SZIP is a non-mainstream compression + format used in space science on integer and float datasets. SZIP is + subject to license requirements, which means the encoder is not + guaranteed to be always available. However, it is also much faster + than gzip. + + The following constants in this module are also useful: + + decode + Tuple of available filter names for decoding + + encode + Tuple of available filter names for encoding +""" +from collections.abc import Mapping +import operator + +import numpy as np +from .base import product +from .compat import filename_encode +from .. import h5z, h5p, h5d, h5f + + +_COMP_FILTERS = {'gzip': h5z.FILTER_DEFLATE, + 'szip': h5z.FILTER_SZIP, + 'lzf': h5z.FILTER_LZF, + 'shuffle': h5z.FILTER_SHUFFLE, + 'fletcher32': h5z.FILTER_FLETCHER32, + 'scaleoffset': h5z.FILTER_SCALEOFFSET } + +DEFAULT_GZIP = 4 +DEFAULT_SZIP = ('nn', 8) + +def _gen_filter_tuples(): + """ Bootstrap function to figure out what filters are available. """ + dec = [] + enc = [] + for name, code in _COMP_FILTERS.items(): + if h5z.filter_avail(code): + info = h5z.get_filter_info(code) + if info & h5z.FILTER_CONFIG_ENCODE_ENABLED: + enc.append(name) + if info & h5z.FILTER_CONFIG_DECODE_ENABLED: + dec.append(name) + + return tuple(dec), tuple(enc) + +decode, encode = _gen_filter_tuples() + +def _external_entry(entry): + """ Check for and return a well-formed entry tuple for + a call to h5p.set_external. 
""" + # We require only an iterable entry but also want to guard against + # raising a confusing exception from unpacking below a str or bytes that + # was mistakenly passed as an entry. We go further than that and accept + # only a tuple, which allows simpler documentation and exception + # messages. + if not isinstance(entry, tuple): + raise TypeError( + "Each external entry must be a tuple of (name, offset, size)") + name, offset, size = entry # raise ValueError without three elements + name = filename_encode(name) + offset = operator.index(offset) + size = operator.index(size) + return (name, offset, size) + +def _normalize_external(external): + """ Normalize external into a well-formed list of tuples and return. """ + if external is None: + return [] + try: + # Accept a solitary name---a str, bytes, or os.PathLike acceptable to + # filename_encode. + return [_external_entry((external, 0, h5f.UNLIMITED))] + except TypeError: + pass + # Check and rebuild each entry to be well-formed. + return [_external_entry(entry) for entry in external] + +class FilterRefBase(Mapping): + """Base class for referring to an HDF5 and describing its options + + Your subclass must define filter_id, and may define a filter_options tuple. + """ + filter_id = None + filter_options = () + + # Mapping interface supports using instances as **kwargs for compatibility + # with older versions of h5py + @property + def _kwargs(self): + return { + 'compression': self.filter_id, + 'compression_opts': self.filter_options + } + + def __hash__(self): + return hash((self.filter_id, self.filter_options)) + + def __eq__(self, other): + return ( + isinstance(other, FilterRefBase) + and self.filter_id == other.filter_id + and self.filter_options == other.filter_options + ) + + def __len__(self): + return len(self._kwargs) + + def __iter__(self): + return iter(self._kwargs) + + def __getitem__(self, item): + return self._kwargs[item] + +class Gzip(FilterRefBase): + filter_id = h5z.FILTER_DEFLATE + + def __init__(self, level=DEFAULT_GZIP): + self.filter_options = (level,) + +def fill_dcpl(plist, shape, dtype, chunks, compression, compression_opts, + shuffle, fletcher32, maxshape, scaleoffset, external, + allow_unknown_filter=False): + """ Generate a dataset creation property list. + + Undocumented and subject to change without warning. 
+ """ + + if shape is None or shape == (): + shapetype = 'Empty' if shape is None else 'Scalar' + if any((chunks, compression, compression_opts, shuffle, fletcher32, + scaleoffset is not None)): + raise TypeError( + f"{shapetype} datasets don't support chunk/filter options" + ) + if maxshape and maxshape != (): + raise TypeError(f"{shapetype} datasets cannot be extended") + return h5p.create(h5p.DATASET_CREATE) + + def rq_tuple(tpl, name): + """ Check if chunks/maxshape match dataset rank """ + if tpl in (None, True): + return + try: + tpl = tuple(tpl) + except TypeError: + raise TypeError('"%s" argument must be None or a sequence object' % name) + if len(tpl) != len(shape): + raise ValueError('"%s" must have same rank as dataset shape' % name) + + rq_tuple(chunks, 'chunks') + rq_tuple(maxshape, 'maxshape') + + if compression is not None: + if isinstance(compression, FilterRefBase): + compression_opts = compression.filter_options + compression = compression.filter_id + + if compression not in encode and not isinstance(compression, int): + raise ValueError('Compression filter "%s" is unavailable' % compression) + + if compression == 'gzip': + if compression_opts is None: + gzip_level = DEFAULT_GZIP + elif compression_opts in range(10): + gzip_level = compression_opts + else: + raise ValueError("GZIP setting must be an integer from 0-9, not %r" % compression_opts) + + elif compression == 'lzf': + if compression_opts is not None: + raise ValueError("LZF compression filter accepts no options") + + elif compression == 'szip': + if compression_opts is None: + compression_opts = DEFAULT_SZIP + + err = "SZIP options must be a 2-tuple ('ec'|'nn', even integer 0-32)" + try: + szmethod, szpix = compression_opts + except TypeError: + raise TypeError(err) + if szmethod not in ('ec', 'nn'): + raise ValueError(err) + if not (0= 0') + + if dtype.kind == 'f': + if scaleoffset is True: + raise ValueError('integer scaleoffset must be provided for ' + 'floating point types') + elif dtype.kind in ('u', 'i'): + if scaleoffset is True: + scaleoffset = h5z.SO_INT_MINBITS_DEFAULT + else: + raise TypeError('scale/offset filter only supported for integer ' + 'and floating-point types') + + # Scale/offset following fletcher32 in the filter chain will (almost?) + # always triggers a read error, as most scale/offset settings are + # lossy. Since fletcher32 must come first (see comment below) we + # simply prohibit the combination of fletcher32 and scale/offset. 
+ if fletcher32: + raise ValueError('fletcher32 cannot be used with potentially lossy' + ' scale/offset filter') + + external = _normalize_external(external) + # End argument validation + + if (chunks is True) or (chunks is None and any(( + shuffle, + fletcher32, + compression, + (maxshape and not len(external)), + scaleoffset is not None, + ))): + chunks = guess_chunk(shape, maxshape, dtype.itemsize) + + if maxshape is True: + maxshape = (None,)*len(shape) + + if chunks is not None: + plist.set_chunk(chunks) + plist.set_fill_time(h5d.FILL_TIME_ALLOC) # prevent resize glitch + + # scale-offset must come before shuffle and compression + if scaleoffset is not None: + if dtype.kind in ('u', 'i'): + plist.set_scaleoffset(h5z.SO_INT, scaleoffset) + else: # dtype.kind == 'f' + plist.set_scaleoffset(h5z.SO_FLOAT_DSCALE, scaleoffset) + + for item in external: + plist.set_external(*item) + + if shuffle: + plist.set_shuffle() + + if compression == 'gzip': + plist.set_deflate(gzip_level) + elif compression == 'lzf': + plist.set_filter(h5z.FILTER_LZF, h5z.FLAG_OPTIONAL) + elif compression == 'szip': + opts = {'ec': h5z.SZIP_EC_OPTION_MASK, 'nn': h5z.SZIP_NN_OPTION_MASK} + plist.set_szip(opts[szmethod], szpix) + elif isinstance(compression, int): + if not allow_unknown_filter and not h5z.filter_avail(compression): + raise ValueError("Unknown compression filter number: %s" % compression) + + plist.set_filter(compression, h5z.FLAG_OPTIONAL, compression_opts) + + # `fletcher32` must come after `compression`, otherwise, if `compression` + # is "szip" and the data is 64bit, the fletcher32 checksum will be wrong + # (see GitHub issue #953). + if fletcher32: + plist.set_fletcher32() + + return plist + +def get_filters(plist): + """ Extract a dictionary of active filters from a DCPL, along with + their settings. + + Undocumented and subject to change without warning. + """ + + filters = {h5z.FILTER_DEFLATE: 'gzip', h5z.FILTER_SZIP: 'szip', + h5z.FILTER_SHUFFLE: 'shuffle', h5z.FILTER_FLETCHER32: 'fletcher32', + h5z.FILTER_LZF: 'lzf', h5z.FILTER_SCALEOFFSET: 'scaleoffset'} + + pipeline = {} + + nfilters = plist.get_nfilters() + + for i in range(nfilters): + + code, _, vals, _ = plist.get_filter(i) + + if code == h5z.FILTER_DEFLATE: + vals = vals[0] # gzip level + + elif code == h5z.FILTER_SZIP: + mask, pixels = vals[0:2] + if mask & h5z.SZIP_EC_OPTION_MASK: + mask = 'ec' + elif mask & h5z.SZIP_NN_OPTION_MASK: + mask = 'nn' + else: + raise TypeError("Unknown SZIP configuration") + vals = (mask, pixels) + elif code == h5z.FILTER_LZF: + vals = None + else: + if len(vals) == 0: + vals = None + + pipeline[filters.get(code, str(code))] = vals + + return pipeline + +CHUNK_BASE = 16*1024 # Multiplier by which chunks are adjusted +CHUNK_MIN = 8*1024 # Soft lower limit (8k) +CHUNK_MAX = 1024*1024 # Hard upper limit (1M) + +def guess_chunk(shape, maxshape, typesize): + """ Guess an appropriate chunk layout for a dataset, given its shape and + the size of each element in bytes. Will allocate chunks only as large + as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of + each axis, slightly favoring bigger values for the last index. + + Undocumented and subject to change without warning. 
+ """ + # pylint: disable=unused-argument + + # For unlimited dimensions we have to guess 1024 + shape = tuple((x if x!=0 else 1024) for i, x in enumerate(shape)) + + ndims = len(shape) + if ndims == 0: + raise ValueError("Chunks not allowed for scalar datasets.") + + chunks = np.array(shape, dtype='=f8') + if not np.all(np.isfinite(chunks)): + raise ValueError("Illegal value in chunk tuple") + + # Determine the optimal chunk size in bytes using a PyTables expression. + # This is kept as a float. + dset_size = product(chunks)*typesize + target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024))) + + if target_size > CHUNK_MAX: + target_size = CHUNK_MAX + elif target_size < CHUNK_MIN: + target_size = CHUNK_MIN + + idx = 0 + while True: + # Repeatedly loop over the axes, dividing them by 2. Stop when: + # 1a. We're smaller than the target chunk size, OR + # 1b. We're within 50% of the target chunk size, AND + # 2. The chunk is smaller than the maximum chunk size + + chunk_bytes = product(chunks)*typesize + + if (chunk_bytes < target_size or \ + abs(chunk_bytes-target_size)/target_size < 0.5) and \ + chunk_bytes < CHUNK_MAX: + break + + if product(chunks) == 1: + break # Element size larger than CHUNK_MAX + + chunks[idx%ndims] = np.ceil(chunks[idx%ndims] / 2.0) + idx += 1 + + return tuple(int(x) for x in chunks) diff --git a/MLPY/Lib/site-packages/h5py/_hl/group.py b/MLPY/Lib/site-packages/h5py/_hl/group.py new file mode 100644 index 0000000000000000000000000000000000000000..315c6fb1a3bed3092c1848ceb100132a02ce9a4a --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/group.py @@ -0,0 +1,801 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements support for high-level access to HDF5 groups. +""" + +from contextlib import contextmanager +import posixpath as pp +import numpy + + +from .compat import filename_decode, filename_encode + +from .. import h5, h5g, h5i, h5o, h5r, h5t, h5l, h5p +from . import base +from .base import HLObject, MutableMappingHDF5, phil, with_phil +from . import dataset +from . import datatype +from .vds import vds_support + + +class Group(HLObject, MutableMappingHDF5): + + """ Represents an HDF5 group. + """ + + def __init__(self, bind): + """ Create a new Group object by binding to a low-level GroupID. + """ + with phil: + if not isinstance(bind, h5g.GroupID): + raise ValueError("%s is not a GroupID" % bind) + super().__init__(bind) + + _gcpl_crt_order = h5p.create(h5p.GROUP_CREATE) + _gcpl_crt_order.set_link_creation_order( + h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) + _gcpl_crt_order.set_attr_creation_order( + h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) + + def create_group(self, name, track_order=None): + """ Create and return a new subgroup. + + Name may be absolute or relative. Fails if the target name already + exists. + + track_order + Track dataset/group/attribute creation order under this group + if True. If None use global default h5.get_config().track_order. 
+ """ + if track_order is None: + track_order = h5.get_config().track_order + + with phil: + name, lcpl = self._e(name, lcpl=True) + gcpl = Group._gcpl_crt_order if track_order else None + gid = h5g.create(self.id, name, lcpl=lcpl, gcpl=gcpl) + return Group(gid) + + def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds): + """ Create a new HDF5 dataset + + name + Name of the dataset (absolute or relative). Provide None to make + an anonymous dataset. + shape + Dataset shape. Use "()" for scalar datasets. Required if "data" + isn't provided. + dtype + Numpy dtype or string. If omitted, dtype('f') will be used. + Required if "data" isn't provided; otherwise, overrides data + array's dtype. + data + Provide data to initialize the dataset. If used, you can omit + shape and dtype arguments. + + Keyword-only arguments: + + chunks + (Tuple or int) Chunk shape, or True to enable auto-chunking. Integers can + be used for 1D shape. + + maxshape + (Tuple or int) Make the dataset resizable up to this shape. Use None for + axes you want to be unlimited. Integers can be used for 1D shape. + compression + (String or int) Compression strategy. Legal values are 'gzip', + 'szip', 'lzf'. If an integer in range(10), this indicates gzip + compression level. Otherwise, an integer indicates the number of a + dynamically loaded compression filter. + compression_opts + Compression settings. This is an integer for gzip, 2-tuple for + szip, etc. If specifying a dynamically loaded compression filter + number, this must be a tuple of values. + scaleoffset + (Integer) Enable scale/offset filter for (usually) lossy + compression of integer or floating-point data. For integer + data, the value of scaleoffset is the number of bits to + retain (pass 0 to let HDF5 determine the minimum number of + bits necessary for lossless compression). For floating point + data, scaleoffset is the number of digits after the decimal + place to retain; stored values thus have absolute error + less than 0.5*10**(-scaleoffset). + shuffle + (T/F) Enable shuffle filter. + fletcher32 + (T/F) Enable fletcher32 error detection. Not permitted in + conjunction with the scale/offset filter. + fillvalue + (Scalar) Use this value for uninitialized parts of the dataset. + track_times + (T/F) Enable dataset creation timestamps. + track_order + (T/F) Track attribute creation order if True. If omitted use + global default h5.get_config().track_order. + external + (Iterable of tuples) Sets the external storage property, thus + designating that the dataset will be stored in one or more + non-HDF5 files external to the HDF5 file. Adds each tuple + of (name, offset, size) to the dataset's list of external files. + Each name must be a str, bytes, or os.PathLike; each offset and + size, an integer. If only a name is given instead of an iterable + of tuples, it is equivalent to [(name, 0, h5py.h5f.UNLIMITED)]. + efile_prefix + (String) External dataset file prefix for dataset access property + list. Does not persist in the file. + virtual_prefix + (String) Virtual dataset file prefix for dataset access property + list. Does not persist in the file. + allow_unknown_filter + (T/F) Do not check that the requested filter is available for use. + This should only be used with ``write_direct_chunk``, where the caller + compresses the data before handing it to h5py. + rdcc_nbytes + Total size of the dataset's chunk cache in bytes. The default size + is 1024**2 (1 MiB). + rdcc_w0 + The chunk preemption policy for this dataset. 
This must be + between 0 and 1 inclusive and indicates the weighting according to + which chunks which have been fully read or written are penalized + when determining which chunks to flush from cache. A value of 0 + means fully read or written chunks are treated no differently than + other chunks (the preemption is strictly LRU) while a value of 1 + means fully read or written chunks are always preempted before + other chunks. If your application only reads or writes data once, + this can be safely set to 1. Otherwise, this should be set lower + depending on how often you re-read or re-write the same data. The + default value is 0.75. + rdcc_nslots + The number of chunk slots in the dataset's chunk cache. Increasing + this value reduces the number of cache collisions, but slightly + increases the memory used. Due to the hashing strategy, this value + should ideally be a prime number. As a rule of thumb, this value + should be at least 10 times the number of chunks that can fit in + rdcc_nbytes bytes. For maximum performance, this value should be set + approximately 100 times that number of chunks. The default value is + 521. + """ + if 'track_order' not in kwds: + kwds['track_order'] = h5.get_config().track_order + + if 'efile_prefix' in kwds: + kwds['efile_prefix'] = self._e(kwds['efile_prefix']) + + if 'virtual_prefix' in kwds: + kwds['virtual_prefix'] = self._e(kwds['virtual_prefix']) + + with phil: + group = self + if name: + name = self._e(name) + if b'/' in name.lstrip(b'/'): + parent_path, name = name.rsplit(b'/', 1) + group = self.require_group(parent_path) + + dsid = dataset.make_new_dset(group, shape, dtype, data, name, **kwds) + dset = dataset.Dataset(dsid) + return dset + + if vds_support: + def create_virtual_dataset(self, name, layout, fillvalue=None): + """Create a new virtual dataset in this group. + + See virtual datasets in the docs for more information. + + name + (str) Name of the new dataset + + layout + (VirtualLayout) Defines the sources for the virtual dataset + + fillvalue + The value to use where there is no data. + + """ + with phil: + group = self + + if name: + name = self._e(name) + if b'/' in name.lstrip(b'/'): + parent_path, name = name.rsplit(b'/', 1) + group = self.require_group(parent_path) + + dsid = layout.make_dataset( + group, name=name, fillvalue=fillvalue, + ) + dset = dataset.Dataset(dsid) + + return dset + + @contextmanager + def build_virtual_dataset( + self, name, shape, dtype, maxshape=None, fillvalue=None + ): + """Assemble a virtual dataset in this group. + + This is used as a context manager:: + + with f.build_virtual_dataset('virt', (10, 1000), np.uint32) as layout: + layout[0] = h5py.VirtualSource('foo.h5', 'data', (1000,)) + + name + (str) Name of the new dataset + shape + (tuple) Shape of the dataset + dtype + A numpy dtype for data read from the virtual dataset + maxshape + (tuple, optional) Maximum dimensions if the dataset can grow. + Use None for unlimited dimensions. + fillvalue + The value used where no data is available. + """ + from .vds import VirtualLayout + layout = VirtualLayout(shape, dtype, maxshape, self.file.filename) + yield layout + + self.create_virtual_dataset(name, layout, fillvalue) + + def require_dataset(self, name, shape, dtype, exact=False, **kwds): + """ Open a dataset, creating it if it doesn't exist. + + If keyword "exact" is False (default), an existing dataset must have + the same shape and a conversion-compatible dtype to be returned. If + True, the shape and dtype must match exactly. 
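A brief sketch of the `create_dataset` and `require_dataset` keywords documented above; the file and dataset names, sizes, and chunk-cache settings are illustrative assumptions, not values taken from this code.

```python
import numpy as np
import h5py

with h5py.File("groups_demo.h5", "w") as f:
    grp = f.create_group("measurements")

    # Chunked, resizable, compressed dataset with a custom chunk cache.
    dset = grp.create_dataset(
        "signal",
        shape=(64, 1024),
        maxshape=(None, 1024),    # first axis unlimited
        dtype="float32",
        chunks=(64, 1024),
        compression="gzip",
        rdcc_nbytes=4 * 1024**2,  # 4 MiB chunk cache
        rdcc_nslots=5003,         # ideally a prime number of slots
    )
    dset[:] = np.random.random((64, 1024))

    # require_dataset returns the existing dataset when shape and dtype agree.
    same = f.require_dataset("measurements/signal", shape=(64, 1024), dtype="float32")
    assert same.shape == (64, 1024)

    # Grow along the unlimited axis when more data arrives.
    dset.resize((128, 1024))
```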
+ + If keyword "maxshape" is given, the maxshape and dtype must match + instead. + + If any of the keywords "rdcc_nslots", "rdcc_nbytes", or "rdcc_w0" are + given, they will be used to configure the dataset's chunk cache. + + Other dataset keywords (see create_dataset) may be provided, but are + only used if a new dataset is to be created. + + Raises TypeError if an incompatible object already exists, or if the + shape, maxshape or dtype don't match according to the above rules. + """ + if 'efile_prefix' in kwds: + kwds['efile_prefix'] = self._e(kwds['efile_prefix']) + + if 'virtual_prefix' in kwds: + kwds['virtual_prefix'] = self._e(kwds['virtual_prefix']) + + with phil: + if name not in self: + return self.create_dataset(name, *(shape, dtype), **kwds) + + if isinstance(shape, int): + shape = (shape,) + + try: + dsid = dataset.open_dset(self, self._e(name), **kwds) + dset = dataset.Dataset(dsid) + except KeyError: + dset = self[name] + raise TypeError("Incompatible object (%s) already exists" % dset.__class__.__name__) + + if shape != dset.shape: + if "maxshape" not in kwds: + raise TypeError("Shapes do not match (existing %s vs new %s)" % (dset.shape, shape)) + elif kwds["maxshape"] != dset.maxshape: + raise TypeError("Max shapes do not match (existing %s vs new %s)" % (dset.maxshape, kwds["maxshape"])) + + if exact: + if dtype != dset.dtype: + raise TypeError("Datatypes do not exactly match (existing %s vs new %s)" % (dset.dtype, dtype)) + elif not numpy.can_cast(dtype, dset.dtype): + raise TypeError("Datatypes cannot be safely cast (existing %s vs new %s)" % (dset.dtype, dtype)) + + return dset + + def create_dataset_like(self, name, other, **kwupdate): + """ Create a dataset similar to `other`. + + name + Name of the dataset (absolute or relative). Provide None to make + an anonymous dataset. + other + The dataset which the new dataset should mimic. All properties, such + as shape, dtype, chunking, ... will be taken from it, but no data + or attributes are being copied. + + Any dataset keywords (see create_dataset) may be provided, including + shape and dtype, in which case the provided values take precedence over + those from `other`. + """ + for k in ('shape', 'dtype', 'chunks', 'compression', + 'compression_opts', 'scaleoffset', 'shuffle', 'fletcher32', + 'fillvalue'): + kwupdate.setdefault(k, getattr(other, k)) + # TODO: more elegant way to pass these (dcpl to create_dataset?) + dcpl = other.id.get_create_plist() + kwupdate.setdefault('track_times', dcpl.get_obj_track_times()) + kwupdate.setdefault('track_order', dcpl.get_attr_creation_order() > 0) + + # Special case: the maxshape property always exists, but if we pass it + # to create_dataset, the new dataset will automatically get chunked + # layout. So we copy it only if it is different from shape. + if other.maxshape != other.shape: + kwupdate.setdefault('maxshape', other.maxshape) + + return self.create_dataset(name, **kwupdate) + + def require_group(self, name): + # TODO: support kwargs like require_dataset + """Return a group, creating it if it doesn't exist. + + TypeError is raised if something with that name already exists that + isn't a group. 
+ """ + with phil: + if name not in self: + return self.create_group(name) + grp = self[name] + if not isinstance(grp, Group): + raise TypeError("Incompatible object (%s) already exists" % grp.__class__.__name__) + return grp + + @with_phil + def __getitem__(self, name): + """ Open an object in the file """ + + if isinstance(name, h5r.Reference): + oid = h5r.dereference(name, self.id) + if oid is None: + raise ValueError("Invalid HDF5 object reference") + elif isinstance(name, (bytes, str)): + oid = h5o.open(self.id, self._e(name), lapl=self._lapl) + else: + raise TypeError("Accessing a group is done with bytes or str, " + "not {}".format(type(name))) + + otype = h5i.get_type(oid) + if otype == h5i.GROUP: + return Group(oid) + elif otype == h5i.DATASET: + return dataset.Dataset(oid, readonly=(self.file.mode == 'r')) + elif otype == h5i.DATATYPE: + return datatype.Datatype(oid) + else: + raise TypeError("Unknown object type") + + def get(self, name, default=None, getclass=False, getlink=False): + """ Retrieve an item or other information. + + "name" given only: + Return the item, or "default" if it doesn't exist + + "getclass" is True: + Return the class of object (Group, Dataset, etc.), or "default" + if nothing with that name exists + + "getlink" is True: + Return HardLink, SoftLink or ExternalLink instances. Return + "default" if nothing with that name exists. + + "getlink" and "getclass" are True: + Return HardLink, SoftLink and ExternalLink classes. Return + "default" if nothing with that name exists. + + Example: + + >>> cls = group.get('foo', getclass=True) + >>> if cls == SoftLink: + """ + # pylint: disable=arguments-differ + + with phil: + if not (getclass or getlink): + try: + return self[name] + except KeyError: + return default + + if name not in self: + return default + + elif getclass and not getlink: + typecode = h5o.get_info(self.id, self._e(name), lapl=self._lapl).type + + try: + return {h5o.TYPE_GROUP: Group, + h5o.TYPE_DATASET: dataset.Dataset, + h5o.TYPE_NAMED_DATATYPE: datatype.Datatype}[typecode] + except KeyError: + raise TypeError("Unknown object type") + + elif getlink: + typecode = self.id.links.get_info(self._e(name), lapl=self._lapl).type + + if typecode == h5l.TYPE_SOFT: + if getclass: + return SoftLink + linkbytes = self.id.links.get_val(self._e(name), lapl=self._lapl) + return SoftLink(self._d(linkbytes)) + + elif typecode == h5l.TYPE_EXTERNAL: + if getclass: + return ExternalLink + filebytes, linkbytes = self.id.links.get_val(self._e(name), lapl=self._lapl) + return ExternalLink( + filename_decode(filebytes), self._d(linkbytes) + ) + + elif typecode == h5l.TYPE_HARD: + return HardLink if getclass else HardLink() + + else: + raise TypeError("Unknown link type") + + def __setitem__(self, name, obj): + """ Add an object to the group. The name must not already be in use. + + The action taken depends on the type of object assigned: + + Named HDF5 object (Dataset, Group, Datatype) + A hard link is created at "name" which points to the + given object. + + SoftLink or ExternalLink + Create the corresponding link. + + Numpy ndarray + The array is converted to a dataset object, with default + settings (contiguous storage, etc.). + + Numpy dtype + Commit a copy of the datatype as a named datatype in the file. + + Anything else + Attempt to convert it to an ndarray and store it. Scalar + values are stored as scalar datasets. Raise ValueError if we + can't understand the resulting array dtype. 
+ """ + with phil: + name, lcpl = self._e(name, lcpl=True) + + if isinstance(obj, HLObject): + h5o.link(obj.id, self.id, name, lcpl=lcpl, lapl=self._lapl) + + elif isinstance(obj, SoftLink): + self.id.links.create_soft(name, self._e(obj.path), lcpl=lcpl, lapl=self._lapl) + + elif isinstance(obj, ExternalLink): + fn = filename_encode(obj.filename) + self.id.links.create_external(name, fn, self._e(obj.path), + lcpl=lcpl, lapl=self._lapl) + + elif isinstance(obj, numpy.dtype): + htype = h5t.py_create(obj, logical=True) + htype.commit(self.id, name, lcpl=lcpl) + + else: + ds = self.create_dataset(None, data=obj) + h5o.link(ds.id, self.id, name, lcpl=lcpl) + + @with_phil + def __delitem__(self, name): + """ Delete (unlink) an item from this group. """ + self.id.unlink(self._e(name)) + + @with_phil + def __len__(self): + """ Number of members attached to this group """ + return self.id.get_num_objs() + + @with_phil + def __iter__(self): + """ Iterate over member names """ + for x in self.id.__iter__(): + yield self._d(x) + + @with_phil + def __reversed__(self): + """ Iterate over member names in reverse order. """ + for x in self.id.__reversed__(): + yield self._d(x) + + @with_phil + def __contains__(self, name): + """ Test if a member name exists """ + if hasattr(h5g, "_path_valid"): + if not self.id: + return False + return h5g._path_valid(self.id, self._e(name), self._lapl) + return self._e(name) in self.id + + def copy(self, source, dest, name=None, + shallow=False, expand_soft=False, expand_external=False, + expand_refs=False, without_attrs=False): + """Copy an object or group. + + The source can be a path, Group, Dataset, or Datatype object. The + destination can be either a path or a Group object. The source and + destinations need not be in the same file. + + If the source is a Group object, all objects contained in that group + will be copied recursively. + + When the destination is a Group object, by default the target will + be created in that group with its current name (basename of obj.name). + You can override that by setting "name" to a string. + + There are various options which all default to "False": + + - shallow: copy only immediate members of a group. + + - expand_soft: expand soft links into new objects. + + - expand_external: expand external links into new objects. + + - expand_refs: copy objects that are pointed to by references. + + - without_attrs: copy object without copying attributes. + + Example: + + >>> f = File('myfile.hdf5', 'w') + >>> f.create_group("MyGroup") + >>> list(f.keys()) + ['MyGroup'] + >>> f.copy('MyGroup', 'MyCopy') + >>> list(f.keys()) + ['MyGroup', 'MyCopy'] + + """ + with phil: + if isinstance(source, HLObject): + source_path = '.' 
+ else: + # Interpret source as a path relative to this group + source_path = source + source = self + + if isinstance(dest, Group): + if name is not None: + dest_path = name + elif source_path == '.': + dest_path = pp.basename(h5i.get_name(source.id)) + else: + # copy source into dest group: dest_name/source_name + dest_path = pp.basename(h5i.get_name(source[source_path].id)) + + elif isinstance(dest, HLObject): + raise TypeError("Destination must be path or Group object") + else: + # Interpret destination as a path relative to this group + dest_path = dest + dest = self + + flags = 0 + if shallow: + flags |= h5o.COPY_SHALLOW_HIERARCHY_FLAG + if expand_soft: + flags |= h5o.COPY_EXPAND_SOFT_LINK_FLAG + if expand_external: + flags |= h5o.COPY_EXPAND_EXT_LINK_FLAG + if expand_refs: + flags |= h5o.COPY_EXPAND_REFERENCE_FLAG + if without_attrs: + flags |= h5o.COPY_WITHOUT_ATTR_FLAG + if flags: + copypl = h5p.create(h5p.OBJECT_COPY) + copypl.set_copy_object(flags) + else: + copypl = None + + h5o.copy(source.id, self._e(source_path), dest.id, self._e(dest_path), + copypl, base.dlcpl) + + def move(self, source, dest): + """ Move a link to a new location in the file. + + If "source" is a hard link, this effectively renames the object. If + "source" is a soft or external link, the link itself is moved, with its + value unmodified. + """ + with phil: + if source == dest: + return + self.id.links.move(self._e(source), self.id, self._e(dest), + lapl=self._lapl, lcpl=self._lcpl) + + def visit(self, func): + """ Recursively visit all names in this group and subgroups. + + Note: visit ignores soft and external links. To visit those, use + visit_links. + + You supply a callable (function, method or callable object); it + will be called exactly once for each link in this group and every + group below it. Your callable must conform to the signature: + + func() => + + Returning None continues iteration, returning anything else stops + and immediately returns that value from the visit method. No + particular order of iteration within groups is guaranteed. + + Example: + + >>> # List the entire contents of the file + >>> f = File("foo.hdf5") + >>> list_of_names = [] + >>> f.visit(list_of_names.append) + """ + with phil: + def proxy(name): + """ Call the function with the text name, not bytes """ + return func(self._d(name)) + return h5o.visit(self.id, proxy) + + def visititems(self, func): + """ Recursively visit names and objects in this group. + + Note: visititems ignores soft and external links. To visit those, use + visititems_links. + + You supply a callable (function, method or callable object); it + will be called exactly once for each link in this group and every + group below it. Your callable must conform to the signature: + + func(, ) => + + Returning None continues iteration, returning anything else stops + and immediately returns that value from the visit method. No + particular order of iteration within groups is guaranteed. + + Example: + + # Get a list of all datasets in the file + >>> mylist = [] + >>> def func(name, obj): + ... if isinstance(obj, Dataset): + ... mylist.append(name) + ... + >>> f = File('foo.hdf5') + >>> f.visititems(func) + """ + with phil: + def proxy(name): + """ Use the text name of the object, not bytes """ + name = self._d(name) + return func(name, self[name]) + return h5o.visit(self.id, proxy) + + def visit_links(self, func): + """ Recursively visit all names in this group and subgroups. + Each link will be visited exactly once, regardless of its target. 
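A small example of the traversal and relocation methods described here (`visit`, `visititems`, `copy`, `move`); the file name and layout are made up for illustration.

```python
import h5py

with h5py.File("visit_demo.h5", "w") as f:
    f.create_dataset("a/b/data", data=[1, 2, 3])
    f.create_dataset("a/other", data=[4, 5])

    names = []
    f.visit(names.append)        # collects every link name reachable from f
    print(names)                 # e.g. ['a', 'a/b', 'a/b/data', 'a/other']

    datasets = []
    def collect(name, obj):
        if isinstance(obj, h5py.Dataset):
            datasets.append(name)
    f.visititems(collect)
    print(datasets)              # ['a/b/data', 'a/other']

    # copy() duplicates an object (recursively for groups) ...
    f.copy("a", "a_copy")
    # ... while move() just relinks it under a new name.
    f.move("a/other", "a/renamed")
```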
+ + You supply a callable (function, method or callable object); it + will be called exactly once for each link in this group and every + group below it. Your callable must conform to the signature: + + func() => + + Returning None continues iteration, returning anything else stops + and immediately returns that value from the visit method. No + particular order of iteration within groups is guaranteed. + + Example: + + >>> # List the entire contents of the file + >>> f = File("foo.hdf5") + >>> list_of_names = [] + >>> f.visit_links(list_of_names.append) + """ + with phil: + def proxy(name): + """ Call the function with the text name, not bytes """ + return func(self._d(name)) + return self.id.links.visit(proxy) + + def visititems_links(self, func): + """ Recursively visit links in this group. + Each link will be visited exactly once, regardless of its target. + + You supply a callable (function, method or callable object); it + will be called exactly once for each link in this group and every + group below it. Your callable must conform to the signature: + + func(, ) => + + Returning None continues iteration, returning anything else stops + and immediately returns that value from the visit method. No + particular order of iteration within groups is guaranteed. + + Example: + + # Get a list of all softlinks in the file + >>> mylist = [] + >>> def func(name, link): + ... if isinstance(link, SoftLink): + ... mylist.append(name) + ... + >>> f = File('foo.hdf5') + >>> f.visititems_links(func) + """ + with phil: + def proxy(name): + """ Use the text name of the object, not bytes """ + name = self._d(name) + return func(name, self.get(name, getlink=True)) + return self.id.links.visit(proxy) + + @with_phil + def __repr__(self): + if not self: + r = u"" + else: + namestr = ( + '"%s"' % self.name + ) if self.name is not None else u"(anonymous)" + r = '' % (namestr, len(self)) + + return r + + +class HardLink: + + """ + Represents a hard link in an HDF5 file. Provided only so that + Group.get works in a sensible way. Has no other function. + """ + + pass + + +class SoftLink: + + """ + Represents a symbolic ("soft") link in an HDF5 file. The path + may be absolute or relative. No checking is performed to ensure + that the target actually exists. + """ + + @property + def path(self): + """ Soft link value. Not guaranteed to be a valid path. """ + return self._path + + def __init__(self, path): + self._path = str(path) + + def __repr__(self): + return '' % self.path + + +class ExternalLink: + + """ + Represents an HDF5 external link. Paths may be absolute or relative. + No checking is performed to ensure either the target or file exists. + """ + + @property + def path(self): + """ Soft link path, i.e. the part inside the HDF5 file. """ + return self._path + + @property + def filename(self): + """ Path to the external HDF5 file in the filesystem. """ + return self._filename + + def __init__(self, filename, path): + self._filename = filename_decode(filename_encode(filename)) + self._path = path + + def __repr__(self): + return ' Create a new selection on "shape"-tuple + __getitem__(args) => Perform a selection with the range specified. + What args are allowed depends on the + particular subclass in use. + + id (read-only) => h5py.h5s.SpaceID instance + shape (read-only) => The shape of the dataspace. + mshape (read-only) => The shape of the selection region. + Not guaranteed to fit within "shape", although + the total number of points is less than + product(shape). 
+ nselect (read-only) => Number of selected points. Always equal to + product(mshape). + + broadcast(target_shape) => Return an iterable which yields dataspaces + for read, based on target_shape. + + The base class represents "unshaped" selections (1-D). + """ + + def __init__(self, shape, spaceid=None): + """ Create a selection. Shape may be None if spaceid is given. """ + if spaceid is not None: + self._id = spaceid + self._shape = spaceid.shape + else: + shape = tuple(shape) + self._shape = shape + self._id = h5s.create_simple(shape, (h5s.UNLIMITED,)*len(shape)) + self._id.select_all() + + @property + def id(self): + """ SpaceID instance """ + return self._id + + @property + def shape(self): + """ Shape of whole dataspace """ + return self._shape + + @property + def nselect(self): + """ Number of elements currently selected """ + return self._id.get_select_npoints() + + @property + def mshape(self): + """ Shape of selection (always 1-D for this class) """ + return (self.nselect,) + + @property + def array_shape(self): + """Shape of array to read/write (always 1-D for this class)""" + return self.mshape + + # expand_shape and broadcast only really make sense for SimpleSelection + def expand_shape(self, source_shape): + if product(source_shape) != self.nselect: + raise TypeError("Broadcasting is not supported for point-wise selections") + return source_shape + + def broadcast(self, source_shape): + """ Get an iterable for broadcasting """ + if product(source_shape) != self.nselect: + raise TypeError("Broadcasting is not supported for point-wise selections") + yield self._id + + def __getitem__(self, args): + raise NotImplementedError("This class does not support indexing") + +class PointSelection(Selection): + + """ + Represents a point-wise selection. You can supply sequences of + points to the three methods append(), prepend() and set(), or + instantiate it with a single boolean array using from_mask(). + """ + def __init__(self, shape, spaceid=None, points=None): + super().__init__(shape, spaceid) + if points is not None: + self._perform_selection(points, h5s.SELECT_SET) + + def _perform_selection(self, points, op): + """ Internal method which actually performs the selection """ + points = np.asarray(points, order='C', dtype='u8') + if len(points.shape) == 1: + points.shape = (1,points.shape[0]) + + if self._id.get_select_type() != h5s.SEL_POINTS: + op = h5s.SELECT_SET + + if len(points) == 0: + self._id.select_none() + else: + self._id.select_elements(points, op) + + @classmethod + def from_mask(cls, mask, spaceid=None): + """Create a point-wise selection from a NumPy boolean array """ + if not (isinstance(mask, np.ndarray) and mask.dtype.kind == 'b'): + raise TypeError("PointSelection.from_mask only works with bool arrays") + + points = np.transpose(mask.nonzero()) + return cls(mask.shape, spaceid, points=points) + + def append(self, points): + """ Add the sequence of points to the end of the current selection """ + self._perform_selection(points, h5s.SELECT_APPEND) + + def prepend(self, points): + """ Add the sequence of points to the beginning of the current selection """ + self._perform_selection(points, h5s.SELECT_PREPEND) + + def set(self, points): + """ Replace the current selection with the given sequence of points""" + self._perform_selection(points, h5s.SELECT_SET) + + +class SimpleSelection(Selection): + + """ A single "rectangular" (regular) selection composed of only slices + and integer arguments. Can participate in broadcasting. 
+ """ + + @property + def mshape(self): + """ Shape of current selection """ + return self._sel[1] + + @property + def array_shape(self): + scalar = self._sel[3] + return tuple(x for x, s in zip(self.mshape, scalar) if not s) + + def __init__(self, shape, spaceid=None, hyperslab=None): + super().__init__(shape, spaceid) + if hyperslab is not None: + self._sel = hyperslab + else: + # No hyperslab specified - select all + rank = len(self.shape) + self._sel = ((0,)*rank, self.shape, (1,)*rank, (False,)*rank) + + def expand_shape(self, source_shape): + """Match the dimensions of an array to be broadcast to the selection + + The returned shape describes an array of the same size as the input + shape, but its dimensions + + E.g. with a dataset shape (10, 5, 4, 2), writing like this:: + + ds[..., 0] = np.ones((5, 4)) + + The source shape (5, 4) will expand to (1, 5, 4, 1). + Then the broadcast method below repeats that chunk 10 + times to write to an effective shape of (10, 5, 4, 1). + """ + start, count, step, scalar = self._sel + + rank = len(count) + remaining_src_dims = list(source_shape) + + eshape = [] + for idx in range(1, rank + 1): + if len(remaining_src_dims) == 0 or scalar[-idx]: # Skip scalar axes + eshape.append(1) + else: + t = remaining_src_dims.pop() + if t == 1 or count[-idx] == t: + eshape.append(t) + else: + raise TypeError("Can't broadcast %s -> %s" % (source_shape, self.array_shape)) # array shape + + if any([n > 1 for n in remaining_src_dims]): + # All dimensions from target_shape should either have been popped + # to match the selection shape, or be 1. + raise TypeError("Can't broadcast %s -> %s" % (source_shape, self.array_shape)) # array shape + + # We have built eshape backwards, so now reverse it + return tuple(eshape[::-1]) + + + def broadcast(self, source_shape): + """ Return an iterator over target dataspaces for broadcasting. + + Follows the standard NumPy broadcasting rules against the current + selection shape (self.mshape). + """ + if self.shape == (): + if product(source_shape) != 1: + raise TypeError("Can't broadcast %s to scalar" % source_shape) + self._id.select_all() + yield self._id + return + + start, count, step, scalar = self._sel + + rank = len(count) + tshape = self.expand_shape(source_shape) + + chunks = tuple(x//y for x, y in zip(count, tshape)) + nchunks = product(chunks) + + if nchunks == 1: + yield self._id + else: + sid = self._id.copy() + sid.select_hyperslab((0,)*rank, tshape, step) + for idx in range(nchunks): + offset = tuple(x*y*z + s for x, y, z, s in zip(np.unravel_index(idx, chunks), tshape, step, start)) + sid.offset_simple(offset) + yield sid + + +class FancySelection(Selection): + + """ + Implements advanced NumPy-style selection operations in addition to + the standard slice-and-int behavior. + + Indexing arguments may be ints, slices, lists of indices, or + per-axis (1D) boolean arrays. + + Broadcasting is not supported for these selections. 
+ """ + + @property + def mshape(self): + return self._mshape + + @property + def array_shape(self): + return self._array_shape + + def __init__(self, shape, spaceid=None, mshape=None, array_shape=None): + super().__init__(shape, spaceid) + if mshape is None: + mshape = self.shape + if array_shape is None: + array_shape = mshape + self._mshape = mshape + self._array_shape = array_shape + + def expand_shape(self, source_shape): + if not source_shape == self.array_shape: + raise TypeError("Broadcasting is not supported for complex selections") + return source_shape + + def broadcast(self, source_shape): + if not source_shape == self.array_shape: + raise TypeError("Broadcasting is not supported for complex selections") + yield self._id + + +def guess_shape(sid): + """ Given a dataspace, try to deduce the shape of the selection. + + Returns one of: + * A tuple with the selection shape, same length as the dataspace + * A 1D selection shape for point-based and multiple-hyperslab selections + * None, for unselected scalars and for NULL dataspaces + """ + + sel_class = sid.get_simple_extent_type() # Dataspace class + sel_type = sid.get_select_type() # Flavor of selection in use + + if sel_class == h5s.NULL: + # NULL dataspaces don't support selections + return None + + elif sel_class == h5s.SCALAR: + # NumPy has no way of expressing empty 0-rank selections, so we use None + if sel_type == h5s.SEL_NONE: return None + if sel_type == h5s.SEL_ALL: return tuple() + + elif sel_class != h5s.SIMPLE: + raise TypeError("Unrecognized dataspace class %s" % sel_class) + + # We have a "simple" (rank >= 1) dataspace + + N = sid.get_select_npoints() + rank = len(sid.shape) + + if sel_type == h5s.SEL_NONE: + return (0,)*rank + + elif sel_type == h5s.SEL_ALL: + return sid.shape + + elif sel_type == h5s.SEL_POINTS: + # Like NumPy, point-based selections yield 1D arrays regardless of + # the dataspace rank + return (N,) + + elif sel_type != h5s.SEL_HYPERSLABS: + raise TypeError("Unrecognized selection method %s" % sel_type) + + # We have a hyperslab-based selection + + if N == 0: + return (0,)*rank + + bottomcorner, topcorner = (np.array(x) for x in sid.get_select_bounds()) + + # Shape of full selection box + boxshape = topcorner - bottomcorner + np.ones((rank,)) + + def get_n_axis(sid, axis): + """ Determine the number of elements selected along a particular axis. + + To do this, we "mask off" the axis by making a hyperslab selection + which leaves only the first point along the axis. For a 2D dataset + with selection box shape (X, Y), for axis 1, this would leave a + selection of shape (X, 1). We count the number of points N_leftover + remaining in the selection and compute the axis selection length by + N_axis = N/N_leftover. 
+ """ + + if(boxshape[axis]) == 1: + return 1 + + start = bottomcorner.copy() + start[axis] += 1 + count = boxshape.copy() + count[axis] -= 1 + + # Throw away all points along this axis + masked_sid = sid.copy() + masked_sid.select_hyperslab(tuple(start), tuple(count), op=h5s.SELECT_NOTB) + + N_leftover = masked_sid.get_select_npoints() + + return N//N_leftover + + + shape = tuple(get_n_axis(sid, x) for x in range(rank)) + + if product(shape) != N: + # This means multiple hyperslab selections are in effect, + # so we fall back to a 1D shape + return (N,) + + return shape diff --git a/MLPY/Lib/site-packages/h5py/_hl/selections2.py b/MLPY/Lib/site-packages/h5py/_hl/selections2.py new file mode 100644 index 0000000000000000000000000000000000000000..a458bad09162cccbdc551c7ecc890d65cfd92ee8 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/selections2.py @@ -0,0 +1,103 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Implements a portion of the selection operations. +""" + +import numpy as np +from .. import h5s + +def read_dtypes(dataset_dtype, names): + """ Returns a 2-tuple containing: + + 1. Output dataset dtype + 2. Dtype containing HDF5-appropriate description of destination + """ + + if len(names) == 0: # Not compound, or all fields needed + format_dtype = dataset_dtype + + elif dataset_dtype.names is None: + raise ValueError("Field names only allowed for compound types") + + elif any(x not in dataset_dtype.names for x in names): + raise ValueError("Field does not appear in this type.") + + else: + format_dtype = np.dtype([(name, dataset_dtype.fields[name][0]) for name in names]) + + if len(names) == 1: + # We don't preserve the field information if only one explicitly selected. + output_dtype = format_dtype.fields[names[0]][0] + + else: + output_dtype = format_dtype + + return output_dtype, format_dtype + + +def read_selections_scalar(dsid, args): + """ Returns a 2-tuple containing: + + 1. Output dataset shape + 2. HDF5 dataspace containing source selection. + + Works for scalar datasets. + """ + + if dsid.shape != (): + raise RuntimeError("Illegal selection function for non-scalar dataset") + + if args == (): + # This is a signal that an array scalar should be returned instead + # of an ndarray with shape () + out_shape = None + + elif args == (Ellipsis,): + out_shape = () + + else: + raise ValueError("Illegal slicing argument for scalar dataspace") + + source_space = dsid.get_space() + source_space.select_all() + + return out_shape, source_space + +class ScalarReadSelection: + + """ + Implements slicing for scalar datasets. + """ + + def __init__(self, fspace, args): + if args == (): + self.mshape = None + elif args == (Ellipsis,): + self.mshape = () + else: + raise ValueError("Illegal slicing argument for scalar dataspace") + + self.mspace = h5s.create(h5s.SCALAR) + self.fspace = fspace + + def __iter__(self): + self.mspace.select_all() + yield self.fspace, self.mspace + +def select_read(fspace, args): + """ Top-level dispatch function for reading. + + At the moment, only supports reading from scalar datasets. 
+ """ + if fspace.shape == (): + return ScalarReadSelection(fspace, args) + + raise NotImplementedError() diff --git a/MLPY/Lib/site-packages/h5py/_hl/vds.py b/MLPY/Lib/site-packages/h5py/_hl/vds.py new file mode 100644 index 0000000000000000000000000000000000000000..d6337502aed371dcff8bf714d2137ef63cf04cae --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/_hl/vds.py @@ -0,0 +1,248 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + High-level interface for creating HDF5 virtual datasets +""" + +from copy import deepcopy as copy +from collections import namedtuple + +import numpy as np + +from .compat import filename_encode +from .datatype import Datatype +from .selections import SimpleSelection, select +from .. import h5d, h5p, h5s, h5t + + +class VDSmap(namedtuple('VDSmap', ('vspace', 'file_name', + 'dset_name', 'src_space'))): + '''Defines a region in a virtual dataset mapping to part of a source dataset + ''' + + +vds_support = True + + +def _convert_space_for_key(space, key): + """ + Converts the space with the given key. Mainly used to allow unlimited + dimensions in virtual space selection. + """ + key = key if isinstance(key, tuple) else (key,) + type_code = space.get_select_type() + + # check for unlimited selections in case where selection is regular + # hyperslab, which is the only allowed case for h5s.UNLIMITED to be + # in the selection + if type_code == h5s.SEL_HYPERSLABS and space.is_regular_hyperslab(): + rank = space.get_simple_extent_ndims() + nargs = len(key) + + idx_offset = 0 + start, stride, count, block = space.get_regular_hyperslab() + # iterate through keys. we ignore numeral indices. if we get a + # slice, we check for an h5s.UNLIMITED value as the stop + # if we get an ellipsis, we offset index by (rank - nargs) + for i, sl in enumerate(key): + if isinstance(sl, slice): + if sl.stop == h5s.UNLIMITED: + counts = list(count) + idx = i + idx_offset + counts[idx] = h5s.UNLIMITED + count = tuple(counts) + elif sl is Ellipsis: + idx_offset = rank - nargs + + space.select_hyperslab(start, count, stride, block) + + +class VirtualSource: + """Source definition for virtual data sets. + + Instantiate this class to represent an entire source dataset, and then + slice it to indicate which regions should be used in the virtual dataset. + + path_or_dataset + The path to a file, or an h5py dataset. If a dataset is given, + no other parameters are allowed, as the relevant values are taken from + the dataset instead. + name + The name of the source dataset within the file. + shape + A tuple giving the shape of the dataset. + dtype + Numpy dtype or string. + maxshape + The source dataset is resizable up to this shape. Use None for + axes you want to be unlimited. + """ + def __init__(self, path_or_dataset, name=None, + shape=None, dtype=None, maxshape=None): + from .dataset import Dataset + if isinstance(path_or_dataset, Dataset): + failed = {k: v + for k, v in + {'name': name, 'shape': shape, + 'dtype': dtype, 'maxshape': maxshape}.items() + if v is not None} + if failed: + raise TypeError("If a Dataset is passed as the first argument " + "then no other arguments may be passed. 
You " + "passed {failed}".format(failed=failed)) + ds = path_or_dataset + path = ds.file.filename + name = ds.name + shape = ds.shape + dtype = ds.dtype + maxshape = ds.maxshape + else: + path = path_or_dataset + if name is None: + raise TypeError("The name parameter is required when " + "specifying a source by path") + if shape is None: + raise TypeError("The shape parameter is required when " + "specifying a source by path") + elif isinstance(shape, int): + shape = (shape,) + + if isinstance(maxshape, int): + maxshape = (maxshape,) + + self.path = path + self.name = name + self.dtype = dtype + + if maxshape is None: + self.maxshape = shape + else: + self.maxshape = tuple([h5s.UNLIMITED if ix is None else ix + for ix in maxshape]) + self.sel = SimpleSelection(shape) + self._all_selected = True + + @property + def shape(self): + return self.sel.array_shape + + def __getitem__(self, key): + if not self._all_selected: + raise RuntimeError("VirtualSource objects can only be sliced once.") + tmp = copy(self) + tmp.sel = select(self.shape, key, dataset=None) + _convert_space_for_key(tmp.sel.id, key) + tmp._all_selected = False + return tmp + +class VirtualLayout: + """Object for building a virtual dataset. + + Instantiate this class to define a virtual dataset, assign to slices of it + (using VirtualSource objects), and then pass it to + group.create_virtual_dataset() to add the virtual dataset to a file. + + This class does not allow access to the data; the virtual dataset must + be created in a file before it can be used. + + shape + A tuple giving the shape of the dataset. + dtype + Numpy dtype or string. + maxshape + The virtual dataset is resizable up to this shape. Use None for + axes you want to be unlimited. + filename + The name of the destination file, if known in advance. Mappings from + data in the same file will be stored with filename '.', allowing the + file to be renamed later. + """ + def __init__(self, shape, dtype, maxshape=None, filename=None): + self.shape = (shape,) if isinstance(shape, int) else shape + self.dtype = dtype + self.maxshape = (maxshape,) if isinstance(maxshape, int) else maxshape + self._filename = filename + self._src_filenames = set() + self.dcpl = h5p.create(h5p.DATASET_CREATE) + + def __setitem__(self, key, source): + sel = select(self.shape, key, dataset=None) + _convert_space_for_key(sel.id, key) + src_filename = self._source_file_name(source.path, self._filename) + + self.dcpl.set_virtual( + sel.id, src_filename, source.name.encode('utf-8'), source.sel.id + ) + if self._filename is None: + self._src_filenames.add(src_filename) + + @staticmethod + def _source_file_name(src_filename, dst_filename) -> bytes: + src_filename = filename_encode(src_filename) + if dst_filename and (src_filename == filename_encode(dst_filename)): + # use relative path if the source dataset is in the same + # file, in order to keep the virtual dataset valid in case + # the file is renamed. + return b'.' + return filename_encode(src_filename) + + def _get_dcpl(self, dst_filename): + """Get the property list containing virtual dataset mappings + + If the destination filename wasn't known when the VirtualLayout was + created, it is handled here. 
+ """ + dst_filename = filename_encode(dst_filename) + if self._filename is not None: + # filename was known in advance; check dst_filename matches + if dst_filename != filename_encode(self._filename): + raise Exception(f"{dst_filename!r} != {self._filename!r}") + return self.dcpl + + # destination file not known in advance + if dst_filename in self._src_filenames: + # At least 1 source file is the same as the destination file, + # but we didn't know this when making the mapping. Copy the mappings + # to a new property list, replacing the dest filename with '.' + new_dcpl = h5p.create(h5p.DATASET_CREATE) + for i in range(self.dcpl.get_virtual_count()): + src_filename = self.dcpl.get_virtual_filename(i) + new_dcpl.set_virtual( + self.dcpl.get_virtual_vspace(i), + self._source_file_name(src_filename, dst_filename), + self.dcpl.get_virtual_dsetname(i).encode('utf-8'), + self.dcpl.get_virtual_srcspace(i), + ) + return new_dcpl + else: + return self.dcpl # Mappings are all from other files + + def make_dataset(self, parent, name, fillvalue=None): + """ Return a new low-level dataset identifier for a virtual dataset """ + dcpl = self._get_dcpl(parent.file.filename) + + if fillvalue is not None: + dcpl.set_fill_value(np.array([fillvalue])) + + maxshape = self.maxshape + if maxshape is not None: + maxshape = tuple(m if m is not None else h5s.UNLIMITED for m in maxshape) + + virt_dspace = h5s.create_simple(self.shape, maxshape) + + if isinstance(self.dtype, Datatype): + # Named types are used as-is + tid = self.dtype.id + else: + dtype = np.dtype(self.dtype) + tid = h5t.py_create(dtype, logical=1) + + return h5d.create(parent.id, name=name, tid=tid, space=virt_dspace, + dcpl=dcpl) diff --git a/MLPY/Lib/site-packages/h5py/_objects.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/_objects.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..3f67bc0d558cc83e9478030ae4dc9f6f260b9c9b Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/_objects.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/_proxy.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/_proxy.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fba7a489e79fec025f59ae73a0d83ad22861f5e9 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/_proxy.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/_selector.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/_selector.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..474594498384c5adb9fcb5a09671eb0cb666c589 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/_selector.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/defs.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/defs.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..6538dea21618700b0b1392a3000c3bcc9fb7247d Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/defs.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..97fef1409717c33e43766aced9bc1376cd880a29 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5a.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5a.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..4365f6186f30e2fec122c77793e448c9036e34f3 Binary files 
/dev/null and b/MLPY/Lib/site-packages/h5py/h5a.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5ac.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5ac.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..acb942127307bd70cd35e182ca5cbe5cb1bfb5b7 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5ac.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5d.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5d.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..30c51b85aaf0568eb6b966e54a0926b37189d19c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5d.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5ds.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5ds.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..97a91addcc65f8edd11dba0d607243ef0f9a3ea6 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5ds.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5f.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5f.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0056660de916868d47b8e207d8ed3b669de52879 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5f.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5fd.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5fd.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e0d89d4c6e42c7ea370ec084726481e517702a86 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5fd.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5g.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5g.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..cc89ccb7d43aae43b71361322433a954c77e5086 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5g.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5i.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5i.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fbe02c595c4d471193d8942b7be4c28d673e18db Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5i.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5l.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5l.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f4fef19e9d1985a6ce30eee93ef3119ad8e55574 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5l.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5o.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5o.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e11056ac9fe25f0149de1a00eda259a005fe8a7c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5o.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5p.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5p.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9784fabbd3ee5cf7b6d3a0d9eac29cabcc723326 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5p.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5pl.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5pl.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..41eb5faee7e75448fb1b693fbeedda68949f328a Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5pl.cp39-win_amd64.pyd differ diff --git 
a/MLPY/Lib/site-packages/h5py/h5py_warnings.py b/MLPY/Lib/site-packages/h5py/h5py_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..b863abadae94be24d410241ca47bd55b69439ae6 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/h5py_warnings.py @@ -0,0 +1,21 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + This module contains the warning classes for h5py. These classes are part of + the public API of h5py, and should be imported from this module. +""" + + +class H5pyWarning(UserWarning): + pass + + +class H5pyDeprecationWarning(H5pyWarning): + pass diff --git a/MLPY/Lib/site-packages/h5py/h5r.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5r.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f15d231588bab8ec09367426c1011fa4dbc64873 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5r.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5s.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5s.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..4974432573d97ee5933a2733ef2e55bdac81688e Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5s.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5t.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5t.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..6c2eabfcb3c1d8c27daf3b6ab5e57e2d68eb1d00 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5t.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/h5z.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/h5z.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..73f5ae2eebae060f4a54f09a9b70cdae7ba6f0a7 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/h5z.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/hdf5.dll b/MLPY/Lib/site-packages/h5py/hdf5.dll new file mode 100644 index 0000000000000000000000000000000000000000..8895336a2d2fb7124e28d64508b7b926e692bd72 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/hdf5.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a01cbfa8613100726e8d92774b8b41dc95da0c14148becf7bfdf2e630ebe580a +size 3458560 diff --git a/MLPY/Lib/site-packages/h5py/hdf5_hl.dll b/MLPY/Lib/site-packages/h5py/hdf5_hl.dll new file mode 100644 index 0000000000000000000000000000000000000000..1db7a56ce8855bf8cc9eec88e1820183cac48675 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/hdf5_hl.dll differ diff --git a/MLPY/Lib/site-packages/h5py/ipy_completer.py b/MLPY/Lib/site-packages/h5py/ipy_completer.py new file mode 100644 index 0000000000000000000000000000000000000000..0e8848c2293a8ae83d3fabb17f6ab8d0c43f4c08 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/ipy_completer.py @@ -0,0 +1,156 @@ +#+ +# +# This file is part of h5py, a low-level Python interface to the HDF5 library. +# +# Contributed by Darren Dale +# +# Copyright (C) 2009 Darren Dale +# +# http://h5py.org +# License: BSD (See LICENSE.txt for full license) +# +#- + +# pylint: disable=eval-used,protected-access + +""" + This is the h5py completer extension for ipython. It is loaded by + calling the function h5py.enable_ipython_completer() from within an + interactive IPython session. 
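A short sketch of how this completer is meant to be used; it only applies inside an interactive IPython session, and the file and dataset names below are illustrative.

```python
import h5py

# Registers the completer hook defined in this module; outside an active
# IPython session this call raises RuntimeError.
h5py.enable_ipython_completer()

f = h5py.File("foo.h5", "w")   # illustrative file name
f.create_dataset("item1/item2/values", data=[1.0, 2.0])

# Typing   f['item1/it   and pressing TAB now completes the item name,
# and   f['item1'].attrs.   followed by TAB completes attribute names.
```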
+ + It will let you do things like:: + + f=File('foo.h5') + f['<tab> + # or: + f['ite<tab> + + which will do tab completion based on the subgroups of `f`. Also:: + + f['item1'].at<tab> + + will perform tab completion for the attributes in the usual way. This should + also work:: + + a = b = f['item1'].attrs.<tab> + + as should:: + + f['item1/item2/it<tab> +""" + +import posixpath +import re +from ._hl.attrs import AttributeManager +from ._hl.base import HLObject + + +from IPython import get_ipython + +from IPython.core.error import TryNext +from IPython.utils import generics + +re_attr_match = re.compile(r"(?:.*\=)?(.+\[.*\].*)\.(\w*)$") +re_item_match = re.compile(r"""(?:.*\=)?(.*)\[(?P<s>['|"])(?!.*(?P=s))(.*)$""") +re_object_match = re.compile(r"(?:.*\=)?(.+?)(?:\[)") + + +def _retrieve_obj(name, context): + """ Filter function for completion. """ + + # we don't want to call any functions, but I couldn't find a robust regex + # that filtered them without unintended side effects. So keys containing + # "(" will not complete. + + if '(' in name: + raise ValueError() + + return eval(name, context.user_ns) + + +def h5py_item_completer(context, command): + """Compute possible item matches for dict-like objects""" + + base, item = re_item_match.split(command)[1:4:2] + + try: + obj = _retrieve_obj(base, context) + except Exception: + return [] + + path, _ = posixpath.split(item) + + try: + if path: + items = (posixpath.join(path, name) for name in obj[path].keys()) + else: + items = obj.keys() + except AttributeError: + return [] + + items = list(items) + + return [i for i in items if i[:len(item)] == item] + + +def h5py_attr_completer(context, command): + """Compute possible attr matches for nested dict-like objects""" + + base, attr = re_attr_match.split(command)[1:3] + base = base.strip() + + try: + obj = _retrieve_obj(base, context) + except Exception: + return [] + + attrs = dir(obj) + try: + attrs = generics.complete_object(obj, attrs) + except TryNext: + pass + + try: + # support >=ipython-0.12 + omit__names = get_ipython().Completer.omit__names + except AttributeError: + omit__names = 0 + + if omit__names == 1: + attrs = [a for a in attrs if not a.startswith('__')] + elif omit__names == 2: + attrs = [a for a in attrs if not a.startswith('_')] + + return ["%s.%s" % (base, a) for a in attrs if a[:len(attr)] == attr] + + +def h5py_completer(self, event): + """ Completer function to be loaded into IPython """ + base = re_object_match.split(event.line)[1] + + try: + obj = self._ofind(base).obj + except AttributeError: + obj = self._ofind(base).get('obj') + + if not isinstance(obj, (AttributeManager, HLObject)): + raise TryNext + + try: + return h5py_attr_completer(self, event.line) + except ValueError: + pass + + try: + return h5py_item_completer(self, event.line) + except ValueError: + pass + + return [] + + +def load_ipython_extension(ip=None): + """ Load completer function into IPython """ + if ip is None: + ip = get_ipython() + ip.set_hook('complete_command', h5py_completer, re_key=r"(?:.*\=)?(.+?)\[") diff --git a/MLPY/Lib/site-packages/h5py/tests/__init__.py b/MLPY/Lib/site-packages/h5py/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9598536bd4548e629f66cb8a8c199178099dff72 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/__init__.py @@ -0,0 +1,23 @@ +# This file is part of h5py, a Python interface to the HDF5 library.
+# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + + +def run_tests(args=''): + try: + from pytest import main + except ImportError: + print("Tests require pytest, pytest not installed") + return 1 + else: + from shlex import split + from subprocess import call + from sys import executable + cli = [executable, "-m", "pytest", "--pyargs", "h5py"] + cli.extend(split(args)) + return call(cli) diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d74f6cb614e1fe5ff0b42ffc07313ba43800283c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/common.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c60e72bd0c0f4edad15eb729b42a39e61382556 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/common.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/conftest.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/conftest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..379892217ec33481f6aba8a74aea77e904e7e83d Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/conftest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attribute_create.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attribute_create.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e02bedf710838ef6b7c084901bb28c3d6c1b53 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attribute_create.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..960df597970f01f137abc7d870816f8fffe2fe25 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs_data.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs_data.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ac631d3952d0a1dcd4c3cc1b116830ee8e5ae22 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_attrs_data.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_base.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be136dfd6f873bcf8dcce9fda14ff4a43e279855 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_big_endian_file.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_big_endian_file.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e4c7fdc7e5b9e0e5b2b9783d0e6af62bc1ac77 Binary files /dev/null and 
b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_big_endian_file.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_completions.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_completions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e01e70af3c97814fd2651be8657d4c350f6f6261 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_completions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca10a4dbc78e9143b686a686528ef261b46cc6c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_getitem.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_getitem.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..294ee663974d65f2acb3aaad79ac5edade1541f5 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_getitem.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_swmr.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_swmr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8fa5f5002f44f1b51b926230efaedbc5b4e23fd Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dataset_swmr.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_datatype.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_datatype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14066d8ada37c013ac0d957100d5f50a26c1428d Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_datatype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dimension_scales.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dimension_scales.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..493c2b6ee9c5e4bdbdc379cf78773e05b0594c71 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dimension_scales.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dims_dimensionproxy.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dims_dimensionproxy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e01de80c43e000668cf28cce837077f3dd42146a Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dims_dimensionproxy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dtype.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65183673e58ba9c9d96dffd3fcd15fab77a7b107 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_errors.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_errors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7f4e94fd01e39a609cd90dec382ce3689b284d1 Binary files 
/dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_errors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcf9b9d092c56c71a8b823c07272250a8b3116f5 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file2.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87b2aa57593e408ef00b0f5087163d05845f64ef Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_alignment.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_alignment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..930b2d865c3efbe5d2d9dd568ee736d69dc59469 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_alignment.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_image.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_image.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc054ca1d49d7363772ee2844d07fe1ad15b50d Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_file_image.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_filters.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe06baaf38ccd75e8a3acd3587ce0e92b0e853d7 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_group.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_group.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1ad66d8c3c655034f1c4cddc5cbfc3b9f091b64 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_group.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61cecc72746991357259390867b16e98c4264607 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5d_direct_chunk.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5d_direct_chunk.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba7dff10e7b141f8d4e89bf7d3be8c222a9d2a69 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5d_direct_chunk.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5f.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5f.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9646cffe476c90e20f1be04c7e87617a6c6b8b4c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5f.cpython-39.pyc differ 
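The run_tests() helper added in h5py/tests/__init__.py above simply shells out to pytest with --pyargs h5py, and ipy_completer.py is activated through h5py.enable_ipython_completer(), as its module docstring notes. A minimal usage sketch, assuming the vendored MLPY interpreter is active and pytest is installed (the extra pytest flags are illustrative):

import h5py
from h5py.tests import run_tests

# Equivalent to running: python -m pytest --pyargs h5py
# Extra arguments are shlex-split and appended to the pytest command line.
exit_code = run_tests("-x -q")

# Inside an interactive IPython session only: register the item/attribute
# completer, after which f['ite<TAB> completes subgroup and dataset names.
h5py.enable_ipython_completer()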
diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5o.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5o.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd81db046a62854920884dadc6a4a41fc571728b Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5o.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5p.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5p.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4710d5d5790e037c640c7289fd0e65944cd94d17 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5p.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5pl.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5pl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d7c18cce3b87c49b4325ea8b3414db87d6aa130 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5pl.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5t.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5t.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc67a40ee0c348217a3665792725b091f610c14e Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5t.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5z.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5z.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55811e8ed8c98ee55a08ff216a4f0193f721a514 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_h5z.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_objects.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_objects.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5828a2d6b0cd9629f7ca38b91b309ec4ea8087d5 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_objects.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_ros3.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_ros3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df7a8527c2afddfde5b1105b948f49c417b83377 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_ros3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_selections.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_selections.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7015ad5f4d1bf604366b8a604f8f7cba9319d088 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_selections.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_slicing.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_slicing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f29a37ffa52a35d13dc3b3376b4d083bcfcbb3c Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/__pycache__/test_slicing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/common.py b/MLPY/Lib/site-packages/h5py/tests/common.py new file mode 100644 index 
0000000000000000000000000000000000000000..b38ab53d1575b6cf5c804c5e791c72b500adbd81 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/common.py @@ -0,0 +1,238 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +import sys +import os +import shutil +import inspect +import tempfile +import subprocess +from contextlib import contextmanager +from functools import wraps + +import numpy as np +from numpy.lib.recfunctions import repack_fields +import h5py + +import unittest as ut + + +# Check if non-ascii filenames are supported +# Evidently this is the most reliable way to check +# See also h5py issue #263 and ipython #466 +# To test for this, run the testsuite with LC_ALL=C +try: + testfile, fname = tempfile.mkstemp(chr(0x03b7)) +except UnicodeError: + UNICODE_FILENAMES = False +else: + UNICODE_FILENAMES = True + os.close(testfile) + os.unlink(fname) + del fname + del testfile + + +class TestCase(ut.TestCase): + + """ + Base class for unit tests. + """ + + @classmethod + def setUpClass(cls): + cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_') + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.tempdir) + + def mktemp(self, suffix='.hdf5', prefix='', dir=None): + if dir is None: + dir = self.tempdir + return tempfile.mktemp(suffix, prefix, dir=dir) + + def mktemp_mpi(self, comm=None, suffix='.hdf5', prefix='', dir=None): + if comm is None: + from mpi4py import MPI + comm = MPI.COMM_WORLD + fname = None + if comm.Get_rank() == 0: + fname = self.mktemp(suffix, prefix, dir) + fname = comm.bcast(fname, 0) + return fname + + def setUp(self): + self.f = h5py.File(self.mktemp(), 'w') + + def tearDown(self): + try: + if self.f: + self.f.close() + except: + pass + + def assertSameElements(self, a, b): + for x in a: + match = False + for y in b: + if x == y: + match = True + if not match: + raise AssertionError("Item '%s' appears in a but not b" % x) + + for x in b: + match = False + for y in a: + if x == y: + match = True + if not match: + raise AssertionError("Item '%s' appears in b but not a" % x) + + def assertArrayEqual(self, dset, arr, message=None, precision=None, check_alignment=True): + """ Make sure dset and arr have the same shape, dtype and contents, to + within the given precision, optionally ignoring differences in dtype alignment. + + Note that dset may be a NumPy array or an HDF5 dataset. 
+ """ + if precision is None: + precision = 1e-5 + if message is None: + message = '' + else: + message = ' (%s)' % message + + if np.isscalar(dset) or np.isscalar(arr): + assert np.isscalar(dset) and np.isscalar(arr), \ + 'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message) + dset = np.asarray(dset) + arr = np.asarray(arr) + + assert dset.shape == arr.shape, \ + "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message) + if dset.dtype != arr.dtype: + if check_alignment: + normalized_dset_dtype = dset.dtype + normalized_arr_dtype = arr.dtype + else: + normalized_dset_dtype = repack_fields(dset.dtype) + normalized_arr_dtype = repack_fields(arr.dtype) + + assert normalized_dset_dtype == normalized_arr_dtype, \ + "Dtype mismatch (%s vs %s)%s" % (normalized_dset_dtype, normalized_arr_dtype, message) + + if not check_alignment: + if normalized_dset_dtype != dset.dtype: + dset = repack_fields(np.asarray(dset)) + if normalized_arr_dtype != arr.dtype: + arr = repack_fields(np.asarray(arr)) + + if arr.dtype.names is not None: + for n in arr.dtype.names: + message = '[FIELD %s] %s' % (n, message) + self.assertArrayEqual(dset[n], arr[n], message=message, precision=precision, check_alignment=check_alignment) + elif arr.dtype.kind in ('i', 'f'): + assert np.all(np.abs(dset[...] - arr[...]) < precision), \ + "Arrays differ by more than %.3f%s" % (precision, message) + elif arr.dtype.kind == 'O': + for v1, v2 in zip(dset.flat, arr.flat): + self.assertArrayEqual(v1, v2, message=message, precision=precision, check_alignment=check_alignment) + else: + assert np.all(dset[...] == arr[...]), \ + "Arrays are not equal (dtype %s) %s" % (arr.dtype.str, message) + + def assertNumpyBehavior(self, dset, arr, s, skip_fast_reader=False): + """ Apply slicing arguments "s" to both dset and arr. + + Succeeds if the results of the slicing are identical, or the + exception raised is of the same type for both. + + "arr" must be a Numpy array; "dset" may be a NumPy array or dataset. + """ + exc = None + try: + arr_result = arr[s] + except Exception as e: + exc = type(e) + + s_fast = s if isinstance(s, tuple) else (s,) + + if exc is None: + self.assertArrayEqual(dset[s], arr_result) + + if not skip_fast_reader: + self.assertArrayEqual( + dset._fast_reader.read(s_fast), + arr_result, + ) + else: + with self.assertRaises(exc): + dset[s] + + if not skip_fast_reader: + with self.assertRaises(exc): + dset._fast_reader.read(s_fast) + +NUMPY_RELEASE_VERSION = tuple([int(i) for i in np.__version__.split(".")[0:2]]) + +@contextmanager +def closed_tempfile(suffix='', text=None): + """ + Context manager which yields the path to a closed temporary file with the + suffix `suffix`. The file will be deleted on exiting the context. An + additional argument `text` can be provided to have the file contain `text`. 
+ """ + with tempfile.NamedTemporaryFile( + 'w+t', suffix=suffix, delete=False + ) as test_file: + file_name = test_file.name + if text is not None: + test_file.write(text) + test_file.flush() + yield file_name + shutil.rmtree(file_name, ignore_errors=True) + + +def insubprocess(f): + """Runs a test in its own subprocess""" + @wraps(f) + def wrapper(request, *args, **kwargs): + curr_test = inspect.getsourcefile(f) + "::" + request.node.name + # get block around test name + insub = "IN_SUBPROCESS_" + curr_test + for c in "/\\,:.": + insub = insub.replace(c, "_") + defined = os.environ.get(insub, None) + # fork process + if defined: + return f(request, *args, **kwargs) + else: + os.environ[insub] = '1' + env = os.environ.copy() + env[insub] = '1' + env.update(getattr(f, 'subproc_env', {})) + + with closed_tempfile() as stdout: + with open(stdout, 'w+t') as fh: + rtn = subprocess.call([sys.executable, '-m', 'pytest', curr_test], + stdout=fh, stderr=fh, env=env) + with open(stdout, 'rt') as fh: + out = fh.read() + + assert rtn == 0, "\n" + out + return wrapper + + +def subproc_env(d): + """Set environment variables for the @insubprocess decorator""" + def decorator(f): + f.subproc_env = d + return f + + return decorator diff --git a/MLPY/Lib/site-packages/h5py/tests/conftest.py b/MLPY/Lib/site-packages/h5py/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..2be67d7f02451c486cabce1245928f07b81f5014 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/conftest.py @@ -0,0 +1,22 @@ +import h5py +import pytest + + +@pytest.fixture() +def writable_file(tmp_path): + with h5py.File(tmp_path / 'test.h5', 'w') as f: + yield f + + +def pytest_addoption(parser): + parser.addoption( + '--no-network', action='store_true', default=False, help='No network access' + ) + + +def pytest_collection_modifyitems(config, items): + if config.getoption('--no-network'): + nonet = pytest.mark.skip(reason='No Internet') + for item in items: + if 'nonetwork' in item.keywords: + item.add_marker(nonet) diff --git a/MLPY/Lib/site-packages/h5py/tests/data_files/__init__.py b/MLPY/Lib/site-packages/h5py/tests/data_files/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..597d2fefa3f6bb6eb8d16a0ef836fdd2e21d6ad4 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/data_files/__init__.py @@ -0,0 +1,7 @@ +from os.path import dirname, join + +def get_data_file_path(basename): + """ + Returns the path to the test data file given by `basename` + """ + return join(dirname(__file__), basename) diff --git a/MLPY/Lib/site-packages/h5py/tests/data_files/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/data_files/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dca042387887e82a7db7420449c8ac11c0241080 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/data_files/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset.h5 b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset.h5 new file mode 100644 index 0000000000000000000000000000000000000000..dbf021d4c80f66b6343fead92e5d7a6ea8ab647e --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:900f8baf19d3d8c4d1193ac1ac6019ee7774005e85c030af44e6757b38d29793 +size 6304 diff --git a/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset_utc.h5 
b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset_utc.h5 new file mode 100644 index 0000000000000000000000000000000000000000..ca72c33de37d93f49b7121526b855f52bd927c47 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_dset_utc.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b728382b833c1da61627b9a334e22822d5c1cb359fe3ba6f25262af4532f63 +size 169904 diff --git a/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_s390x.h5 b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_s390x.h5 new file mode 100644 index 0000000000000000000000000000000000000000..bca4b91ad9029e3d0f0118aff12c28094edc95af --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/data_files/vlen_string_s390x.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea990c68e0371bafab4966f752fa1355b3b9f64360066f6d9daff8f1b7e74ca5 +size 9008 diff --git a/MLPY/Lib/site-packages/h5py/tests/test_attribute_create.py b/MLPY/Lib/site-packages/h5py/tests/test_attribute_create.py new file mode 100644 index 0000000000000000000000000000000000000000..48169776cdb7d5315f1ecc954fc642196a2edfca --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_attribute_create.py @@ -0,0 +1,95 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Tests the h5py.AttributeManager.create() method. +""" + +import numpy as np +from .. import h5t, h5a + +from .common import ut, TestCase + +class TestArray(TestCase): + + """ + Check that top-level array types can be created and read. + """ + + def test_int(self): + # See issue 498 + + dt = np.dtype('(3,)i') + data = np.arange(3, dtype='i') + + self.f.attrs.create('x', data=data, dtype=dt) + + aid = h5a.open(self.f.id, b'x') + + htype = aid.get_type() + self.assertEqual(htype.get_class(), h5t.ARRAY) + + out = self.f.attrs['x'] + + self.assertArrayEqual(out, data) + + def test_string_dtype(self): + # See issue 498 discussion + + self.f.attrs.create('x', data=42, dtype='i8') + + def test_str(self): + # See issue 1057 + self.f.attrs.create('x', chr(0x03A9)) + out = self.f.attrs['x'] + self.assertEqual(out, chr(0x03A9)) + self.assertIsInstance(out, str) + + def test_tuple_of_unicode(self): + # Test that a tuple of unicode strings can be set as an attribute. 
It will + # be converted to a numpy array of vlen unicode type: + data = ('a', 'b') + self.f.attrs.create('x', data=data) + result = self.f.attrs['x'] + self.assertTrue(all(result == data)) + self.assertEqual(result.dtype, np.dtype('O')) + + # However, a numpy array of type U being passed in will not be + # automatically converted, and should raise an error as it does + # not map to a h5py dtype + data_as_U_array = np.array(data) + self.assertEqual(data_as_U_array.dtype, np.dtype('U1')) + with self.assertRaises(TypeError): + self.f.attrs.create('y', data=data_as_U_array) + + def test_shape(self): + self.f.attrs.create('x', data=42, shape=1) + result = self.f.attrs['x'] + self.assertEqual(result.shape, (1,)) + + self.f.attrs.create('y', data=np.arange(3), shape=3) + result = self.f.attrs['y'] + self.assertEqual(result.shape, (3,)) + + def test_dtype(self): + dt = np.dtype('(3,)i') + array = np.arange(3, dtype='i') + self.f.attrs.create('x', data=array, dtype=dt) + # Array dtype shape is incompatible with data shape + array = np.arange(4, dtype='i') + with self.assertRaises(ValueError): + self.f.attrs.create('x', data=array, dtype=dt) + # Shape of new attribute conflicts with shape of data + dt = np.dtype('()i') + with self.assertRaises(ValueError): + self.f.attrs.create('x', data=array, shape=(5,), dtype=dt) + + def test_key_type(self): + with self.assertRaises(TypeError): + self.f.attrs.create(1, data=('a', 'b')) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_attrs.py b/MLPY/Lib/site-packages/h5py/tests/test_attrs.py new file mode 100644 index 0000000000000000000000000000000000000000..7e7c9f5a620e4ad764b722c32dc8d725f3ccff5d --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_attrs.py @@ -0,0 +1,301 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Attributes testing module + + Covers all operations which access the .attrs property, with the + exception of data read/write and type conversion. Those operations + are tested by module test_attrs_data. 
+""" + +import numpy as np + +from collections.abc import MutableMapping + +from .common import TestCase, ut + +import h5py +from h5py import File +from h5py import h5a, h5t +from h5py import AttributeManager + + +class BaseAttrs(TestCase): + + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + +class TestRepr(TestCase): + + """ Feature: AttributeManager provide a helpful + __repr__ string + """ + + def test_repr(self): + grp = self.f.create_group('grp') + grp.attrs.create('att', 1) + self.assertIsInstance(repr(grp.attrs), str) + grp.id.close() + self.assertIsInstance(repr(grp.attrs), str) + + +class TestAccess(BaseAttrs): + + """ + Feature: Attribute creation/retrieval via special methods + """ + + def test_create(self): + """ Attribute creation by direct assignment """ + self.f.attrs['a'] = 4.0 + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 4.0) + + def test_create_2(self): + """ Attribute creation by create() method """ + self.f.attrs.create('a', 4.0) + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 4.0) + + def test_modify(self): + """ Attributes are modified by direct assignment""" + self.f.attrs['a'] = 3 + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 3) + self.f.attrs['a'] = 4 + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 4) + + def test_modify_2(self): + """ Attributes are modified by modify() method """ + self.f.attrs.modify('a',3) + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 3) + + self.f.attrs.modify('a', 4) + self.assertEqual(list(self.f.attrs.keys()), ['a']) + self.assertEqual(self.f.attrs['a'], 4) + + # If the attribute doesn't exist, create new + self.f.attrs.modify('b', 5) + self.assertEqual(list(self.f.attrs.keys()), ['a', 'b']) + self.assertEqual(self.f.attrs['a'], 4) + self.assertEqual(self.f.attrs['b'], 5) + + # Shape of new value is incompatible with the previous + new_value = np.arange(5) + with self.assertRaises(TypeError): + self.f.attrs.modify('b', new_value) + + def test_overwrite(self): + """ Attributes are silently overwritten """ + self.f.attrs['a'] = 4.0 + self.f.attrs['a'] = 5.0 + self.assertEqual(self.f.attrs['a'], 5.0) + + def test_rank(self): + """ Attribute rank is preserved """ + self.f.attrs['a'] = (4.0, 5.0) + self.assertEqual(self.f.attrs['a'].shape, (2,)) + self.assertArrayEqual(self.f.attrs['a'], np.array((4.0,5.0))) + + def test_single(self): + """ Attributes of shape (1,) don't become scalars """ + self.f.attrs['a'] = np.ones((1,)) + out = self.f.attrs['a'] + self.assertEqual(out.shape, (1,)) + self.assertEqual(out[()], 1) + + def test_access_exc(self): + """ Attempt to access missing item raises KeyError """ + with self.assertRaises(KeyError): + self.f.attrs['a'] + + def test_get_id(self): + self.f.attrs['a'] = 4.0 + aid = self.f.attrs.get_id('a') + assert isinstance(aid, h5a.AttrID) + + with self.assertRaises(KeyError): + self.f.attrs.get_id('b') + +class TestDelete(BaseAttrs): + + """ + Feature: Deletion of attributes using __delitem__ + """ + + def test_delete(self): + """ Deletion via "del" """ + self.f.attrs['a'] = 4.0 + self.assertIn('a', self.f.attrs) + del self.f.attrs['a'] + self.assertNotIn('a', self.f.attrs) + + def test_delete_exc(self): + """ Attempt to delete missing item raises KeyError """ + with self.assertRaises(KeyError): + del self.f.attrs['a'] + + 
+class TestUnicode(BaseAttrs): + + """ + Feature: Attributes can be accessed via Unicode or byte strings + """ + + def test_ascii(self): + """ Access via pure-ASCII byte string """ + self.f.attrs[b"ascii"] = 42 + out = self.f.attrs[b"ascii"] + self.assertEqual(out, 42) + + def test_raw(self): + """ Access via non-ASCII byte string """ + name = b"non-ascii\xfe" + self.f.attrs[name] = 42 + out = self.f.attrs[name] + self.assertEqual(out, 42) + + def test_unicode(self): + """ Access via Unicode string with non-ascii characters """ + name = "Omega" + chr(0x03A9) + self.f.attrs[name] = 42 + out = self.f.attrs[name] + self.assertEqual(out, 42) + + +class TestCreate(BaseAttrs): + + """ + Options for explicit attribute creation + """ + + def test_named(self): + """ Attributes created from named types link to the source type object + """ + self.f['type'] = np.dtype('u8') + self.f.attrs.create('x', 42, dtype=self.f['type']) + self.assertEqual(self.f.attrs['x'], 42) + aid = h5a.open(self.f.id, b'x') + htype = aid.get_type() + htype2 = self.f['type'].id + self.assertEqual(htype, htype2) + self.assertTrue(htype.committed()) + + def test_empty(self): + # https://github.com/h5py/h5py/issues/1540 + """ Create attribute with h5py.Empty value + """ + self.f.attrs.create('empty', h5py.Empty('f')) + self.assertEqual(self.f.attrs['empty'], h5py.Empty('f')) + + self.f.attrs.create('empty', h5py.Empty(None)) + self.assertEqual(self.f.attrs['empty'], h5py.Empty(None)) + +class TestMutableMapping(BaseAttrs): + '''Tests if the registration of AttributeManager as a MutableMapping + behaves as expected + ''' + def test_resolution(self): + assert issubclass(AttributeManager, MutableMapping) + assert isinstance(self.f.attrs, MutableMapping) + + def test_validity(self): + ''' + Test that the required functions are implemented. 
+ ''' + AttributeManager.__getitem__ + AttributeManager.__setitem__ + AttributeManager.__delitem__ + AttributeManager.__iter__ + AttributeManager.__len__ + +class TestVlen(BaseAttrs): + def test_vlen(self): + a = np.array([np.arange(3), np.arange(4)], + dtype=h5t.vlen_dtype(int)) + self.f.attrs['a'] = a + self.assertArrayEqual(self.f.attrs['a'][0], a[0]) + + def test_vlen_s1(self): + dt = h5py.vlen_dtype(np.dtype('S1')) + a = np.empty((1,), dtype=dt) + a[0] = np.array([b'a', b'b'], dtype='S1') + + self.f.attrs.create('test', a) + self.assertArrayEqual(self.f.attrs['test'][0], a[0]) + + +class TestTrackOrder(BaseAttrs): + def fill_attrs(self, track_order): + attrs = self.f.create_group('test', track_order=track_order).attrs + for i in range(100): + attrs[str(i)] = i + return attrs + + @ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 10, 6), 'HDF5 1.10.6 required') + # https://forum.hdfgroup.org/t/bug-h5arename-fails-unexpectedly/4881 + def test_track_order(self): + attrs = self.fill_attrs(track_order=True) # creation order + self.assertEqual(list(attrs), + [str(i) for i in range(100)]) + + def test_no_track_order(self): + attrs = self.fill_attrs(track_order=False) # name alphanumeric + self.assertEqual(list(attrs), + sorted([str(i) for i in range(100)])) + + def fill_attrs2(self, track_order): + group = self.f.create_group('test', track_order=track_order) + for i in range(12): + group.attrs[str(i)] = i + return group + + @ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 10, 6), 'HDF5 1.10.6 required') + def test_track_order_overwrite_delete(self): + # issue 1385 + group = self.fill_attrs2(track_order=True) # creation order + self.assertEqual(group.attrs["11"], 11) + # overwrite attribute + group.attrs['11'] = 42.0 + self.assertEqual(group.attrs["11"], 42.0) + # delete attribute + self.assertIn('10', group.attrs) + del group.attrs['10'] + self.assertNotIn('10', group.attrs) + + +class TestDatatype(BaseAttrs): + + def test_datatype(self): + self.f['foo'] = np.dtype('f') + dt = self.f['foo'] + self.assertEqual(list(dt.attrs.keys()), []) + dt.attrs.create('a', 4.0) + self.assertEqual(list(dt.attrs.keys()), ['a']) + self.assertEqual(list(dt.attrs.values()), [4.0]) + +def test_python_int_uint64(writable_file): + f = writable_file + data = [np.iinfo(np.int64).max, np.iinfo(np.int64).max + 1] + + # Check creating a new attribute + f.attrs.create('a', data, dtype=np.uint64) + assert f.attrs['a'].dtype == np.dtype(np.uint64) + np.testing.assert_array_equal(f.attrs['a'], np.array(data, dtype=np.uint64)) + + # Check modifying an existing attribute + f.attrs.modify('a', data) + np.testing.assert_array_equal(f.attrs['a'], np.array(data, dtype=np.uint64)) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_attrs_data.py b/MLPY/Lib/site-packages/h5py/tests/test_attrs_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8156b0bac5b394c46b191a02b790a66a11f79742 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_attrs_data.py @@ -0,0 +1,311 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Attribute data transfer testing module + + Covers all data read/write and type-conversion operations for attributes. 
+""" + +import numpy as np + +from .common import TestCase, ut + +import h5py +from h5py import h5a, h5s, h5t +from h5py import File +from h5py._hl.base import is_empty_dataspace + + +class BaseAttrs(TestCase): + + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + + +class TestScalar(BaseAttrs): + + """ + Feature: Scalar types map correctly to array scalars + """ + + def test_int(self): + """ Integers are read as correct NumPy type """ + self.f.attrs['x'] = np.array(1, dtype=np.int8) + out = self.f.attrs['x'] + self.assertIsInstance(out, np.int8) + + def test_compound(self): + """ Compound scalars are read as numpy.void """ + dt = np.dtype([('a', 'i'), ('b', 'f')]) + data = np.array((1, 4.2), dtype=dt) + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertIsInstance(out, np.void) + self.assertEqual(out, data) + self.assertEqual(out['b'], data['b']) + + def test_compound_with_vlen_fields(self): + """ Compound scalars with vlen fields can be written and read """ + dt = np.dtype([('a', h5py.vlen_dtype(np.int32)), + ('b', h5py.vlen_dtype(np.int32))]) + + data = np.array((np.array(list(range(1, 5)), dtype=np.int32), + np.array(list(range(8, 10)), dtype=np.int32)), dtype=dt)[()] + + self.f.attrs['x'] = data + out = self.f.attrs['x'] + + # Specifying check_alignment=False because vlen fields have 8 bytes of padding + # because the vlen datatype in hdf5 occupies 16 bytes + self.assertArrayEqual(out, data, check_alignment=False) + + def test_nesting_compound_with_vlen_fields(self): + """ Compound scalars with nested compound vlen fields can be written and read """ + dt_inner = np.dtype([('a', h5py.vlen_dtype(np.int32)), + ('b', h5py.vlen_dtype(np.int32))]) + + dt = np.dtype([('f1', h5py.vlen_dtype(dt_inner)), + ('f2', np.int64)]) + + inner1 = (np.array(range(1, 3), dtype=np.int32), + np.array(range(6, 9), dtype=np.int32)) + + inner2 = (np.array(range(10, 14), dtype=np.int32), + np.array(range(16, 20), dtype=np.int32)) + + data = np.array((np.array([inner1, inner2], dtype=dt_inner), + 2), + dtype=dt)[()] + + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertArrayEqual(out, data, check_alignment=False) + + def test_vlen_compound_with_vlen_string(self): + """ Compound scalars with vlen compounds containing vlen strings can be written and read """ + dt_inner = np.dtype([('a', h5py.string_dtype()), + ('b', h5py.string_dtype())]) + + dt = np.dtype([('f', h5py.vlen_dtype(dt_inner))]) + + data = np.array((np.array([(b"apples", b"bananas"), (b"peaches", b"oranges")], dtype=dt_inner),),dtype=dt)[()] + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertArrayEqual(out, data, check_alignment=False) + + +class TestArray(BaseAttrs): + + """ + Feature: Non-scalar types are correctly retrieved as ndarrays + """ + + def test_single(self): + """ Single-element arrays are correctly recovered """ + data = np.ndarray((1,), dtype='f') + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, (1,)) + + def test_multi(self): + """ Rank-1 arrays are correctly recovered """ + data = np.ndarray((42,), dtype='f') + data[:] = 42.0 + data[10:35] = -47.0 + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, (42,)) + self.assertArrayEqual(out, data) + + +class TestTypes(BaseAttrs): + + """ + Feature: All supported types can be stored in attributes + """ + + def test_int(self): + """ Storage of integer 
types """ + dtypes = (np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) + for dt in dtypes: + data = np.ndarray((1,), dtype=dt) + data[...] = 42 + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertEqual(out.dtype, dt) + self.assertArrayEqual(out, data) + + def test_float(self): + """ Storage of floating point types """ + dtypes = tuple(np.dtype(x) for x in ('f4', '>f8', 'c8', 'c16')) + + for dt in dtypes: + data = np.ndarray((1,), dtype=dt) + data[...] = -4.2j + 35.9 + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertEqual(out.dtype, dt) + self.assertArrayEqual(out, data) + + def test_string(self): + """ Storage of fixed-length strings """ + dtypes = tuple(np.dtype(x) for x in ('|S1', '|S10')) + + for dt in dtypes: + data = np.ndarray((1,), dtype=dt) + data[...] = 'h' + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertEqual(out.dtype, dt) + self.assertEqual(out[0], data[0]) + + def test_bool(self): + """ Storage of NumPy booleans """ + + data = np.ndarray((2,), dtype=np.bool_) + data[...] = True, False + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertEqual(out.dtype, data.dtype) + self.assertEqual(out[0], data[0]) + self.assertEqual(out[1], data[1]) + + def test_vlen_string_array(self): + """ Storage of vlen byte string arrays""" + dt = h5py.string_dtype(encoding='ascii') + + data = np.ndarray((2,), dtype=dt) + data[...] = "Hello", "Hi there! This is HDF5!" + + self.f.attrs['x'] = data + out = self.f.attrs['x'] + self.assertEqual(out.dtype, dt) + self.assertEqual(out[0], data[0]) + self.assertEqual(out[1], data[1]) + + def test_string_scalar(self): + """ Storage of variable-length byte string scalars (auto-creation) """ + + self.f.attrs['x'] = b'Hello' + out = self.f.attrs['x'] + + self.assertEqual(out, 'Hello') + self.assertEqual(type(out), str) + + aid = h5py.h5a.open(self.f.id, b"x") + tid = aid.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII) + self.assertTrue(tid.is_variable_str()) + + def test_unicode_scalar(self): + """ Storage of variable-length unicode strings (auto-creation) """ + + self.f.attrs['x'] = u"Hello" + chr(0x2340) + u"!!" 
+ out = self.f.attrs['x'] + self.assertEqual(out, u"Hello" + chr(0x2340) + u"!!") + self.assertEqual(type(out), str) + + aid = h5py.h5a.open(self.f.id, b"x") + tid = aid.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8) + self.assertTrue(tid.is_variable_str()) + + +class TestEmpty(BaseAttrs): + + def setUp(self): + BaseAttrs.setUp(self) + sid = h5s.create(h5s.NULL) + tid = h5t.C_S1.copy() + tid.set_size(10) + aid = h5a.create(self.f.id, b'x', tid, sid) + self.empty_obj = h5py.Empty(np.dtype("S10")) + + def test_read(self): + self.assertEqual( + self.empty_obj, self.f.attrs['x'] + ) + + def test_write(self): + self.f.attrs["y"] = self.empty_obj + self.assertTrue(is_empty_dataspace(h5a.open(self.f.id, b'y'))) + + def test_modify(self): + with self.assertRaises(OSError): + self.f.attrs.modify('x', 1) + + def test_values(self): + # list() is for Py3 where these are iterators + values = list(self.f.attrs.values()) + self.assertEqual( + [self.empty_obj], values + ) + + def test_items(self): + items = list(self.f.attrs.items()) + self.assertEqual( + [(u"x", self.empty_obj)], items + ) + + def test_itervalues(self): + values = list(self.f.attrs.values()) + self.assertEqual( + [self.empty_obj], values + ) + + def test_iteritems(self): + items = list(self.f.attrs.items()) + self.assertEqual( + [(u"x", self.empty_obj)], items + ) + + +class TestWriteException(BaseAttrs): + + """ + Ensure failed attribute writes don't leave garbage behind. + """ + + def test_write(self): + """ ValueError on string write wipes out attribute """ + + s = b"Hello\x00Hello" + + try: + self.f.attrs['x'] = s + except ValueError: + pass + + with self.assertRaises(KeyError): + self.f.attrs['x'] diff --git a/MLPY/Lib/site-packages/h5py/tests/test_base.py b/MLPY/Lib/site-packages/h5py/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..b606e4ed10282ea922495f9ab8aa169add19f703 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_base.py @@ -0,0 +1,146 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Common high-level operations test + + Tests features common to all high-level objects, like the .name property. 
+""" + +from h5py import File +from h5py._hl.base import is_hdf5, Empty +from .common import ut, TestCase, UNICODE_FILENAMES + +import numpy as np +import os +import tempfile + +class BaseTest(TestCase): + + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + + +class TestName(BaseTest): + + """ + Feature: .name attribute returns the object name + """ + + def test_anonymous(self): + """ Anonymous objects have name None """ + grp = self.f.create_group(None) + self.assertIs(grp.name, None) + +class TestParent(BaseTest): + + """ + test the parent group of the high-level interface objects + """ + + def test_object_parent(self): + # Anonymous objects + grp = self.f.create_group(None) + # Parent of an anonymous object is undefined + with self.assertRaises(ValueError): + grp.parent + + # Named objects + grp = self.f.create_group("bar") + sub_grp = grp.create_group("foo") + parent = sub_grp.parent.name + self.assertEqual(parent, "/bar") + +class TestMapping(BaseTest): + + """ + Test if the registration of Group as a + Mapping behaves as expected + """ + + def setUp(self): + super().setUp() + data = ('a', 'b') + self.grp = self.f.create_group('bar') + self.attr = self.f.attrs.create('x', data) + + def test_keys(self): + key_1 = self.f.keys() + self.assertIsInstance(repr(key_1), str) + key_2 = self.grp.keys() + self.assertIsInstance(repr(key_2), str) + + def test_values(self): + value_1 = self.f.values() + self.assertIsInstance(repr(value_1), str) + value_2 = self.grp.values() + self.assertIsInstance(repr(value_2), str) + + def test_items(self): + item_1 = self.f.items() + self.assertIsInstance(repr(item_1), str) + item_2 = self.grp.items() + self.assertIsInstance(repr(item_1), str) + + +class TestRepr(BaseTest): + + """ + repr() works correctly with Unicode names + """ + + USTRING = chr(0xfc) + chr(0xdf) + + def _check_type(self, obj): + self.assertIsInstance(repr(obj), str) + + def test_group(self): + """ Group repr() with unicode """ + grp = self.f.create_group(self.USTRING) + self._check_type(grp) + + def test_dataset(self): + """ Dataset repr() with unicode """ + dset = self.f.create_dataset(self.USTRING, (1,)) + self._check_type(dset) + + def test_namedtype(self): + """ Named type repr() with unicode """ + self.f['type'] = np.dtype('f') + typ = self.f['type'] + self._check_type(typ) + + def test_empty(self): + data = Empty(dtype='f') + self.assertNotEqual(Empty(dtype='i'), data) + self._check_type(data) + + @ut.skipIf(not UNICODE_FILENAMES, "Filesystem unicode support required") + def test_file(self): + """ File object repr() with unicode """ + fname = tempfile.mktemp(self.USTRING+'.hdf5') + try: + with File(fname,'w') as f: + self._check_type(f) + finally: + try: + os.unlink(fname) + except Exception: + pass + +def test_is_hdf5(): + filename = File(tempfile.mktemp(), "w").filename + assert is_hdf5(filename) + # non-existing HDF5 file + filename = tempfile.mktemp() + assert not is_hdf5(filename) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_big_endian_file.py b/MLPY/Lib/site-packages/h5py/tests/test_big_endian_file.py new file mode 100644 index 0000000000000000000000000000000000000000..1246738fba9742d403d9a2d22bc08838d60af162 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_big_endian_file.py @@ -0,0 +1,50 @@ +import pytest + +import numpy as np +from h5py import File +from .common import TestCase +from .data_files import get_data_file_path + + +def test_vlen_big_endian(): + with 
File(get_data_file_path("vlen_string_s390x.h5")) as f: + assert f.attrs["created_on_s390x"] == 1 + + dset = f["DSvariable"] + assert dset[0] == b"Parting" + assert dset[1] == b"is such" + assert dset[2] == b"sweet" + assert dset[3] == b"sorrow..." + + dset = f["DSLEfloat"] + assert dset[0] == 3.14 + assert dset[1] == 1.61 + assert dset[2] == 2.71 + assert dset[3] == 2.41 + assert dset[4] == 1.2 + assert dset.dtype == "f8" + + assert f["DSLEint"][0] == 1 + assert f["DSLEint"].dtype == "i8" + + +class TestEndianess(TestCase): + def test_simple_int_be(self): + fname = self.mktemp() + + arr = np.ndarray(shape=(1,), dtype=">i4", buffer=bytearray([0, 1, 3, 2])) + be_number = 0 * 256 ** 3 + 1 * 256 ** 2 + 3 * 256 ** 1 + 2 * 256 ** 0 + + with File(fname, mode="w") as f: + f.create_dataset("int", data=arr) + + with File(fname, mode="r") as f: + assert f["int"][()][0] == be_number diff --git a/MLPY/Lib/site-packages/h5py/tests/test_completions.py b/MLPY/Lib/site-packages/h5py/tests/test_completions.py new file mode 100644 index 0000000000000000000000000000000000000000..82adde91280b5407d84277f32573f0146193d858 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_completions.py @@ -0,0 +1,52 @@ +from .common import TestCase + + +class TestCompletions(TestCase): + + def test_group_completions(self): + # Test completions on top-level file. + g = self.f.create_group('g') + self.f.create_group('h') + self.f.create_dataset('data', [1, 2, 3]) + self.assertEqual( + self.f._ipython_key_completions_(), + ['data', 'g', 'h'], + ) + + self.f.create_group('data2', [1, 2, 3]) + self.assertEqual( + self.f._ipython_key_completions_(), + ['data', 'data2', 'g', 'h'], + ) + + # Test on subgroup. + g.create_dataset('g_data1', [1, 2, 3]) + g.create_dataset('g_data2', [4, 5, 6]) + self.assertEqual( + g._ipython_key_completions_(), + ['g_data1', 'g_data2'], + ) + + g.create_dataset('g_data3', [7, 8, 9]) + self.assertEqual( + g._ipython_key_completions_(), + ['g_data1', 'g_data2', 'g_data3'], + ) + + def test_attrs_completions(self): + attrs = self.f.attrs + + # Write out of alphabetical order to test that completions come back in + # alphabetical order, as opposed to, say, insertion order. + attrs['b'] = 1 + attrs['a'] = 2 + self.assertEqual( + attrs._ipython_key_completions_(), + ['a', 'b'] + ) + + attrs['c'] = 3 + self.assertEqual( + attrs._ipython_key_completions_(), + ['a', 'b', 'c'] + ) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dataset.py b/MLPY/Lib/site-packages/h5py/tests/test_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..83095f25233ba579240fd967af5caed3aef63d9d --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dataset.py @@ -0,0 +1,2015 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Dataset testing operations. + + Tests all dataset operations, including creation, with the exception of: + + 1. Slicing operations for read and write, handled by module test_slicing + 2. 
Type conversion for read and write (currently untested) +""" + +import pathlib +import os +import sys +import numpy as np +import platform +import pytest +import warnings + +from .common import ut, TestCase +from .data_files import get_data_file_path +from h5py import File, Group, Dataset +from h5py._hl.base import is_empty_dataspace, product +from h5py import h5f, h5t +from h5py.h5py_warnings import H5pyDeprecationWarning +from h5py import version +import h5py +import h5py._hl.selections as sel + + +class BaseDataset(TestCase): + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + + +class TestRepr(BaseDataset): + """ + Feature: repr(Dataset) behaves sensibly + """ + + def test_repr_open(self): + """ repr() works on live and dead datasets """ + ds = self.f.create_dataset('foo', (4,)) + self.assertIsInstance(repr(ds), str) + self.f.close() + self.assertIsInstance(repr(ds), str) + + +class TestCreateShape(BaseDataset): + + """ + Feature: Datasets can be created from a shape only + """ + + def test_create_scalar(self): + """ Create a scalar dataset """ + dset = self.f.create_dataset('foo', ()) + self.assertEqual(dset.shape, ()) + + def test_create_simple(self): + """ Create a size-1 dataset """ + dset = self.f.create_dataset('foo', (1,)) + self.assertEqual(dset.shape, (1,)) + + def test_create_integer(self): + """ Create a size-1 dataset with integer shape""" + dset = self.f.create_dataset('foo', 1) + self.assertEqual(dset.shape, (1,)) + + def test_create_extended(self): + """ Create an extended dataset """ + dset = self.f.create_dataset('foo', (63,)) + self.assertEqual(dset.shape, (63,)) + self.assertEqual(dset.size, 63) + dset = self.f.create_dataset('bar', (6, 10)) + self.assertEqual(dset.shape, (6, 10)) + self.assertEqual(dset.size, (60)) + + def test_create_integer_extended(self): + """ Create an extended dataset """ + dset = self.f.create_dataset('foo', 63) + self.assertEqual(dset.shape, (63,)) + self.assertEqual(dset.size, 63) + dset = self.f.create_dataset('bar', (6, 10)) + self.assertEqual(dset.shape, (6, 10)) + self.assertEqual(dset.size, (60)) + + def test_default_dtype(self): + """ Confirm that the default dtype is float """ + dset = self.f.create_dataset('foo', (63,)) + self.assertEqual(dset.dtype, np.dtype('=f4')) + + def test_missing_shape(self): + """ Missing shape raises TypeError """ + with self.assertRaises(TypeError): + self.f.create_dataset('foo') + + def test_long_double(self): + """ Confirm that the default dtype is float """ + dset = self.f.create_dataset('foo', (63,), dtype=np.longdouble) + if platform.machine() in ['ppc64le']: + pytest.xfail("Storage of long double deactivated on %s" % platform.machine()) + self.assertEqual(dset.dtype, np.longdouble) + + @ut.skipIf(not hasattr(np, "complex256"), "No support for complex256") + def test_complex256(self): + """ Confirm that the default dtype is float """ + dset = self.f.create_dataset('foo', (63,), + dtype=np.dtype('complex256')) + self.assertEqual(dset.dtype, np.dtype('complex256')) + + def test_name_bytes(self): + dset = self.f.create_dataset(b'foo', (1,)) + self.assertEqual(dset.shape, (1,)) + + dset2 = self.f.create_dataset(b'bar/baz', (2,)) + self.assertEqual(dset2.shape, (2,)) + +class TestCreateData(BaseDataset): + + """ + Feature: Datasets can be created from existing data + """ + + def test_create_scalar(self): + """ Create a scalar dataset from existing array """ + data = np.ones((), 'f') + dset = self.f.create_dataset('foo', data=data) + 
self.assertEqual(dset.shape, data.shape) + + def test_create_extended(self): + """ Create an extended dataset from existing data """ + data = np.ones((63,), 'f') + dset = self.f.create_dataset('foo', data=data) + self.assertEqual(dset.shape, data.shape) + + def test_dataset_intermediate_group(self): + """ Create dataset with missing intermediate groups """ + ds = self.f.create_dataset("/foo/bar/baz", shape=(10, 10), dtype='= (1,10,0): + efile_prefix = pathlib.Path(dset.id.get_access_plist().get_efile_prefix().decode()).as_posix() + parent = pathlib.Path(ext_file).parent.as_posix() + assert efile_prefix == parent + + dset2 = self.f.require_dataset('foo', shape, testdata.dtype, efile_prefix=os.path.dirname(ext_file)) + assert dset2.external is not None + dset2[()] == testdata + + def test_name_str(self): + """ External argument may be a file name str only """ + + self.f.create_dataset('foo', (6, 100), external=self.mktemp()) + + def test_name_path(self): + """ External argument may be a file name path only """ + + self.f.create_dataset('foo', (6, 100), + external=pathlib.Path(self.mktemp())) + + def test_iter_multi(self): + """ External argument may be an iterable of multiple tuples """ + + ext_file = self.mktemp() + N = 100 + external = iter((ext_file, x * 1000, 1000) for x in range(N)) + dset = self.f.create_dataset('poo', (6, 100), external=external) + assert len(dset.external) == N + + def test_invalid(self): + """ Test with invalid external lists """ + + shape = (6, 100) + ext_file = self.mktemp() + + for exc_type, external in [ + (TypeError, [ext_file]), + (TypeError, [ext_file, 0]), + (TypeError, [ext_file, 0, h5f.UNLIMITED]), + (ValueError, [(ext_file,)]), + (ValueError, [(ext_file, 0)]), + (ValueError, [(ext_file, 0, h5f.UNLIMITED, 0)]), + (TypeError, [(ext_file, 0, "h5f.UNLIMITED")]), + ]: + with self.assertRaises(exc_type): + self.f.create_dataset('foo', shape, external=external) + + def test_create_expandable(self): + """ Create expandable external dataset """ + + ext_file = self.mktemp() + shape = (128, 64) + maxshape = (None, 64) + exp_dset = self.f.create_dataset('foo', shape=shape, maxshape=maxshape, + external=ext_file) + assert exp_dset.chunks is None + assert exp_dset.shape == shape + assert exp_dset.maxshape == maxshape + + +class TestAutoCreate(BaseDataset): + + """ + Feature: Datasets auto-created from data produce the correct types + """ + def assert_string_type(self, ds, cset, variable=True): + tid = ds.id.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertEqual(tid.get_cset(), cset) + if variable: + assert tid.is_variable_str() + + def test_vlen_bytes(self): + """Assigning byte strings produces a vlen string ASCII dataset """ + self.f['x'] = b"Hello there" + self.assert_string_type(self.f['x'], h5py.h5t.CSET_ASCII) + + self.f['y'] = [b"a", b"bc"] + self.assert_string_type(self.f['y'], h5py.h5t.CSET_ASCII) + + self.f['z'] = np.array([b"a", b"bc"], dtype=np.object_) + self.assert_string_type(self.f['z'], h5py.h5t.CSET_ASCII) + + def test_vlen_unicode(self): + """Assigning unicode strings produces a vlen string UTF-8 dataset """ + self.f['x'] = "Hello there" + chr(0x2034) + self.assert_string_type(self.f['x'], h5py.h5t.CSET_UTF8) + + self.f['y'] = ["a", "bc"] + self.assert_string_type(self.f['y'], h5py.h5t.CSET_UTF8) + + # 2D array; this only works with an array, not nested lists + self.f['z'] = np.array([["a", "bc"]], dtype=np.object_) + self.assert_string_type(self.f['z'], h5py.h5t.CSET_UTF8) + + def test_string_fixed(self): + """ Assignment 
of fixed-length byte string produces a fixed-length + ascii dataset """ + self.f['x'] = np.bytes_("Hello there") + ds = self.f['x'] + self.assert_string_type(ds, h5py.h5t.CSET_ASCII, variable=False) + self.assertEqual(ds.id.get_type().get_size(), 11) + + +class TestCreateLike(BaseDataset): + def test_no_chunks(self): + self.f['lol'] = np.arange(25).reshape(5, 5) + self.f.create_dataset_like('like_lol', self.f['lol']) + dslike = self.f['like_lol'] + self.assertEqual(dslike.shape, (5, 5)) + self.assertIs(dslike.chunks, None) + + def test_track_times(self): + orig = self.f.create_dataset('honda', data=np.arange(12), + track_times=True) + self.assertNotEqual(0, h5py.h5g.get_objinfo(orig._id).mtime) + similar = self.f.create_dataset_like('hyundai', orig) + self.assertNotEqual(0, h5py.h5g.get_objinfo(similar._id).mtime) + + orig = self.f.create_dataset('ibm', data=np.arange(12), + track_times=False) + self.assertEqual(0, h5py.h5g.get_objinfo(orig._id).mtime) + similar = self.f.create_dataset_like('lenovo', orig) + self.assertEqual(0, h5py.h5g.get_objinfo(similar._id).mtime) + + def test_maxshape(self): + """ Test when other.maxshape != other.shape """ + + other = self.f.create_dataset('other', (10,), maxshape=20) + similar = self.f.create_dataset_like('sim', other) + self.assertEqual(similar.shape, (10,)) + self.assertEqual(similar.maxshape, (20,)) + +class TestChunkIterator(BaseDataset): + def test_no_chunks(self): + dset = self.f.create_dataset("foo", ()) + with self.assertRaises(TypeError): + dset.iter_chunks() + + def test_1d(self): + dset = self.f.create_dataset("foo", (100,), chunks=(32,)) + expected = ((slice(0,32,1),), (slice(32,64,1),), (slice(64,96,1),), + (slice(96,100,1),)) + self.assertEqual(list(dset.iter_chunks()), list(expected)) + expected = ((slice(50,64,1),), (slice(64,96,1),), (slice(96,97,1),)) + self.assertEqual(list(dset.iter_chunks(np.s_[50:97])), list(expected)) + + def test_2d(self): + dset = self.f.create_dataset("foo", (100,100), chunks=(32,64)) + expected = ((slice(0, 32, 1), slice(0, 64, 1)), (slice(0, 32, 1), + slice(64, 100, 1)), (slice(32, 64, 1), slice(0, 64, 1)), + (slice(32, 64, 1), slice(64, 100, 1)), (slice(64, 96, 1), + slice(0, 64, 1)), (slice(64, 96, 1), slice(64, 100, 1)), + (slice(96, 100, 1), slice(0, 64, 1)), (slice(96, 100, 1), + slice(64, 100, 1))) + self.assertEqual(list(dset.iter_chunks()), list(expected)) + + expected = ((slice(48, 52, 1), slice(40, 50, 1)),) + self.assertEqual(list(dset.iter_chunks(np.s_[48:52,40:50])), list(expected)) + + def test_2d_partial_slice(self): + dset = self.f.create_dataset("foo", (5,5), chunks=(2,2)) + expected = ((slice(3, 4, 1), slice(3, 4, 1)), + (slice(3, 4, 1), slice(4, 5, 1)), + (slice(4, 5, 1), slice(3, 4, 1)), + (slice(4, 5, 1), slice(4, 5, 1))) + sel = slice(3,5) + self.assertEqual(list(dset.iter_chunks((sel, sel))), list(expected)) + + + +class TestResize(BaseDataset): + + """ + Feature: Datasets created with "maxshape" may be resized + """ + + def test_create(self): + """ Create dataset with "maxshape" """ + dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60)) + self.assertIsNot(dset.chunks, None) + self.assertEqual(dset.maxshape, (20, 60)) + + def test_create_1D(self): + """ Create dataset with "maxshape" using integer maxshape""" + dset = self.f.create_dataset('foo', (20,), maxshape=20) + self.assertIsNot(dset.chunks, None) + self.assertEqual(dset.maxshape, (20,)) + + dset = self.f.create_dataset('bar', 20, maxshape=20) + self.assertEqual(dset.maxshape, (20,)) + + def test_resize(self): + 
""" Datasets may be resized up to maxshape """ + dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60)) + self.assertEqual(dset.shape, (20, 30)) + dset.resize((20, 50)) + self.assertEqual(dset.shape, (20, 50)) + dset.resize((20, 60)) + self.assertEqual(dset.shape, (20, 60)) + + def test_resize_1D(self): + """ Datasets may be resized up to maxshape using integer maxshape""" + dset = self.f.create_dataset('foo', 20, maxshape=40) + self.assertEqual(dset.shape, (20,)) + dset.resize((30,)) + self.assertEqual(dset.shape, (30,)) + + def test_resize_over(self): + """ Resizing past maxshape triggers an exception """ + dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60)) + with self.assertRaises(Exception): + dset.resize((20, 70)) + + def test_resize_nonchunked(self): + """ Resizing non-chunked dataset raises TypeError """ + dset = self.f.create_dataset("foo", (20, 30)) + with self.assertRaises(TypeError): + dset.resize((20, 60)) + + def test_resize_axis(self): + """ Resize specified axis """ + dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60)) + dset.resize(50, axis=1) + self.assertEqual(dset.shape, (20, 50)) + + def test_axis_exc(self): + """ Illegal axis raises ValueError """ + dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60)) + with self.assertRaises(ValueError): + dset.resize(50, axis=2) + + def test_zero_dim(self): + """ Allow zero-length initial dims for unlimited axes (issue 111) """ + dset = self.f.create_dataset('foo', (15, 0), maxshape=(15, None)) + self.assertEqual(dset.shape, (15, 0)) + self.assertEqual(dset.maxshape, (15, None)) + + +class TestDtype(BaseDataset): + + """ + Feature: Dataset dtype is available as .dtype property + """ + + def test_dtype(self): + """ Retrieve dtype from dataset """ + dset = self.f.create_dataset('foo', (5,), '|S10') + self.assertEqual(dset.dtype, np.dtype('|S10')) + + def test_dtype_complex32(self): + """ Retrieve dtype from complex float16 dataset (gh-2156) """ + # No native support in numpy as of v1.23.3, so expect compound type. 
+ complex32 = np.dtype([('r', np.float16), ('i', np.float16)]) + dset = self.f.create_dataset('foo', (5,), complex32) + self.assertEqual(dset.dtype, complex32) + + +class TestLen(BaseDataset): + + """ + Feature: Size of first axis is available via Python's len + """ + + def test_len(self): + """ Python len() (under 32 bits) """ + dset = self.f.create_dataset('foo', (312, 15)) + self.assertEqual(len(dset), 312) + + def test_len_big(self): + """ Python len() vs Dataset.len() """ + dset = self.f.create_dataset('foo', (2 ** 33, 15)) + self.assertEqual(dset.shape, (2 ** 33, 15)) + if sys.maxsize == 2 ** 31 - 1: + with self.assertRaises(OverflowError): + len(dset) + else: + self.assertEqual(len(dset), 2 ** 33) + self.assertEqual(dset.len(), 2 ** 33) + + +class TestIter(BaseDataset): + + """ + Feature: Iterating over a dataset yields rows + """ + + def test_iter(self): + """ Iterating over a dataset yields rows """ + data = np.arange(30, dtype='f').reshape((10, 3)) + dset = self.f.create_dataset('foo', data=data) + for x, y in zip(dset, data): + self.assertEqual(len(x), 3) + self.assertArrayEqual(x, y) + + def test_iter_scalar(self): + """ Iterating over scalar dataset raises TypeError """ + dset = self.f.create_dataset('foo', shape=()) + with self.assertRaises(TypeError): + [x for x in dset] + + +class TestStrings(BaseDataset): + + """ + Feature: Datasets created with vlen and fixed datatypes correctly + translate to and from HDF5 + """ + + def test_vlen_bytes(self): + """ Vlen bytes dataset maps to vlen ascii in the file """ + dt = h5py.string_dtype(encoding='ascii') + ds = self.f.create_dataset('x', (100,), dtype=dt) + tid = ds.id.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII) + string_info = h5py.check_string_dtype(ds.dtype) + self.assertEqual(string_info.encoding, 'ascii') + + def test_vlen_bytes_fillvalue(self): + """ Vlen bytes dataset handles fillvalue """ + dt = h5py.string_dtype(encoding='ascii') + fill_value = b'bar' + ds = self.f.create_dataset('x', (100,), dtype=dt, fillvalue=fill_value) + self.assertEqual(self.f['x'][0], fill_value) + self.assertEqual(self.f['x'].asstr()[0], fill_value.decode()) + self.assertEqual(self.f['x'].fillvalue, fill_value) + + def test_vlen_unicode(self): + """ Vlen unicode dataset maps to vlen utf-8 in the file """ + dt = h5py.string_dtype() + ds = self.f.create_dataset('x', (100,), dtype=dt) + tid = ds.id.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8) + string_info = h5py.check_string_dtype(ds.dtype) + self.assertEqual(string_info.encoding, 'utf-8') + + def test_vlen_unicode_fillvalue(self): + """ Vlen unicode dataset handles fillvalue """ + dt = h5py.string_dtype() + fill_value = 'bár' + ds = self.f.create_dataset('x', (100,), dtype=dt, fillvalue=fill_value) + self.assertEqual(self.f['x'][0], fill_value.encode("utf-8")) + self.assertEqual(self.f['x'].asstr()[0], fill_value) + self.assertEqual(self.f['x'].fillvalue, fill_value.encode("utf-8")) + + def test_fixed_ascii(self): + """ Fixed-length bytes dataset maps to fixed-length ascii in the file + """ + dt = np.dtype("|S10") + ds = self.f.create_dataset('x', (100,), dtype=dt) + tid = ds.id.get_type() + self.assertEqual(type(tid), h5py.h5t.TypeStringID) + self.assertFalse(tid.is_variable_str()) + self.assertEqual(tid.get_size(), 10) + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII) + string_info = h5py.check_string_dtype(ds.dtype) + 
self.assertEqual(string_info.encoding, 'ascii') + self.assertEqual(string_info.length, 10) + + def test_fixed_bytes_fillvalue(self): + """ Vlen bytes dataset handles fillvalue """ + dt = h5py.string_dtype(encoding='ascii', length=10) + fill_value = b'bar' + ds = self.f.create_dataset('x', (100,), dtype=dt, fillvalue=fill_value) + self.assertEqual(self.f['x'][0], fill_value) + self.assertEqual(self.f['x'].asstr()[0], fill_value.decode()) + self.assertEqual(self.f['x'].fillvalue, fill_value) + + def test_fixed_utf8(self): + dt = h5py.string_dtype(encoding='utf-8', length=5) + ds = self.f.create_dataset('x', (100,), dtype=dt) + tid = ds.id.get_type() + self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8) + s = 'cù' + ds[0] = s.encode('utf-8') + ds[1] = s + ds[2:4] = [s, s] + ds[4:6] = np.array([s, s], dtype=object) + ds[6:8] = np.array([s.encode('utf-8')] * 2, dtype=dt) + with self.assertRaises(TypeError): + ds[8:10] = np.array([s, s], dtype='U') + + np.testing.assert_array_equal(ds[:8], np.array([s.encode('utf-8')] * 8, dtype='S')) + + def test_fixed_utf_8_fillvalue(self): + """ Vlen unicode dataset handles fillvalue """ + dt = h5py.string_dtype(encoding='utf-8', length=10) + fill_value = 'bár'.encode("utf-8") + ds = self.f.create_dataset('x', (100,), dtype=dt, fillvalue=fill_value) + self.assertEqual(self.f['x'][0], fill_value) + self.assertEqual(self.f['x'].asstr()[0], fill_value.decode("utf-8")) + self.assertEqual(self.f['x'].fillvalue, fill_value) + + def test_fixed_unicode(self): + """ Fixed-length unicode datasets are unsupported (raise TypeError) """ + dt = np.dtype("|U10") + with self.assertRaises(TypeError): + ds = self.f.create_dataset('x', (100,), dtype=dt) + + def test_roundtrip_vlen_bytes(self): + """ writing and reading to vlen bytes dataset preserves type and content + """ + dt = h5py.string_dtype(encoding='ascii') + ds = self.f.create_dataset('x', (100,), dtype=dt) + data = b"Hello\xef" + ds[0] = data + out = ds[0] + self.assertEqual(type(out), bytes) + self.assertEqual(out, data) + + def test_roundtrip_fixed_bytes(self): + """ Writing to and reading from fixed-length bytes dataset preserves + type and content """ + dt = np.dtype("|S10") + ds = self.f.create_dataset('x', (100,), dtype=dt) + data = b"Hello\xef" + ds[0] = data + out = ds[0] + self.assertEqual(type(out), np.bytes_) + self.assertEqual(out, data) + + def test_retrieve_vlen_unicode(self): + dt = h5py.string_dtype() + ds = self.f.create_dataset('x', (10,), dtype=dt) + data = "fàilte" + ds[0] = data + self.assertIsInstance(ds[0], bytes) + out = ds.asstr()[0] + self.assertIsInstance(out, str) + self.assertEqual(out, data) + + def test_asstr(self): + ds = self.f.create_dataset('x', (10,), dtype=h5py.string_dtype()) + data = "fàilte" + ds[0] = data + + strwrap1 = ds.asstr('ascii') + with self.assertRaises(UnicodeDecodeError): + out = strwrap1[0] + + # Different errors parameter + self.assertEqual(ds.asstr('ascii', 'ignore')[0], 'filte') + + # latin-1 will decode it but give the wrong text + self.assertNotEqual(ds.asstr('latin-1')[0], data) + + # len of ds + self.assertEqual(10, len(ds.asstr())) + + # Array output + np.testing.assert_array_equal( + ds.asstr()[:1], np.array([data], dtype=object) + ) + + np.testing.assert_array_equal( + np.asarray(ds.asstr())[:1], np.array([data], dtype=object) + ) + + def test_asstr_fixed(self): + dt = h5py.string_dtype(length=5) + ds = self.f.create_dataset('x', (10,), dtype=dt) + data = 'cù' + ds[0] = np.array(data.encode('utf-8'), dtype=dt) + + self.assertIsInstance(ds[0], np.bytes_) + 
out = ds.asstr()[0] + self.assertIsInstance(out, str) + self.assertEqual(out, data) + + # Different errors parameter + self.assertEqual(ds.asstr('ascii', 'ignore')[0], 'c') + + # latin-1 will decode it but give the wrong text + self.assertNotEqual(ds.asstr('latin-1')[0], data) + + # Array output + np.testing.assert_array_equal( + ds.asstr()[:1], np.array([data], dtype=object) + ) + + def test_unicode_write_error(self): + """Encoding error when writing a non-ASCII string to an ASCII vlen dataset""" + dt = h5py.string_dtype('ascii') + ds = self.f.create_dataset('x', (100,), dtype=dt) + data = "fàilte" + with self.assertRaises(UnicodeEncodeError): + ds[0] = data + + def test_unicode_write_bytes(self): + """ Writing valid utf-8 byte strings to a unicode vlen dataset is OK + """ + dt = h5py.string_dtype() + ds = self.f.create_dataset('x', (100,), dtype=dt) + data = (u"Hello there" + chr(0x2034)).encode('utf8') + ds[0] = data + out = ds[0] + self.assertEqual(type(out), bytes) + self.assertEqual(out, data) + + def test_vlen_bytes_write_ascii_str(self): + """ Writing an ascii str to ascii vlen dataset is OK + """ + dt = h5py.string_dtype('ascii') + ds = self.f.create_dataset('x', (100,), dtype=dt) + data = "ASCII string" + ds[0] = data + out = ds[0] + self.assertEqual(type(out), bytes) + self.assertEqual(out, data.encode('ascii')) + + +class TestCompound(BaseDataset): + + """ + Feature: Compound types correctly round-trip + """ + + def test_rt(self): + """ Compound types are read back in correct order (issue 236)""" + + dt = np.dtype([ ('weight', np.float64), + ('cputime', np.float64), + ('walltime', np.float64), + ('parents_offset', np.uint32), + ('n_parents', np.uint32), + ('status', np.uint8), + ('endpoint_type', np.uint8), ]) + + testdata = np.ndarray((16,), dtype=dt) + for key in dt.fields: + testdata[key] = np.random.random((16,)) * 100 + + self.f['test'] = testdata + outdata = self.f['test'][...] + self.assertTrue(np.all(outdata == testdata)) + self.assertEqual(outdata.dtype, testdata.dtype) + + def test_assign(self): + dt = np.dtype([ ('weight', (np.float64, 3)), + ('endpoint_type', np.uint8), ]) + + testdata = np.ndarray((16,), dtype=dt) + for key in dt.fields: + testdata[key] = np.random.random(size=testdata[key].shape) * 100 + + ds = self.f.create_dataset('test', (16,), dtype=dt) + for key in dt.fields: + ds[key] = testdata[key] + + outdata = self.f['test'][...] 
+ + self.assertTrue(np.all(outdata == testdata)) + self.assertEqual(outdata.dtype, testdata.dtype) + + def test_fields(self): + dt = np.dtype([ + ('x', np.float64), + ('y', np.float64), + ('z', np.float64), + ]) + + testdata = np.ndarray((16,), dtype=dt) + for key in dt.fields: + testdata[key] = np.random.random((16,)) * 100 + + self.f['test'] = testdata + + # Extract multiple fields + np.testing.assert_array_equal( + self.f['test'].fields(['x', 'y'])[:], testdata[['x', 'y']] + ) + # Extract single field + np.testing.assert_array_equal( + self.f['test'].fields('x')[:], testdata['x'] + ) + # Check __array__() method of fields wrapper + np.testing.assert_array_equal( + np.asarray(self.f['test'].fields(['x', 'y'])), testdata[['x', 'y']] + ) + # Check type conversion of __array__() method + dt_int = np.dtype([('x', np.int32)]) + np.testing.assert_array_equal( + np.asarray(self.f['test'].fields(['x']), dtype=dt_int), + testdata[['x']].astype(dt_int) + ) + + # Check len() on fields wrapper + assert len(self.f['test'].fields('x')) == 16 + + def test_nested_compound_vlen(self): + dt_inner = np.dtype([('a', h5py.vlen_dtype(np.int32)), + ('b', h5py.vlen_dtype(np.int32))]) + + dt = np.dtype([('f1', h5py.vlen_dtype(dt_inner)), + ('f2', np.int64)]) + + inner1 = (np.array(range(1, 3), dtype=np.int32), + np.array(range(6, 9), dtype=np.int32)) + + inner2 = (np.array(range(10, 14), dtype=np.int32), + np.array(range(16, 21), dtype=np.int32)) + + data = np.array([(np.array([inner1, inner2], dtype=dt_inner), 2), + (np.array([inner1], dtype=dt_inner), 3)], + dtype=dt) + + self.f["ds"] = data + out = self.f["ds"] + + # Specifying check_alignment=False because vlen fields have 8 bytes of padding + # because the vlen datatype in hdf5 occupies 16 bytes + self.assertArrayEqual(out, data, check_alignment=False) + + +class TestSubarray(BaseDataset): + def test_write_list(self): + ds = self.f.create_dataset("a", (1,), dtype="3int8") + ds[0] = [1, 2, 3] + np.testing.assert_array_equal(ds[:], [[1, 2, 3]]) + + ds[:] = [[4, 5, 6]] + np.testing.assert_array_equal(ds[:], [[4, 5, 6]]) + + def test_write_array(self): + ds = self.f.create_dataset("a", (1,), dtype="3int8") + ds[0] = np.array([1, 2, 3]) + np.testing.assert_array_equal(ds[:], [[1, 2, 3]]) + + ds[:] = np.array([[4, 5, 6]]) + np.testing.assert_array_equal(ds[:], [[4, 5, 6]]) + + +class TestEnum(BaseDataset): + + """ + Feature: Enum datatype info is preserved, read/write as integer + """ + + EDICT = {'RED': 0, 'GREEN': 1, 'BLUE': 42} + + def test_create(self): + """ Enum datasets can be created and type correctly round-trips """ + dt = h5py.enum_dtype(self.EDICT, basetype='i') + ds = self.f.create_dataset('x', (100, 100), dtype=dt) + dt2 = ds.dtype + dict2 = h5py.check_enum_dtype(dt2) + self.assertEqual(dict2, self.EDICT) + + def test_readwrite(self): + """ Enum datasets can be read/written as integers """ + dt = h5py.enum_dtype(self.EDICT, basetype='i4') + ds = self.f.create_dataset('x', (100, 100), dtype=dt) + ds[35, 37] = 42 + ds[1, :] = 1 + self.assertEqual(ds[35, 37], 42) + self.assertArrayEqual(ds[1, :], np.array((1,) * 100, dtype='i4')) + + +class TestFloats(BaseDataset): + + """ + Test support for mini and extended-precision floats + """ + + def _exectest(self, dt): + dset = self.f.create_dataset('x', (100,), dtype=dt) + self.assertEqual(dset.dtype, dt) + data = np.ones((100,), dtype=dt) + dset[...] 
= data + self.assertArrayEqual(dset[...], data) + + @ut.skipUnless(hasattr(np, 'float16'), "NumPy float16 support required") + def test_mini(self): + """ Mini-floats round trip """ + self._exectest(np.dtype('float16')) + + # TODO: move these tests to test_h5t + def test_mini_mapping(self): + """ Test mapping for float16 """ + if hasattr(np, 'float16'): + self.assertEqual(h5t.IEEE_F16LE.dtype, np.dtype('= (1, 10, 5), + "chunk info requires HDF5 >= 1.10.5") +def test_get_chunk_details(): + from io import BytesIO + buf = BytesIO() + with h5py.File(buf, 'w') as fout: + fout.create_dataset('test', shape=(100, 100), chunks=(10, 10), dtype='i4') + fout['test'][:] = 1 + + buf.seek(0) + with h5py.File(buf, 'r') as fin: + ds = fin['test'].id + + assert ds.get_num_chunks() == 100 + for j in range(100): + offset = tuple(np.array(np.unravel_index(j, (10, 10))) * 10) + + si = ds.get_chunk_info(j) + assert si.chunk_offset == offset + assert si.filter_mask == 0 + assert si.byte_offset is not None + assert si.size > 0 + + si = ds.get_chunk_info_by_coord((0, 0)) + assert si.chunk_offset == (0, 0) + assert si.filter_mask == 0 + assert si.byte_offset is not None + assert si.size > 0 + + +@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 12, 3) or + (h5py.version.hdf5_version_tuple >= (1, 10, 10) and h5py.version.hdf5_version_tuple < (1, 10, 99)), + "chunk iteration requires HDF5 1.10.10 and later 1.10, or 1.12.3 and later") +def test_chunk_iter(): + """H5Dchunk_iter() for chunk information""" + from io import BytesIO + buf = BytesIO() + with h5py.File(buf, 'w') as f: + f.create_dataset('test', shape=(100, 100), chunks=(10, 10), dtype='i4') + f['test'][:] = 1 + + buf.seek(0) + with h5py.File(buf, 'r') as f: + dsid = f['test'].id + + num_chunks = dsid.get_num_chunks() + assert num_chunks == 100 + ci = {} + for j in range(num_chunks): + si = dsid.get_chunk_info(j) + ci[si.chunk_offset] = si + + def callback(chunk_info): + known = ci[chunk_info.chunk_offset] + assert chunk_info.chunk_offset == known.chunk_offset + assert chunk_info.filter_mask == known.filter_mask + assert chunk_info.byte_offset == known.byte_offset + assert chunk_info.size == known.size + + dsid.chunk_iter(callback) + + +def test_empty_shape(writable_file): + ds = writable_file.create_dataset('empty', dtype='int32') + assert ds.shape is None + assert ds.maxshape is None + + +def test_zero_storage_size(): + # https://github.com/h5py/h5py/issues/1475 + from io import BytesIO + buf = BytesIO() + with h5py.File(buf, 'w') as fout: + fout.create_dataset('empty', dtype='uint8') + + buf.seek(0) + with h5py.File(buf, 'r') as fin: + assert fin['empty'].chunks is None + assert fin['empty'].id.get_offset() is None + assert fin['empty'].id.get_storage_size() == 0 + + +def test_python_int_uint64(writable_file): + # https://github.com/h5py/h5py/issues/1547 + data = [np.iinfo(np.int64).max, np.iinfo(np.int64).max + 1] + + # Check creating a new dataset + ds = writable_file.create_dataset('x', data=data, dtype=np.uint64) + assert ds.dtype == np.dtype(np.uint64) + np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64)) + + # Check writing to an existing dataset + ds[:] = data + np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64)) + + +def test_setitem_fancy_indexing(writable_file): + # https://github.com/h5py/h5py/issues/1593 + arr = writable_file.create_dataset('data', (5, 1000, 2), dtype=np.uint8) + block = np.random.randint(255, size=(5, 3, 2)) + arr[:, [0, 2, 4], ...] 
= block + + +def test_vlen_spacepad(): + with File(get_data_file_path("vlen_string_dset.h5")) as f: + assert f["DS1"][0] == b"Parting" + + +def test_vlen_nullterm(): + with File(get_data_file_path("vlen_string_dset_utc.h5")) as f: + assert f["ds1"][0] == b"2009-12-20T10:16:18.662409Z" + + +def test_allow_unknown_filter(writable_file): + # apparently 256-511 are reserved for testing purposes + fake_filter_id = 256 + ds = writable_file.create_dataset( + 'data', shape=(10, 10), dtype=np.uint8, compression=fake_filter_id, + allow_unknown_filter=True + ) + assert str(fake_filter_id) in ds._filters + + +def test_dset_chunk_cache(): + """Chunk cache configuration for individual datasets.""" + from io import BytesIO + buf = BytesIO() + with h5py.File(buf, 'w') as fout: + ds = fout.create_dataset( + 'x', shape=(10, 20), chunks=(5, 4), dtype='i4', + rdcc_nbytes=2 * 1024 * 1024, rdcc_w0=0.2, rdcc_nslots=997) + ds_chunk_cache = ds.id.get_access_plist().get_chunk_cache() + assert fout.id.get_access_plist().get_cache()[1:] != ds_chunk_cache + assert ds_chunk_cache == (997, 2 * 1024 * 1024, 0.2) + + buf.seek(0) + with h5py.File(buf, 'r') as fin: + ds = fin.require_dataset( + 'x', shape=(10, 20), dtype='i4', + rdcc_nbytes=3 * 1024 * 1024, rdcc_w0=0.67, rdcc_nslots=709) + ds_chunk_cache = ds.id.get_access_plist().get_chunk_cache() + assert fin.id.get_access_plist().get_cache()[1:] != ds_chunk_cache + assert ds_chunk_cache == (709, 3 * 1024 * 1024, 0.67) + + +class TestCommutative(BaseDataset): + """ + Test the symmetry of operators, at least with the numpy types. + Issue: https://github.com/h5py/h5py/issues/1947 + """ + def test_numpy_commutative(self,): + """ + Create a h5py dataset, extract one element convert to numpy + Check that it returns symmetric response to == and != + """ + shape = (100,1) + dset = self.f.create_dataset("test", shape, dtype=float, + data=np.random.rand(*shape)) + + # grab a value from the elements, ie dset[0, 0] + # check that mask arrays are commutative wrt ==, != + val = np.float64(dset[0, 0]) + + assert np.all((val == dset) == (dset == val)) + assert np.all((val != dset) == (dset != val)) + + # generate sample not in the dset, ie max(dset)+delta + # check that mask arrays are commutative wrt ==, != + delta = 0.001 + nval = np.nanmax(dset)+delta + + assert np.all((nval == dset) == (dset == nval)) + assert np.all((nval != dset) == (dset != nval)) + + def test_basetype_commutative(self,): + """ + Create a h5py dataset and check basetype compatibility. + Check that operation is symmetric, even if it is potentially + not meaningful. + """ + shape = (100,1) + dset = self.f.create_dataset("test", shape, dtype=float, + data=np.random.rand(*shape)) + + # generate float type, sample float(0.) + # check that operation is symmetric (but potentially meaningless) + val = float(0.) 
+ assert (val == dset) == (dset == val) + assert (val != dset) == (dset != val) + +class TestVirtualPrefix(BaseDataset): + """ + Test setting virtual prefix + """ + def test_virtual_prefix_create(self): + shape = (100,1) + virtual_prefix = "/path/to/virtual" + dset = self.f.create_dataset("test", shape, dtype=float, + data=np.random.rand(*shape), + virtual_prefix = virtual_prefix) + + virtual_prefix_readback = pathlib.Path(dset.id.get_access_plist().get_virtual_prefix().decode()).as_posix() + assert virtual_prefix_readback == virtual_prefix + + def test_virtual_prefix_require(self): + virtual_prefix = "/path/to/virtual" + dset = self.f.require_dataset('foo', (10, 3), 'f', virtual_prefix = virtual_prefix) + virtual_prefix_readback = pathlib.Path(dset.id.get_access_plist().get_virtual_prefix().decode()).as_posix() + self.assertEqual(virtual_prefix, virtual_prefix_readback) + self.assertIsInstance(dset, Dataset) + self.assertEqual(dset.shape, (10, 3)) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dataset_getitem.py b/MLPY/Lib/site-packages/h5py/tests/test_dataset_getitem.py new file mode 100644 index 0000000000000000000000000000000000000000..1b478e6e15f8896948a132c144a6e7db64fcdc23 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dataset_getitem.py @@ -0,0 +1,618 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Tests the h5py.Dataset.__getitem__ method. + + This module does not specifically test type conversion. The "type" axis + therefore only tests objects which interact with the slicing system in + unreliable ways; for example, compound and array types. + + See test_dataset_getitem_types for type-conversion tests. + + Tests are organized into TestCases by dataset shape and type. Test + methods vary by slicing arg type. + + 1. Dataset shape: + Empty + Scalar + 1D + 3D + + 2. Type: + Float + Compound + Array + + 3. 
Slicing arg types: + Ellipsis + Empty tuple + Regular slice + MultiBlockSlice + Indexing + Index list + Boolean mask + Field names +""" + +import sys + +import numpy as np +import h5py + +from .common import ut, TestCase + + +class TestEmpty(TestCase): + + def setUp(self): + TestCase.setUp(self) + sid = h5py.h5s.create(h5py.h5s.NULL) + tid = h5py.h5t.C_S1.copy() + tid.set_size(10) + dsid = h5py.h5d.create(self.f.id, b'x', tid, sid) + self.dset = h5py.Dataset(dsid) + self.empty_obj = h5py.Empty(np.dtype("S10")) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 0) + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, None) + + def test_size(self): + """ Verify shape """ + self.assertEqual(self.dset.size, None) + + def test_nbytes(self): + """ Verify nbytes """ + self.assertEqual(self.dset.nbytes, 0) + + def test_ellipsis(self): + self.assertEqual(self.dset[...], self.empty_obj) + + def test_tuple(self): + self.assertEqual(self.dset[()], self.empty_obj) + + def test_slice(self): + """ slice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0:4] + + def test_multi_block_slice(self): + """ MultiBlockSlice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[h5py.MultiBlockSlice()] + + def test_index(self): + """ index -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0] + + def test_indexlist(self): + """ index list -> ValueError """ + with self.assertRaises(ValueError): + self.dset[[1,2,5]] + + def test_mask(self): + """ mask -> ValueError """ + mask = np.array(True, dtype='bool') + with self.assertRaises(ValueError): + self.dset[mask] + + def test_fieldnames(self): + """ field name -> ValueError """ + with self.assertRaises(ValueError): + self.dset['field'] + + +class TestScalarFloat(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.array(42.5, dtype=np.double) + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 0) + + def test_size(self): + """ Verify size """ + self.assertEqual(self.dset.size, 1) + + def test_nbytes(self): + """ Verify nbytes """ + self.assertEqual(self.dset.nbytes, self.data.dtype.itemsize) # not sure if 'f' is always alias for 'f4' + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, tuple()) + + def test_ellipsis(self): + """ Ellipsis -> scalar ndarray """ + out = self.dset[...] 
+ self.assertArrayEqual(out, self.data) + + def test_tuple(self): + """ () -> bare item """ + out = self.dset[()] + self.assertArrayEqual(out, self.data.item()) + + def test_slice(self): + """ slice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0:4] + + def test_multi_block_slice(self): + """ MultiBlockSlice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[h5py.MultiBlockSlice()] + + def test_index(self): + """ index -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0] + + # FIXME: NumPy has IndexError instead + def test_indexlist(self): + """ index list -> ValueError """ + with self.assertRaises(ValueError): + self.dset[[1,2,5]] + + # FIXME: NumPy permits this + def test_mask(self): + """ mask -> ValueError """ + mask = np.array(True, dtype='bool') + with self.assertRaises(ValueError): + self.dset[mask] + + def test_fieldnames(self): + """ field name -> ValueError (no fields) """ + with self.assertRaises(ValueError): + self.dset['field'] + + +class TestScalarCompound(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.array((42.5, -118, "Hello"), dtype=[('a', 'f'), ('b', 'i'), ('c', '|S10')]) + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 0) + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, tuple()) + + def test_size(self): + """ Verify size """ + self.assertEqual(self.dset.size, 1) + + def test_nbytes(self): + """ Verify nbytes """ + self.assertEqual(self.dset.nbytes, self.data.dtype.itemsize) + + def test_ellipsis(self): + """ Ellipsis -> scalar ndarray """ + out = self.dset[...] + # assertArrayEqual doesn't work with compounds; do manually + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, self.data.shape) + self.assertEqual(out.dtype, self.data.dtype) + + def test_tuple(self): + """ () -> np.void instance """ + out = self.dset[()] + self.assertIsInstance(out, np.void) + self.assertEqual(out.dtype, self.data.dtype) + + def test_slice(self): + """ slice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0:4] + + def test_multi_block_slice(self): + """ MultiBlockSlice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[h5py.MultiBlockSlice()] + + def test_index(self): + """ index -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0] + + # FIXME: NumPy has IndexError instead + def test_indexlist(self): + """ index list -> ValueError """ + with self.assertRaises(ValueError): + self.dset[[1,2,5]] + + # FIXME: NumPy permits this + def test_mask(self): + """ mask -> ValueError """ + mask = np.array(True, dtype='bool') + with self.assertRaises(ValueError): + self.dset[mask] + + # FIXME: NumPy returns a scalar ndarray + def test_fieldnames(self): + """ field name -> bare value """ + out = self.dset['a'] + self.assertIsInstance(out, np.float32) + self.assertEqual(out, self.dset['a']) + + +class TestScalarArray(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.dt = np.dtype('(3,2)f') + self.data = np.array([(3.2, -119), (42, 99.8), (3.14, 0)], dtype='f') + self.dset = self.f.create_dataset('x', (), dtype=self.dt) + self.dset[...] 
= self.data + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.data.ndim, 2) + self.assertEqual(self.dset.ndim, 0) + + def test_size(self): + """ Verify size """ + self.assertEqual(self.dset.size, 1) + + def test_nbytes(self): + """ Verify nbytes """ + self.assertEqual(self.dset.nbytes, self.dset.dtype.itemsize) # not sure if 'f' is always alias for 'f4' + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.data.shape, (3, 2)) + self.assertEqual(self.dset.shape, tuple()) + + def test_ellipsis(self): + """ Ellipsis -> ndarray promoted to underlying shape """ + out = self.dset[...] + self.assertArrayEqual(out, self.data) + + def test_tuple(self): + """ () -> same as ellipsis """ + out = self.dset[...] + self.assertArrayEqual(out, self.data) + + def test_slice(self): + """ slice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0:4] + + def test_multi_block_slice(self): + """ MultiBlockSlice -> ValueError """ + with self.assertRaises(ValueError): + self.dset[h5py.MultiBlockSlice()] + + def test_index(self): + """ index -> ValueError """ + with self.assertRaises(ValueError): + self.dset[0] + + def test_indexlist(self): + """ index list -> ValueError """ + with self.assertRaises(ValueError): + self.dset[[]] + + def test_mask(self): + """ mask -> ValueError """ + mask = np.array(True, dtype='bool') + with self.assertRaises(ValueError): + self.dset[mask] + + def test_fieldnames(self): + """ field name -> ValueError (no fields) """ + with self.assertRaises(ValueError): + self.dset['field'] + + +class Test1DZeroFloat(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.ones((0,), dtype='f') + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 1) + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, (0,)) + + def test_ellipsis(self): + """ Ellipsis -> ndarray of matching shape """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[...]) + + def test_tuple(self): + """ () -> same as ellipsis """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[()]) + + def test_slice(self): + """ slice -> ndarray of shape (0,) """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[0:4]) + + def test_slice_stop_less_than_start(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[7:5]) + + def test_index(self): + """ index -> out of range """ + with self.assertRaises(IndexError): + self.dset[0] + + def test_indexlist(self): + """ index list """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[[]]) + + def test_mask(self): + """ mask -> ndarray of matching shape """ + mask = np.ones((0,), dtype='bool') + self.assertNumpyBehavior( + self.dset, + self.data, + np.s_[mask], + # Fast reader doesn't work with boolean masks + skip_fast_reader=True, + ) + + def test_fieldnames(self): + """ field name -> ValueError (no fields) """ + with self.assertRaises(ValueError): + self.dset['field'] + + +class Test1DFloat(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.arange(13).astype('f') + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 1) + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, (13,)) + + def test_ellipsis(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[...]) + + def test_tuple(self): + 
self.assertNumpyBehavior(self.dset, self.data, np.s_[()]) + + def test_slice_simple(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[0:4]) + + def test_slice_zerosize(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[4:4]) + + def test_slice_strides(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[1:7:3]) + + def test_slice_negindexes(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[-8:-2:3]) + + def test_slice_stop_less_than_start(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[7:5]) + + def test_slice_outofrange(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[100:400:3]) + + def test_slice_backwards(self): + """ we disallow negative steps """ + with self.assertRaises(ValueError): + self.dset[::-1] + + def test_slice_zerostride(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[::0]) + + def test_index_simple(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[3]) + + def test_index_neg(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[-4]) + + # FIXME: NumPy permits this... it adds a new axis in front + def test_index_none(self): + with self.assertRaises(TypeError): + self.dset[None] + + def test_index_illegal(self): + """ Illegal slicing argument """ + with self.assertRaises(TypeError): + self.dset[{}] + + def test_index_outofrange(self): + with self.assertRaises(IndexError): + self.dset[100] + + def test_indexlist_simple(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[[1,2,5]]) + + def test_indexlist_numpyarray(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[np.array([1, 2, 5])]) + + def test_indexlist_single_index_ellipsis(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[[0], ...]) + + def test_indexlist_numpyarray_single_index_ellipsis(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[np.array([0]), ...]) + + def test_indexlist_numpyarray_ellipsis(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[np.array([1, 2, 5]), ...]) + + def test_indexlist_empty(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[[]]) + + def test_indexlist_outofrange(self): + with self.assertRaises(IndexError): + self.dset[[100]] + + def test_indexlist_nonmonotonic(self): + """ we require index list values to be strictly increasing """ + with self.assertRaises(TypeError): + self.dset[[1,3,2]] + + def test_indexlist_monotonic_negative(self): + # This should work: indices are logically increasing + self.assertNumpyBehavior(self.dset, self.data, np.s_[[0, 2, -2]]) + + with self.assertRaises(TypeError): + self.dset[[-2, -3]] + + def test_indexlist_repeated(self): + """ we forbid repeated index values """ + with self.assertRaises(TypeError): + self.dset[[1,1,2]] + + def test_mask_true(self): + self.assertNumpyBehavior( + self.dset, + self.data, + np.s_[self.data > -100], + # Fast reader doesn't work with boolean masks + skip_fast_reader=True, + ) + + def test_mask_false(self): + self.assertNumpyBehavior( + self.dset, + self.data, + np.s_[self.data > 100], + # Fast reader doesn't work with boolean masks + skip_fast_reader=True, + ) + + def test_mask_partial(self): + self.assertNumpyBehavior( + self.dset, + self.data, + np.s_[self.data > 5], + # Fast reader doesn't work with boolean masks + skip_fast_reader=True, + ) + + def test_mask_wrongsize(self): + """ we require the boolean mask shape to match exactly """ + with self.assertRaises(TypeError): + self.dset[np.ones((2,), dtype='bool')] + + def test_fieldnames(self): + """ field 
name -> ValueError (no fields) """ + with self.assertRaises(ValueError): + self.dset['field'] + + +class Test2DZeroFloat(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.ones((0,3), dtype='f') + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 2) + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, (0, 3)) + + def test_indexlist(self): + """ see issue #473 """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[:,[0,1,2]]) + + +class Test2DFloat(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.data = np.ones((5,3), dtype='f') + self.dset = self.f.create_dataset('x', data=self.data) + + def test_ndim(self): + """ Verify number of dimensions """ + self.assertEqual(self.dset.ndim, 2) + + def test_size(self): + """ Verify size """ + self.assertEqual(self.dset.size, 15) + + def test_nbytes(self): + """ Verify nbytes """ + self.assertEqual(self.dset.nbytes, 15*self.data.dtype.itemsize) # not sure if 'f' is always alias for 'f4' + + def test_shape(self): + """ Verify shape """ + self.assertEqual(self.dset.shape, (5, 3)) + + def test_indexlist(self): + """ see issue #473 """ + self.assertNumpyBehavior(self.dset, self.data, np.s_[:,[0,1,2]]) + + def test_index_emptylist(self): + self.assertNumpyBehavior(self.dset, self.data, np.s_[:, []]) + self.assertNumpyBehavior(self.dset, self.data, np.s_[[]]) + + +class TestVeryLargeArray(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.dset = self.f.create_dataset('x', shape=(2**15, 2**16)) + + @ut.skipIf(sys.maxsize < 2**31, 'Maximum integer size >= 2**31 required') + def test_size(self): + self.assertEqual(self.dset.size, 2**31) + + +def test_read_no_fill_value(writable_file): + # With FILL_TIME_NEVER, HDF5 doesn't write zeros in the output array for + # unallocated chunks. If we read into uninitialized memory, it can appear + # to read random values. https://github.com/h5py/h5py/issues/2069 + dcpl = h5py.h5p.create(h5py.h5p.DATASET_CREATE) + dcpl.set_chunk((1,)) + dcpl.set_fill_time(h5py.h5d.FILL_TIME_NEVER) + ds = h5py.Dataset(h5py.h5d.create( + writable_file.id, b'a', h5py.h5t.IEEE_F64LE, h5py.h5s.create_simple((5,)), dcpl + )) + np.testing.assert_array_equal(ds[:3], np.zeros(3, np.float64)) + + +class TestBoolIndex(TestCase): + """ + Tests for indexing with Boolean arrays + """ + def setUp(self): + super().setUp() + self.arr = np.arange(9).reshape(3,-1) + self.dset = self.f.create_dataset('x', data=self.arr) + + def test_select_first_axis(self): + sel = np.s_[[False, True, False],:] + self.assertNumpyBehavior(self.dset, self.arr, sel) + + def test_wrong_size(self): + sel = np.s_[[False, True, False, False],:] + with self.assertRaises(TypeError): + self.dset[sel] diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dataset_swmr.py b/MLPY/Lib/site-packages/h5py/tests/test_dataset_swmr.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf5fbcd59a2fcf917563fb40903755e78a6c1e7 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dataset_swmr.py @@ -0,0 +1,118 @@ +import numpy as np +import h5py + +from .common import ut, TestCase + + +class TestDatasetSwmrRead(TestCase): + """ Testing SWMR functions when reading a dataset. + Skip this test if the HDF5 library does not have the SWMR features. 
+ """ + + def setUp(self): + TestCase.setUp(self) + self.data = np.arange(13).astype('f') + self.dset = self.f.create_dataset('data', chunks=(13,), maxshape=(None,), data=self.data) + fname = self.f.filename + self.f.close() + + self.f = h5py.File(fname, 'r', swmr=True) + self.dset = self.f['data'] + + def test_initial_swmr_mode_on(self): + """ Verify that the file is initially in SWMR mode""" + self.assertTrue(self.f.swmr_mode) + + def test_read_data(self): + self.assertArrayEqual(self.dset, self.data) + + def test_refresh(self): + self.dset.refresh() + + def test_force_swmr_mode_on_raises(self): + """ Verify when reading a file cannot be forcibly switched to swmr mode. + When reading with SWMR the file must be opened with swmr=True.""" + with self.assertRaises(Exception): + self.f.swmr_mode = True + self.assertTrue(self.f.swmr_mode) + + def test_force_swmr_mode_off_raises(self): + """ Switching SWMR write mode off is only possible by closing the file. + Attempts to forcibly switch off the SWMR mode should raise a ValueError. + """ + with self.assertRaises(ValueError): + self.f.swmr_mode = False + self.assertTrue(self.f.swmr_mode) + +class TestDatasetSwmrWrite(TestCase): + """ Testing SWMR functions when reading a dataset. + Skip this test if the HDF5 library does not have the SWMR features. + """ + + def setUp(self): + """ First setup a file with a small chunked and empty dataset. + No data written yet. + """ + + # Note that when creating the file, the swmr=True is not required for + # write, but libver='latest' is required. + self.f = h5py.File(self.mktemp(), 'w', libver='latest') + + self.data = np.arange(4).astype('f') + self.dset = self.f.create_dataset('data', shape=(0,), dtype=self.data.dtype, chunks=(2,), maxshape=(None,)) + + + def test_initial_swmr_mode_off(self): + """ Verify that the file is not initially in SWMR mode""" + self.assertFalse(self.f.swmr_mode) + + def test_switch_swmr_mode_on(self): + """ Switch to SWMR mode and verify """ + self.f.swmr_mode = True + self.assertTrue(self.f.swmr_mode) + + def test_switch_swmr_mode_off_raises(self): + """ Switching SWMR write mode off is only possible by closing the file. + Attempts to forcibly switch off the SWMR mode should raise a ValueError. 
+ """ + self.f.swmr_mode = True + self.assertTrue(self.f.swmr_mode) + with self.assertRaises(ValueError): + self.f.swmr_mode = False + self.assertTrue(self.f.swmr_mode) + + def test_extend_dset(self): + """ Extend and flush a SWMR dataset + """ + self.f.swmr_mode = True + self.assertTrue(self.f.swmr_mode) + + self.dset.resize( self.data.shape ) + self.dset[:] = self.data + self.dset.flush() + + # Refresh and read back data for assertion + self.dset.refresh() + self.assertArrayEqual(self.dset, self.data) + + def test_extend_dset_multiple(self): + + self.f.swmr_mode = True + self.assertTrue(self.f.swmr_mode) + + self.dset.resize( (4,) ) + self.dset[0:] = self.data + self.dset.flush() + + # Refresh and read back 1st data block for assertion + self.dset.refresh() + self.assertArrayEqual(self.dset, self.data) + + self.dset.resize( (8,) ) + self.dset[4:] = self.data + self.dset.flush() + + # Refresh and read back 1st data block for assertion + self.dset.refresh() + self.assertArrayEqual(self.dset[0:4], self.data) + self.assertArrayEqual(self.dset[4:8], self.data) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_datatype.py b/MLPY/Lib/site-packages/h5py/tests/test_datatype.py new file mode 100644 index 0000000000000000000000000000000000000000..37072cb47e9f16c1f021419a2a91ab259a37d390 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_datatype.py @@ -0,0 +1,40 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + File-resident datatype tests. + + Tests "committed" file-resident datatype objects. +""" + +import numpy as np + +from .common import ut, TestCase + +from h5py import Datatype + +class TestCreation(TestCase): + + """ + Feature: repr() works sensibly on datatype objects + """ + + def test_repr(self): + """ repr() on datatype objects """ + self.f['foo'] = np.dtype('S10') + dt = self.f['foo'] + self.assertIsInstance(repr(dt), str) + self.f.close() + self.assertIsInstance(repr(dt), str) + + + def test_appropriate_low_level_id(self): + " Binding a group to a non-TypeID identifier fails with ValueError " + with self.assertRaises(ValueError): + Datatype(self.f['/'].id) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dimension_scales.py b/MLPY/Lib/site-packages/h5py/tests/test_dimension_scales.py new file mode 100644 index 0000000000000000000000000000000000000000..e30f729f565f06e95cf8ce2169059ffb29340005 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dimension_scales.py @@ -0,0 +1,217 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +import sys + +import numpy as np + +from .common import ut, TestCase +from h5py import File, Group, Dataset +import h5py + + +class BaseDataset(TestCase): + + """ + data is a 3-dimensional dataset with dimensions [z, y, x] + + The z dimension is labeled. It does not have any attached scales. + The y dimension is not labeled. It has one attached scale. + The x dimension is labeled. It has two attached scales. + + data2 is a 3-dimensional dataset with no associated dimension scales. 
+ """ + + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.f['data'] = np.ones((4, 3, 2), 'f') + self.f['data2'] = np.ones((4, 3, 2), 'f') + self.f['x1'] = np.ones((2), 'f') + h5py.h5ds.set_scale(self.f['x1'].id) + h5py.h5ds.attach_scale(self.f['data'].id, self.f['x1'].id, 2) + self.f['x2'] = np.ones((2), 'f') + h5py.h5ds.set_scale(self.f['x2'].id, b'x2 name') + h5py.h5ds.attach_scale(self.f['data'].id, self.f['x2'].id, 2) + self.f['y1'] = np.ones((3), 'f') + h5py.h5ds.set_scale(self.f['y1'].id, b'y1 name') + h5py.h5ds.attach_scale(self.f['data'].id, self.f['y1'].id, 1) + self.f['z1'] = np.ones((4), 'f') + + h5py.h5ds.set_label(self.f['data'].id, 0, b'z') + h5py.h5ds.set_label(self.f['data'].id, 2, b'x') + + def tearDown(self): + if self.f: + self.f.close() + + +class TestH5DSBindings(BaseDataset): + + """ + Feature: Datasets can be created from existing data + """ + + def test_create_dimensionscale(self): + """ Create a dimension scale from existing dataset """ + self.assertTrue(h5py.h5ds.is_scale(self.f['x1'].id)) + self.assertEqual(h5py.h5ds.get_scale_name(self.f['x1'].id), b'') + self.assertEqual(self.f['x1'].attrs['CLASS'], b"DIMENSION_SCALE") + self.assertEqual(h5py.h5ds.get_scale_name(self.f['x2'].id), b'x2 name') + + def test_attach_dimensionscale(self): + self.assertTrue( + h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2) + ) + self.assertFalse( + h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 1)) + self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 0), 0) + self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 1), 1) + self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 2), 2) + + def test_detach_dimensionscale(self): + self.assertTrue( + h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2) + ) + h5py.h5ds.detach_scale(self.f['data'].id, self.f['x1'].id, 2) + self.assertFalse( + h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2) + ) + self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 2), 1) + + def test_label_dimensionscale(self): + self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 0), b'z') + self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 1), b'') + self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 2), b'x') + + def test_iter_dimensionscales(self): + def func(dsid): + res = h5py.h5ds.get_scale_name(dsid) + if res == b'x2 name': + return dsid + + res = h5py.h5ds.iterate(self.f['data'].id, 2, func, 0) + self.assertEqual(h5py.h5ds.get_scale_name(res), b'x2 name') + + +class TestDimensionManager(BaseDataset): + + def test_make_scale(self): + # test recreating or renaming an existing scale: + self.f['x1'].make_scale(b'foobar') + self.assertEqual(self.f['data'].dims[2]['foobar'], self.f['x1']) + # test creating entirely new scale: + self.f['data2'].make_scale(b'foobaz') + self.f['data'].dims[2].attach_scale(self.f['data2']) + self.assertEqual(self.f['data'].dims[2]['foobaz'], self.f['data2']) + + def test_get_dimension(self): + with self.assertRaises(IndexError): + self.f['data'].dims[3] + + def test_len(self): + self.assertEqual(len(self.f['data'].dims), 3) + self.assertEqual(len(self.f['data2'].dims), 3) + + def test_iter(self): + dims = self.f['data'].dims + self.assertEqual( + [d for d in dims], + [dims[0], dims[1], dims[2]] + ) + + def test_repr(self): + ds = self.f.create_dataset('x', (2,3)) + self.assertIsInstance(repr(ds.dims), str) + self.f.close() + self.assertIsInstance(repr(ds.dims), str) + + +class TestDimensionsHighLevel(BaseDataset): + + def test_len(self): + 
self.assertEqual(len(self.f['data'].dims[0]), 0) + self.assertEqual(len(self.f['data'].dims[1]), 1) + self.assertEqual(len(self.f['data'].dims[2]), 2) + self.assertEqual(len(self.f['data2'].dims[0]), 0) + self.assertEqual(len(self.f['data2'].dims[1]), 0) + self.assertEqual(len(self.f['data2'].dims[2]), 0) + + def test_get_label(self): + self.assertEqual(self.f['data'].dims[2].label, 'x') + self.assertEqual(self.f['data'].dims[1].label, '') + self.assertEqual(self.f['data'].dims[0].label, 'z') + self.assertEqual(self.f['data2'].dims[2].label, '') + self.assertEqual(self.f['data2'].dims[1].label, '') + self.assertEqual(self.f['data2'].dims[0].label, '') + + def test_set_label(self): + self.f['data'].dims[0].label = 'foo' + self.assertEqual(self.f['data'].dims[2].label, 'x') + self.assertEqual(self.f['data'].dims[1].label, '') + self.assertEqual(self.f['data'].dims[0].label, 'foo') + + def test_detach_scale(self): + self.f['data'].dims[2].detach_scale(self.f['x1']) + self.assertEqual(len(self.f['data'].dims[2]), 1) + self.assertEqual(self.f['data'].dims[2][0], self.f['x2']) + self.f['data'].dims[2].detach_scale(self.f['x2']) + self.assertEqual(len(self.f['data'].dims[2]), 0) + + def test_attach_scale(self): + self.f['x3'] = self.f['x2'][...] + self.f['data'].dims[2].attach_scale(self.f['x3']) + self.assertEqual(len(self.f['data'].dims[2]), 3) + self.assertEqual(self.f['data'].dims[2][2], self.f['x3']) + + def test_get_dimension_scale(self): + self.assertEqual(self.f['data'].dims[2][0], self.f['x1']) + with self.assertRaises(RuntimeError): + self.f['data2'].dims[2][0], self.f['x2'] + self.assertEqual(self.f['data'].dims[2][''], self.f['x1']) + self.assertEqual(self.f['data'].dims[2]['x2 name'], self.f['x2']) + + def test_get_items(self): + self.assertEqual( + self.f['data'].dims[2].items(), + [('', self.f['x1']), ('x2 name', self.f['x2'])] + ) + + def test_get_keys(self): + self.assertEqual(self.f['data'].dims[2].keys(), ['', 'x2 name']) + + def test_get_values(self): + self.assertEqual( + self.f['data'].dims[2].values(), + [self.f['x1'], self.f['x2']] + ) + + def test_iter(self): + self.assertEqual([i for i in self.f['data'].dims[2]], ['', 'x2 name']) + + def test_repr(self): + ds = self.f["data"] + self.assertEqual(repr(ds.dims[2])[1:16], '"x" dimension 2') + self.f.close() + self.assertIsInstance(repr(ds.dims), str) + + def test_attributes(self): + self.f["data2"].attrs["DIMENSION_LIST"] = self.f["data"].attrs[ + "DIMENSION_LIST"] + self.assertEqual(len(self.f['data2'].dims[0]), 0) + self.assertEqual(len(self.f['data2'].dims[1]), 1) + self.assertEqual(len(self.f['data2'].dims[2]), 2) + + def test_is_scale(self): + """Test Dataset.is_scale property""" + self.assertTrue(self.f['x1'].is_scale) + self.assertTrue(self.f['x2'].is_scale) + self.assertTrue(self.f['y1'].is_scale) + self.assertFalse(self.f['z1'].is_scale) + self.assertFalse(self.f['data'].is_scale) + self.assertFalse(self.f['data2'].is_scale) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dims_dimensionproxy.py b/MLPY/Lib/site-packages/h5py/tests/test_dims_dimensionproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..188d02f4d03296d49e1201da133c37b4e8e84dcc --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dims_dimensionproxy.py @@ -0,0 +1,24 @@ +# This file is part of h5py, a Python interface to the HDF5 library. 
+# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Tests the h5py.Dataset.dims.DimensionProxy class. +""" + +import numpy as np +import h5py + +from .common import ut, TestCase + +class TestItems(TestCase): + + def test_empty(self): + """ no dimension scales -> empty list """ + dset = self.f.create_dataset('x', (10,)) + self.assertEqual(dset.dims[0].items(), []) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_dtype.py b/MLPY/Lib/site-packages/h5py/tests/test_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..dbf72c7d3e0c0d11b4402939005abb327fde423d --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_dtype.py @@ -0,0 +1,514 @@ +""" + Tests for converting between numpy dtypes and h5py data types +""" + +from itertools import count +import platform +import numpy as np +import h5py +try: + import tables +except ImportError: + tables = None + +from .common import ut, TestCase + +UNSUPPORTED_LONG_DOUBLE = ('i386', 'i486', 'i586', 'i686', 'ppc64le') +UNSUPPORTED_LONG_DOUBLE_TYPES = ('float96', 'float128', 'complex192', + 'complex256') + + +class TestVlen(TestCase): + + """ + Check that storage of vlen strings is carried out correctly. + """ + def assertVlenArrayEqual(self, dset, arr, message=None, precision=None): + assert dset.shape == arr.shape, \ + "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message) + for (i, d, a) in zip(count(), dset, arr): + self.assertArrayEqual(d, a, message, precision) + + def test_compound(self): + + fields = [] + fields.append(('field_1', h5py.string_dtype())) + fields.append(('field_2', np.int32)) + dt = np.dtype(fields) + self.f['mytype'] = np.dtype(dt) + dt_out = self.f['mytype'].dtype.fields['field_1'][0] + string_inf = h5py.check_string_dtype(dt_out) + self.assertEqual(string_inf.encoding, 'utf-8') + + def test_compound_vlen_bool(self): + vidt = h5py.vlen_dtype(np.uint8) + def a(items): + return np.array(items, dtype=np.uint8) + + f = self.f + + dt_vb = np.dtype([ + ('foo', vidt), + ('logical', bool)]) + vb = f.create_dataset('dt_vb', shape=(4,), dtype=dt_vb) + data = np.array([(a([1, 2, 3]), True), + (a([1 ]), False), + (a([1, 5 ]), True), + (a([],), False), ], + dtype=dt_vb) + vb[:] = data + actual = f['dt_vb'][:] + self.assertVlenArrayEqual(data['foo'], actual['foo']) + self.assertArrayEqual(data['logical'], actual['logical']) + + dt_vv = np.dtype([ + ('foo', vidt), + ('bar', vidt)]) + f.create_dataset('dt_vv', shape=(4,), dtype=dt_vv) + + dt_vvb = np.dtype([ + ('foo', vidt), + ('bar', vidt), + ('logical', bool)]) + vvb = f.create_dataset('dt_vvb', shape=(2,), dtype=dt_vvb) + + dt_bvv = np.dtype([ + ('logical', bool), + ('foo', vidt), + ('bar', vidt)]) + bvv = f.create_dataset('dt_bvv', shape=(2,), dtype=dt_bvv) + data = np.array([(True, a([1, 2, 3]), a([1, 2])), + (False, a([]), a([2, 4, 6])), ], + dtype=bvv) + bvv[:] = data + actual = bvv[:] + self.assertVlenArrayEqual(data['foo'], actual['foo']) + self.assertVlenArrayEqual(data['bar'], actual['bar']) + self.assertArrayEqual(data['logical'], actual['logical']) + + def test_compound_vlen_enum(self): + eidt = h5py.enum_dtype({'OFF': 0, 'ON': 1}, basetype=np.uint8) + vidt = h5py.vlen_dtype(np.uint8) + def a(items): + return np.array(items, dtype=np.uint8) + + f = self.f + + dt_vve = np.dtype([ + ('foo', vidt), + ('bar', vidt), + ('switch', eidt)]) + vve = f.create_dataset('dt_vve', shape=(2,), dtype=dt_vve) + 
data = np.array([(a([1, 2, 3]), a([1, 2]), 1), + (a([]), a([2, 4, 6]), 0), ], + dtype=dt_vve) + vve[:] = data + actual = vve[:] + self.assertVlenArrayEqual(data['foo'], actual['foo']) + self.assertVlenArrayEqual(data['bar'], actual['bar']) + self.assertArrayEqual(data['switch'], actual['switch']) + + def test_vlen_enum(self): + fname = self.mktemp() + arr1 = [[1], [1, 2]] + dt1 = h5py.vlen_dtype(h5py.enum_dtype(dict(foo=1, bar=2), 'i')) + + with h5py.File(fname, 'w') as f: + df1 = f.create_dataset('test', (len(arr1),), dtype=dt1) + df1[:] = np.array(arr1, dtype=object) + + with h5py.File(fname, 'r') as f: + df2 = f['test'] + dt2 = df2.dtype + arr2 = [e.tolist() for e in df2[:]] + + self.assertEqual(arr1, arr2) + self.assertEqual(h5py.check_enum_dtype(h5py.check_vlen_dtype(dt1)), + h5py.check_enum_dtype(h5py.check_vlen_dtype(dt2))) + + +class TestEmptyVlen(TestCase): + def test_write_empty_vlen(self): + fname = self.mktemp() + with h5py.File(fname, 'w') as f: + d = np.core.records.fromarrays([[], []], names='a,b', formats='|V16,O') + dset = f.create_dataset('test', data=d, dtype=[('a', '|V16'), ('b', h5py.special_dtype(vlen=np.float64))]) + self.assertEqual(dset.size, 0) + + +class TestExplicitCast(TestCase): + def test_f2_casting(self): + fname = self.mktemp() + + np.random.seed(1) + A = np.random.rand(1500, 20) + + # Save to HDF5 file + with h5py.File(fname, "w") as Fid: + Fid.create_dataset("Data", data=A, dtype='f2') + + with h5py.File(fname, "r") as Fid: + B = Fid["Data"][:] + + # Compare + self.assertTrue(np.all(A.astype('f2') == B)) + + +class TestOffsets(TestCase): + """ + Check that compound members with aligned or manual offsets are handled + correctly. + """ + + def test_compound_vlen(self): + vidt = h5py.vlen_dtype(np.uint8) + eidt = h5py.enum_dtype({'OFF': 0, 'ON': 1}, basetype=np.uint8) + + for np_align in (False, True): + dt = np.dtype([ + ('a', eidt), + ('foo', vidt), + ('bar', vidt), + ('switch', eidt)], align=np_align) + np_offsets = [dt.fields[i][1] for i in dt.names] + + for logical in (False, True): + if logical and np_align: + # Vlen types have different size in the numpy struct + self.assertRaises(TypeError, h5py.h5t.py_create, dt, + logical=logical) + else: + ht = h5py.h5t.py_create(dt, logical=logical) + offsets = [ht.get_member_offset(i) + for i in range(ht.get_nmembers())] + if np_align: + self.assertEqual(np_offsets, offsets) + + def test_aligned_offsets(self): + dt = np.dtype('i4,i8,i2', align=True) + ht = h5py.h5t.py_create(dt) + self.assertEqual(dt.itemsize, ht.get_size()) + self.assertEqual( + [dt.fields[i][1] for i in dt.names], + [ht.get_member_offset(i) for i in range(ht.get_nmembers())] + ) + + def test_aligned_data(self): + dt = np.dtype('i4,f8,i2', align=True) + data = np.zeros(10, dtype=dt) + + data['f0'] = np.array(np.random.randint(-100, 100, size=data.size), + dtype='i4') + data['f1'] = np.random.rand(data.size) + data['f2'] = np.array(np.random.randint(-100, 100, size=data.size), + dtype='i2') + + fname = self.mktemp() + + with h5py.File(fname, 'w') as f: + f['data'] = data + + with h5py.File(fname, 'r') as f: + self.assertArrayEqual(f['data'], data) + + def test_compound_robustness(self): + # make an out of order compound type with gaps in it, and larger itemsize than minimum + # Idea is to be robust to type descriptions we *could* get out of HDF5 files, from custom descriptions + # of types in addition to numpy's flakey history on unaligned fields with non-standard or padded layouts. 
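+        # Each tuple below is (field name, numpy base type, byte offset of the
+        # field within one compound element); offsets are deliberately unordered.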
+ fields = [ + ('f0', np.float64, 25), + ('f1', np.uint64, 9), + ('f2', np.uint32, 0), + ('f3', np.uint16, 5) + ] + lastfield = fields[np.argmax([ x[2] for x in fields ])] + itemsize = lastfield[2] + np.dtype(lastfield[1]).itemsize + 6 + extract_index = lambda index, sequence: [ x[index] for x in sequence ] + + dt = np.dtype({ + 'names' : extract_index(0, fields), + 'formats' : extract_index(1, fields), + 'offsets' : extract_index(2, fields), + # 'aligned': False, - already defaults to False + 'itemsize': itemsize + }) + + self.assertTrue(dt.itemsize == itemsize) + data = np.zeros(10, dtype=dt) + + # don't trust numpy struct handling, keep fields out of band in case content insertion is erroneous + # yes... this has also been known to happen. + f1 = np.array([1 + i * 4 for i in range(data.shape[0])], dtype=dt.fields['f1'][0]) + f2 = np.array([2 + i * 4 for i in range(data.shape[0])], dtype=dt.fields['f2'][0]) + f3 = np.array([3 + i * 4 for i in range(data.shape[0])], dtype=dt.fields['f3'][0]) + f0c = 3.14 + data['f0'] = f0c + data['f3'] = f3 + data['f1'] = f1 + data['f2'] = f2 + + # numpy consistency checks + self.assertTrue(np.all(data['f0'] == f0c)) + self.assertArrayEqual(data['f3'], f3) + self.assertArrayEqual(data['f1'], f1) + self.assertArrayEqual(data['f2'], f2) + + fname = self.mktemp() + + with h5py.File(fname, 'w') as fd: + fd.create_dataset('data', data=data) + + with h5py.File(fname, 'r') as fd: + readback = fd['data'] + self.assertTrue(readback.dtype == dt) + self.assertArrayEqual(readback, data) + self.assertTrue(np.all(readback['f0'] == f0c)) + self.assertArrayEqual(readback['f1'], f1) + self.assertArrayEqual(readback['f2'], f2) + self.assertArrayEqual(readback['f3'], f3) + + def test_out_of_order_offsets(self): + dt = np.dtype({ + 'names' : ['f1', 'f2', 'f3'], + 'formats' : ['']: + dt_descr = f'{dt_order}M8[{dt_unit}]' + dt = h5py.opaque_dtype(np.dtype(dt_descr)) + arr = np.array([0], dtype=np.int64).view(dtype=dt) + + with h5py.File(fname, 'w') as f: + dset = f.create_dataset("default", data=arr, dtype=dt) + self.assertArrayEqual(arr, dset) + self.assertEqual(arr.dtype, dset.dtype) + + def test_timedelta(self): + fname = self.mktemp() + + for dt_unit in self.datetime_units: + for dt_order in ['<', '>']: + dt_descr = f'{dt_order}m8[{dt_unit}]' + dt = h5py.opaque_dtype(np.dtype(dt_descr)) + arr = np.array([np.timedelta64(500, dt_unit)], dtype=dt) + + with h5py.File(fname, 'w') as f: + dset = f.create_dataset("default", data=arr, dtype=dt) + self.assertArrayEqual(arr, dset) + self.assertEqual(arr.dtype, dset.dtype) + +@ut.skipUnless(tables is not None, 'tables is required') +class TestBitfield(TestCase): + + """ + Test H5T_NATIVE_B8 reading + """ + + def test_b8_bool(self): + arr1 = np.array([False, True], dtype=bool) + self._test_b8( + arr1, + expected_default_cast_dtype=np.uint8 + ) + self._test_b8( + arr1, + expected_default_cast_dtype=np.uint8, + cast_dtype=np.uint8 + ) + + def test_b8_bool_compound(self): + arr1 = np.array([(False,), (True,)], dtype=np.dtype([('x', '?')])) + self._test_b8( + arr1, + expected_default_cast_dtype=np.dtype([('x', 'u1')]) + ) + self._test_b8( + arr1, + expected_default_cast_dtype=np.dtype([('x', 'u1')]), + cast_dtype=np.dtype([('x', 'u1')]) + ) + + def test_b8_bool_compound_nested(self): + arr1 = np.array( + [(True, (True, False)), (True, (False, True))], + dtype=np.dtype([('x', '?'), ('y', [('a', '?'), ('b', '?')])]), + ) + self._test_b8( + arr1, + expected_default_cast_dtype=np.dtype( + [('x', 'u1'), ('y', [('a', 'u1'), ('b', 'u1')])] + ) + 
) + self._test_b8( + arr1, + expected_default_cast_dtype=np.dtype( + [('x', 'u1'), ('y', [('a', 'u1'), ('b', 'u1')])] + ), + cast_dtype=np.dtype([('x', 'u1'), ('y', [('a', 'u1'), ('b', 'u1')])]), + ) + + def test_b8_bool_compound_mixed_types(self): + arr1 = np.array( + [(True, 0.5), (False, 0.2)], dtype=np.dtype([('x','?'), ('y', ' (1, 14, 3), + reason='Requires HDF5 <= 1.14.3') + def test_too_small_pbs(self): + """Page buffer size must be greater than file space page size.""" + fname = self.mktemp() + fsp = 16 * 1024 + with File(fname, mode='w', fs_strategy='page', fs_page_size=fsp): + pass + with self.assertRaises(OSError): + File(fname, mode="r", page_buf_size=fsp-1) + + @pytest.mark.skipif(h5py.version.hdf5_version_tuple < (1, 14, 4), + reason='Requires HDF5 >= 1.14.4') + def test_open_nonpage_pbs(self): + """Open non-PAGE file with page buffer set.""" + fname = self.mktemp() + fsp = 16 * 1024 + with File(fname, mode='w'): + pass + with File(fname, mode='r', page_buf_size=fsp) as f: + fapl = f.id.get_access_plist() + assert fapl.get_page_buffer_size()[0] == 0 + + @pytest.mark.skipif(h5py.version.hdf5_version_tuple < (1, 14, 4), + reason='Requires HDF5 >= 1.14.4') + def test_smaller_pbs(self): + """Adjust page buffer size automatically when smaller than file page.""" + fname = self.mktemp() + fsp = 16 * 1024 + with File(fname, mode='w', fs_strategy='page', fs_page_size=fsp): + pass + with File(fname, mode='r', page_buf_size=fsp-100) as f: + fapl = f.id.get_access_plist() + assert fapl.get_page_buffer_size()[0] == fsp + + def test_actual_pbs(self): + """Verify actual page buffer size.""" + fname = self.mktemp() + fsp = 16 * 1024 + pbs = 2 * fsp + with File(fname, mode='w', fs_strategy='page', fs_page_size=fsp): + pass + with File(fname, mode='r', page_buf_size=pbs-1) as f: + fapl = f.id.get_access_plist() + self.assertEqual(fapl.get_page_buffer_size()[0], fsp) + + +class TestModes(TestCase): + + """ + Feature: File mode can be retrieved via file.mode + """ + + def test_mode_attr(self): + """ Mode equivalent can be retrieved via property """ + fname = self.mktemp() + with File(fname, 'w') as f: + self.assertEqual(f.mode, 'r+') + with File(fname, 'r') as f: + self.assertEqual(f.mode, 'r') + + def test_mode_external(self): + """ Mode property works for files opened via external links + + Issue 190. + """ + fname1 = self.mktemp() + fname2 = self.mktemp() + + f1 = File(fname1, 'w') + f1.close() + + f2 = File(fname2, 'w') + try: + f2['External'] = h5py.ExternalLink(fname1, '/') + f3 = f2['External'].file + self.assertEqual(f3.mode, 'r+') + finally: + f2.close() + f3.close() + + f2 = File(fname2, 'r') + try: + f3 = f2['External'].file + self.assertEqual(f3.mode, 'r') + finally: + f2.close() + f3.close() + + +class TestDrivers(TestCase): + + """ + Feature: Files can be opened with low-level HDF5 drivers. Does not + include MPI drivers (see bottom). 
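+
+    Driver-specific options (block sizes, backing store, etc.) are passed
+    straight through as keyword arguments to File().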
+ """ + + @ut.skipUnless(os.name == 'posix', "Stdio driver is supported on posix") + def test_stdio(self): + """ Stdio driver is supported on posix """ + fid = File(self.mktemp(), 'w', driver='stdio') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'stdio') + fid.close() + + # Testing creation with append flag + fid = File(self.mktemp(), 'a', driver='stdio') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'stdio') + fid.close() + + @ut.skipUnless(direct_vfd, + "DIRECT driver is supported on Linux if hdf5 is " + "built with the appriorate flags.") + def test_direct(self): + """ DIRECT driver is supported on Linux""" + fid = File(self.mktemp(), 'w', driver='direct') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'direct') + default_fapl = fid.id.get_access_plist().get_fapl_direct() + fid.close() + + # Testing creation with append flag + fid = File(self.mktemp(), 'a', driver='direct') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'direct') + fid.close() + + # 2022/02/26: hnmaarrfk + # I'm actually not too sure of the restriction on the + # different valid block_sizes and cbuf_sizes on different hardware + # platforms. + # + # I've learned a few things: + # * cbuf_size: Copy buffer size must be a multiple of block size + # The alignment (on my platform x86-64bit with an NVMe SSD + # could be an integer multiple of 512 + # + # To allow HDF5 to do the heavy lifting for different platform, + # We didn't provide any arguments to the first call + # and obtained HDF5's default values there. + + # Testing creation with a few different property lists + for alignment, block_size, cbuf_size in [ + default_fapl, + (default_fapl[0], default_fapl[1], 3 * default_fapl[1]), + (default_fapl[0] * 2, default_fapl[1], 3 * default_fapl[1]), + (default_fapl[0], 2 * default_fapl[1], 6 * default_fapl[1]), + ]: + with File(self.mktemp(), 'w', driver='direct', + alignment=alignment, + block_size=block_size, + cbuf_size=cbuf_size) as fid: + actual_fapl = fid.id.get_access_plist().get_fapl_direct() + actual_alignment = actual_fapl[0] + actual_block_size = actual_fapl[1] + actual_cbuf_size = actual_fapl[2] + assert actual_alignment == alignment + assert actual_block_size == block_size + assert actual_cbuf_size == actual_cbuf_size + + @ut.skipUnless(os.name == 'posix', "Sec2 driver is supported on posix") + def test_sec2(self): + """ Sec2 driver is supported on posix """ + fid = File(self.mktemp(), 'w', driver='sec2') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'sec2') + fid.close() + + # Testing creation with append flag + fid = File(self.mktemp(), 'a', driver='sec2') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'sec2') + fid.close() + + def test_core(self): + """ Core driver is supported (no backing store) """ + fname = self.mktemp() + fid = File(fname, 'w', driver='core', backing_store=False) + self.assertTrue(fid) + self.assertEqual(fid.driver, 'core') + fid.close() + self.assertFalse(os.path.exists(fname)) + + # Testing creation with append flag + fid = File(self.mktemp(), 'a', driver='core') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'core') + fid.close() + + def test_backing(self): + """ Core driver saves to file when backing store used """ + fname = self.mktemp() + fid = File(fname, 'w', driver='core', backing_store=True) + fid.create_group('foo') + fid.close() + fid = File(fname, 'r') + assert 'foo' in fid + fid.close() + # keywords for other drivers are invalid when using the default driver + with self.assertRaises(TypeError): + File(fname, 'w', 
backing_store=True) + + def test_readonly(self): + """ Core driver can be used to open existing files """ + fname = self.mktemp() + fid = File(fname, 'w') + fid.create_group('foo') + fid.close() + fid = File(fname, 'r', driver='core') + self.assertTrue(fid) + assert 'foo' in fid + with self.assertRaises(ValueError): + fid.create_group('bar') + fid.close() + + def test_blocksize(self): + """ Core driver supports variable block size """ + fname = self.mktemp() + fid = File(fname, 'w', driver='core', block_size=1024, + backing_store=False) + self.assertTrue(fid) + fid.close() + + def test_split(self): + """ Split stores metadata in a separate file """ + fname = self.mktemp() + fid = File(fname, 'w', driver='split') + fid.close() + self.assertTrue(os.path.exists(fname + '-m.h5')) + fid = File(fname, 'r', driver='split') + self.assertTrue(fid) + fid.close() + + def test_fileobj(self): + """ Python file object driver is supported """ + tf = tempfile.TemporaryFile() + fid = File(tf, 'w', driver='fileobj') + self.assertTrue(fid) + self.assertEqual(fid.driver, 'fileobj') + fid.close() + # Driver must be 'fileobj' for file-like object if specified + with self.assertRaises(ValueError): + File(tf, 'w', driver='core') + + # TODO: family driver tests + + +@pytest.mark.skipif( + h5py.version.hdf5_version_tuple[1] % 2 != 0 , + reason='Not HDF5 release version' +) +class TestNewLibver(TestCase): + + """ + Feature: File format compatibility bounds can be specified when + opening a file. + """ + + @classmethod + def setUpClass(cls): + super().setUpClass() + + # Current latest library bound label + if h5py.version.hdf5_version_tuple < (1, 11, 4): + cls.latest = 'v110' + elif h5py.version.hdf5_version_tuple < (1, 13, 0): + cls.latest = 'v112' + else: + cls.latest = 'v114' + + def test_default(self): + """ Opening with no libver arg """ + f = File(self.mktemp(), 'w') + self.assertEqual(f.libver, ('earliest', self.latest)) + f.close() + + def test_single(self): + """ Opening with single libver arg """ + f = File(self.mktemp(), 'w', libver='latest') + self.assertEqual(f.libver, (self.latest, self.latest)) + f.close() + + def test_single_v108(self): + """ Opening with "v108" libver arg """ + f = File(self.mktemp(), 'w', libver='v108') + self.assertEqual(f.libver, ('v108', self.latest)) + f.close() + + def test_single_v110(self): + """ Opening with "v110" libver arg """ + f = File(self.mktemp(), 'w', libver='v110') + self.assertEqual(f.libver, ('v110', self.latest)) + f.close() + + @ut.skipIf(h5py.version.hdf5_version_tuple < (1, 11, 4), + 'Requires HDF5 1.11.4 or later') + def test_single_v112(self): + """ Opening with "v112" libver arg """ + f = File(self.mktemp(), 'w', libver='v112') + self.assertEqual(f.libver, ('v112', self.latest)) + f.close() + + def test_multiple(self): + """ Opening with two libver args """ + f = File(self.mktemp(), 'w', libver=('earliest', 'v108')) + self.assertEqual(f.libver, ('earliest', 'v108')) + f.close() + + def test_none(self): + """ Omitting libver arg results in maximum compatibility """ + f = File(self.mktemp(), 'w') + self.assertEqual(f.libver, ('earliest', self.latest)) + f.close() + + +class TestUserblock(TestCase): + + """ + Feature: Files can be create with user blocks + """ + + def test_create_blocksize(self): + """ User blocks created with w, w-, x and properties work correctly """ + f = File(self.mktemp(), 'w-', userblock_size=512) + try: + self.assertEqual(f.userblock_size, 512) + finally: + f.close() + + f = File(self.mktemp(), 'x', userblock_size=512) + try: + 
self.assertEqual(f.userblock_size, 512) + finally: + f.close() + + f = File(self.mktemp(), 'w', userblock_size=512) + try: + self.assertEqual(f.userblock_size, 512) + finally: + f.close() + # User block size must be an integer + with self.assertRaises(ValueError): + File(self.mktemp(), 'w', userblock_size='non') + + def test_write_only(self): + """ User block only allowed for write """ + name = self.mktemp() + f = File(name, 'w') + f.close() + + with self.assertRaises(ValueError): + f = h5py.File(name, 'r', userblock_size=512) + + with self.assertRaises(ValueError): + f = h5py.File(name, 'r+', userblock_size=512) + + def test_match_existing(self): + """ User block size must match that of file when opening for append """ + name = self.mktemp() + f = File(name, 'w', userblock_size=512) + f.close() + + with self.assertRaises(ValueError): + f = File(name, 'a', userblock_size=1024) + + f = File(name, 'a', userblock_size=512) + try: + self.assertEqual(f.userblock_size, 512) + finally: + f.close() + + def test_power_of_two(self): + """ User block size must be a power of 2 and at least 512 """ + name = self.mktemp() + + with self.assertRaises(ValueError): + f = File(name, 'w', userblock_size=128) + + with self.assertRaises(ValueError): + f = File(name, 'w', userblock_size=513) + + with self.assertRaises(ValueError): + f = File(name, 'w', userblock_size=1023) + + def test_write_block(self): + """ Test that writing to a user block does not destroy the file """ + name = self.mktemp() + + f = File(name, 'w', userblock_size=512) + f.create_group("Foobar") + f.close() + + pyfile = open(name, 'r+b') + try: + pyfile.write(b'X' * 512) + finally: + pyfile.close() + + f = h5py.File(name, 'r') + try: + assert "Foobar" in f + finally: + f.close() + + pyfile = open(name, 'rb') + try: + self.assertEqual(pyfile.read(512), b'X' * 512) + finally: + pyfile.close() + + +class TestContextManager(TestCase): + + """ + Feature: File objects can be used as context managers + """ + + def test_context_manager(self): + """ File objects can be used in with statements """ + with File(self.mktemp(), 'w') as fid: + self.assertTrue(fid) + self.assertTrue(not fid) + + +@ut.skipIf(not UNICODE_FILENAMES, "Filesystem unicode support required") +class TestUnicode(TestCase): + + """ + Feature: Unicode filenames are supported + """ + + def test_unicode(self): + """ Unicode filenames can be used, and retrieved properly via .filename + """ + fname = self.mktemp(prefix=chr(0x201a)) + fid = File(fname, 'w') + try: + self.assertEqual(fid.filename, fname) + self.assertIsInstance(fid.filename, str) + finally: + fid.close() + + def test_unicode_hdf5_python_consistent(self): + """ Unicode filenames can be used, and seen correctly from python + """ + fname = self.mktemp(prefix=chr(0x201a)) + with File(fname, 'w') as f: + self.assertTrue(os.path.exists(fname)) + + def test_nonexistent_file_unicode(self): + """ + Modes 'r' and 'r+' do not create files even when given unicode names + """ + fname = self.mktemp(prefix=chr(0x201a)) + with self.assertRaises(OSError): + File(fname, 'r') + with self.assertRaises(OSError): + File(fname, 'r+') + + +class TestFileProperty(TestCase): + + """ + Feature: A File object can be retrieved from any child object, + via the .file property + """ + + def test_property(self): + """ File object can be retrieved from subgroup """ + fname = self.mktemp() + hfile = File(fname, 'w') + try: + hfile2 = hfile['/'].file + self.assertEqual(hfile, hfile2) + finally: + hfile.close() + + def test_close(self): + """ All retrieved File 
objects are closed at the same time """ + fname = self.mktemp() + hfile = File(fname, 'w') + grp = hfile.create_group('foo') + hfile2 = grp.file + hfile3 = hfile['/'].file + hfile2.close() + self.assertFalse(hfile) + self.assertFalse(hfile2) + self.assertFalse(hfile3) + + def test_mode(self): + """ Retrieved File objects have a meaningful mode attribute """ + hfile = File(self.mktemp(), 'w') + try: + grp = hfile.create_group('foo') + self.assertEqual(grp.file.mode, hfile.mode) + finally: + hfile.close() + + +class TestClose(TestCase): + + """ + Feature: Files can be closed + """ + + def test_close(self): + """ Close file via .close method """ + fid = File(self.mktemp(), 'w') + self.assertTrue(fid) + fid.close() + self.assertFalse(fid) + + def test_closed_file(self): + """ Trying to modify closed file raises ValueError """ + fid = File(self.mktemp(), 'w') + fid.close() + with self.assertRaises(ValueError): + fid.create_group('foo') + + def test_close_multiple_default_driver(self): + fname = self.mktemp() + f = h5py.File(fname, 'w') + f.create_group("test") + f.close() + f.close() + + +class TestFlush(TestCase): + + """ + Feature: Files can be flushed + """ + + def test_flush(self): + """ Flush via .flush method """ + fid = File(self.mktemp(), 'w') + fid.flush() + fid.close() + + +class TestRepr(TestCase): + + """ + Feature: File objects provide a helpful __repr__ string + """ + + def test_repr(self): + """ __repr__ behaves itself when files are open and closed """ + fid = File(self.mktemp(), 'w') + self.assertIsInstance(repr(fid), str) + fid.close() + self.assertIsInstance(repr(fid), str) + + +class TestFilename(TestCase): + + """ + Feature: The name of a File object can be retrieved via .filename + """ + + def test_filename(self): + """ .filename behaves properly for string data """ + fname = self.mktemp() + fid = File(fname, 'w') + try: + self.assertEqual(fid.filename, fname) + self.assertIsInstance(fid.filename, str) + finally: + fid.close() + + +class TestCloseInvalidatesOpenObjectIDs(TestCase): + + """ + Ensure that closing a file invalidates object IDs, as appropriate + """ + + def test_close(self): + """ Closing a file invalidates any of the file's open objects """ + with File(self.mktemp(), 'w') as f1: + g1 = f1.create_group('foo') + self.assertTrue(bool(f1.id)) + self.assertTrue(bool(g1.id)) + f1.close() + self.assertFalse(bool(f1.id)) + self.assertFalse(bool(g1.id)) + with File(self.mktemp(), 'w') as f2: + g2 = f2.create_group('foo') + self.assertTrue(bool(f2.id)) + self.assertTrue(bool(g2.id)) + self.assertFalse(bool(f1.id)) + self.assertFalse(bool(g1.id)) + + def test_close_one_handle(self): + fname = self.mktemp() + with File(fname, 'w') as f: + f.create_group('foo') + + f1 = File(fname) + f2 = File(fname) + g1 = f1['foo'] + g2 = f2['foo'] + assert g1.id.valid + assert g2.id.valid + f1.close() + assert not g1.id.valid + # Closing f1 shouldn't close f2 or objects belonging to it + assert f2.id.valid + assert g2.id.valid + + f2.close() + assert not f2.id.valid + assert not g2.id.valid + + +class TestPathlibSupport(TestCase): + + """ + Check that h5py doesn't break on pathlib + """ + def test_pathlib_accepted_file(self): + """ Check that pathlib is accepted by h5py.File """ + with closed_tempfile() as f: + path = pathlib.Path(f) + with File(path, 'w') as f2: + self.assertTrue(True) + + def test_pathlib_name_match(self): + """ Check that using pathlib does not affect naming """ + with closed_tempfile() as f: + path = pathlib.Path(f) + with File(path, 'w') as h5f1: + pathlib_name = 
h5f1.filename + with File(f, 'w') as h5f2: + normal_name = h5f2.filename + self.assertEqual(pathlib_name, normal_name) + + +class TestPickle(TestCase): + """Check that h5py.File can't be pickled""" + def test_dump_error(self): + with File(self.mktemp(), 'w') as f1: + with self.assertRaises(TypeError): + pickle.dumps(f1) + + +# unittest doesn't work with pytest fixtures (and possibly other features), +# hence no subclassing TestCase +@pytest.mark.mpi +class TestMPI: + def test_mpio(self, mpi_file_name): + """ MPIO driver and options """ + from mpi4py import MPI + + with File(mpi_file_name, 'w', driver='mpio', comm=MPI.COMM_WORLD) as f: + assert f + assert f.driver == 'mpio' + + def test_mpio_append(self, mpi_file_name): + """ Testing creation of file with append """ + from mpi4py import MPI + + with File(mpi_file_name, 'a', driver='mpio', comm=MPI.COMM_WORLD) as f: + assert f + assert f.driver == 'mpio' + + def test_mpi_atomic(self, mpi_file_name): + """ Enable atomic mode for MPIO driver """ + from mpi4py import MPI + + with File(mpi_file_name, 'w', driver='mpio', comm=MPI.COMM_WORLD) as f: + assert not f.atomic + f.atomic = True + assert f.atomic + + def test_close_multiple_mpio_driver(self, mpi_file_name): + """ MPIO driver and options """ + from mpi4py import MPI + + f = File(mpi_file_name, 'w', driver='mpio', comm=MPI.COMM_WORLD) + f.create_group("test") + f.close() + f.close() + + +class TestSWMRMode(TestCase): + + """ + Feature: Create file that switches on SWMR mode + """ + + def test_file_mode_generalizes(self): + fname = self.mktemp() + fid = File(fname, 'w', libver='latest') + g = fid.create_group('foo') + # fid and group member file attribute should have the same mode + assert fid.mode == g.file.mode == 'r+' + fid.swmr_mode = True + # fid and group member file attribute should still be 'r+' + # even though file intent has changed + assert fid.mode == g.file.mode == 'r+' + fid.close() + + def test_swmr_mode_consistency(self): + fname = self.mktemp() + fid = File(fname, 'w', libver='latest') + g = fid.create_group('foo') + assert fid.swmr_mode == g.file.swmr_mode == False + fid.swmr_mode = True + # This setter should affect both fid and group member file attribute + assert fid.swmr_mode == g.file.swmr_mode == True + fid.close() + + +@pytest.mark.skipif( + h5py.version.hdf5_version_tuple < (1, 12, 1) and ( + h5py.version.hdf5_version_tuple[:2] != (1, 10) or h5py.version.hdf5_version_tuple[2] < 7), + reason="Requires HDF5 >= 1.12.1 or 1.10.x >= 1.10.7") +@pytest.mark.skipif("HDF5_USE_FILE_LOCKING" in os.environ, + reason="HDF5_USE_FILE_LOCKING env. var. 
is set") +class TestFileLocking: + """Test h5py.File file locking option""" + + def test_reopen(self, tmp_path): + """Test file locking when opening twice the same file""" + fname = tmp_path / "test.h5" + + with h5py.File(fname, mode="w", locking=True) as f: + f.flush() + + # Opening same file in same process without locking is expected to fail + with pytest.raises(OSError): + with h5py.File(fname, mode="r", locking=False) as h5f_read: + pass + + with h5py.File(fname, mode="r", locking=True) as h5f_read: + pass + + if h5py.version.hdf5_version_tuple < (1, 14, 4): + with h5py.File(fname, mode="r", locking='best-effort') as h5f_read: + pass + else: + with pytest.raises(OSError): + with h5py.File(fname, mode="r", locking='best-effort') as h5f_read: + pass + + + def test_unsupported_locking(self, tmp_path): + """Test with erroneous file locking value""" + fname = tmp_path / "test.h5" + with pytest.raises(ValueError): + with h5py.File(fname, mode="r", locking='unsupported-value') as h5f_read: + pass + + def test_multiprocess(self, tmp_path): + """Test file locking option from different concurrent processes""" + fname = tmp_path / "test.h5" + + def open_in_subprocess(filename, mode, locking): + """Open HDF5 file in a subprocess and return True on success""" + h5py_import_dir = str(pathlib.Path(h5py.__file__).parent.parent) + + process = subprocess.run( + [ + sys.executable, + "-c", + f""" +import sys +sys.path.insert(0, {h5py_import_dir!r}) +import h5py +f = h5py.File({str(filename)!r}, mode={mode!r}, locking={locking}) + """, + ], + capture_output=True) + return process.returncode == 0 and not process.stderr + + # Create test file + with h5py.File(fname, mode="w", locking=True) as f: + f["data"] = 1 + + with h5py.File(fname, mode="r", locking=False) as f: + # Opening in write mode with locking is expected to work + assert open_in_subprocess(fname, mode="w", locking=True) + + +def test_close_gc(writable_file): + # https://github.com/h5py/h5py/issues/1852 + for i in range(100): + writable_file[str(i)] = [] + + filename = writable_file.filename + writable_file.close() + + # Ensure that Python's garbage collection doesn't interfere with closing + # a file. Try a few times - the problem is not 100% consistent, but + # normally showed up on the 1st or 2nd iteration for me. -TAK, 2021 + for i in range(10): + with h5py.File(filename, 'r') as f: + refs = [d.id for d in f.values()] + refs.append(refs) # Make a reference cycle so GC is involved + del refs # GC is likely to fire while closing the file diff --git a/MLPY/Lib/site-packages/h5py/tests/test_file2.py b/MLPY/Lib/site-packages/h5py/tests/test_file2.py new file mode 100644 index 0000000000000000000000000000000000000000..1c2d42b064eabeb92a0a24f99b9df2db3daaf7b9 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_file2.py @@ -0,0 +1,315 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Tests the h5py.File object. 
+""" + +import h5py +from h5py._hl.files import _drivers +from h5py import File + +from .common import ut, TestCase + +import pytest +import io +import tempfile +import os + + +def nfiles(): + return h5py.h5f.get_obj_count(h5py.h5f.OBJ_ALL, h5py.h5f.OBJ_FILE) + +def ngroups(): + return h5py.h5f.get_obj_count(h5py.h5f.OBJ_ALL, h5py.h5f.OBJ_GROUP) + + +class TestDealloc(TestCase): + + """ + Behavior on object deallocation. Note most of this behavior is + delegated to FileID. + """ + + def test_autoclose(self): + """ File objects close automatically when out of scope, but + other objects remain open. """ + + start_nfiles = nfiles() + start_ngroups = ngroups() + + fname = self.mktemp() + f = h5py.File(fname, 'w') + g = f['/'] + + self.assertEqual(nfiles(), start_nfiles+1) + self.assertEqual(ngroups(), start_ngroups+1) + + del f + + self.assertTrue(g) + self.assertEqual(nfiles(), start_nfiles) + self.assertEqual(ngroups(), start_ngroups+1) + + f = g.file + + self.assertTrue(f) + self.assertEqual(nfiles(), start_nfiles+1) + self.assertEqual(ngroups(), start_ngroups+1) + + del g + + self.assertEqual(nfiles(), start_nfiles+1) + self.assertEqual(ngroups(), start_ngroups) + + del f + + self.assertEqual(nfiles(), start_nfiles) + self.assertEqual(ngroups(), start_ngroups) + + +class TestDriverRegistration(TestCase): + def test_register_driver(self): + called_with = [None] + + def set_fapl(plist, *args, **kwargs): + called_with[0] = args, kwargs + return _drivers['sec2'](plist) + + h5py.register_driver('new-driver', set_fapl) + self.assertIn('new-driver', h5py.registered_drivers()) + + fname = self.mktemp() + h5py.File(fname, driver='new-driver', driver_arg_0=0, driver_arg_1=1, + mode='w') + + self.assertEqual( + called_with, + [((), {'driver_arg_0': 0, 'driver_arg_1': 1})], + ) + + def test_unregister_driver(self): + h5py.register_driver('new-driver', lambda plist: None) + self.assertIn('new-driver', h5py.registered_drivers()) + + h5py.unregister_driver('new-driver') + self.assertNotIn('new-driver', h5py.registered_drivers()) + + with self.assertRaises(ValueError) as e: + fname = self.mktemp() + h5py.File(fname, driver='new-driver', mode='w') + + self.assertEqual(str(e.exception), 'Unknown driver type "new-driver"') + + +class TestCache(TestCase): + def test_defaults(self): + fname = self.mktemp() + f = h5py.File(fname, 'w') + self.assertEqual(list(f.id.get_access_plist().get_cache()), + [0, 521, 1048576, 0.75]) + + def test_nbytes(self): + fname = self.mktemp() + f = h5py.File(fname, 'w', rdcc_nbytes=1024) + self.assertEqual(list(f.id.get_access_plist().get_cache()), + [0, 521, 1024, 0.75]) + + def test_nslots(self): + fname = self.mktemp() + f = h5py.File(fname, 'w', rdcc_nslots=125) + self.assertEqual(list(f.id.get_access_plist().get_cache()), + [0, 125, 1048576, 0.75]) + + def test_w0(self): + fname = self.mktemp() + f = h5py.File(fname, 'w', rdcc_w0=0.25) + self.assertEqual(list(f.id.get_access_plist().get_cache()), + [0, 521, 1048576, 0.25]) + + +class TestFileObj(TestCase): + + def check_write(self, fileobj): + f = h5py.File(fileobj, 'w') + self.assertEqual(f.driver, 'fileobj') + self.assertEqual(f.filename, repr(fileobj)) + f.create_dataset('test', data=list(range(12))) + self.assertEqual(list(f), ['test']) + self.assertEqual(list(f['test'][:]), list(range(12))) + f.close() + + def check_read(self, fileobj): + f = h5py.File(fileobj, 'r') + self.assertEqual(list(f), ['test']) + self.assertEqual(list(f['test'][:]), list(range(12))) + self.assertRaises(Exception, f.create_dataset, 'another.test', 
data=list(range(3))) + f.close() + + def test_BytesIO(self): + with io.BytesIO() as fileobj: + self.assertEqual(len(fileobj.getvalue()), 0) + self.check_write(fileobj) + self.assertGreater(len(fileobj.getvalue()), 0) + self.check_read(fileobj) + + def test_file(self): + fname = self.mktemp() + try: + with open(fname, 'wb+') as fileobj: + self.assertEqual(os.path.getsize(fname), 0) + self.check_write(fileobj) + self.assertGreater(os.path.getsize(fname), 0) + self.check_read(fileobj) + with open(fname, 'rb') as fileobj: + self.check_read(fileobj) + finally: + os.remove(fname) + + def test_TemporaryFile(self): + # in this test, we check explicitly that temp file gets + # automatically deleted upon h5py.File.close()... + fileobj = tempfile.NamedTemporaryFile() + fname = fileobj.name + f = h5py.File(fileobj, 'w') + del fileobj + # ... but in your code feel free to simply + # f = h5py.File(tempfile.TemporaryFile()) + + f.create_dataset('test', data=list(range(12))) + self.assertEqual(list(f), ['test']) + self.assertEqual(list(f['test'][:]), list(range(12))) + self.assertTrue(os.path.isfile(fname)) + f.close() + self.assertFalse(os.path.isfile(fname)) + + def test_exception_open(self): + self.assertRaises(Exception, h5py.File, None, + driver='fileobj', mode='x') + self.assertRaises(Exception, h5py.File, 'rogue', + driver='fileobj', mode='x') + self.assertRaises(Exception, h5py.File, self, + driver='fileobj', mode='x') + + def test_exception_read(self): + + class BrokenBytesIO(io.BytesIO): + def readinto(self, b): + raise Exception('I am broken') + + f = h5py.File(BrokenBytesIO(), 'w') + f.create_dataset('test', data=list(range(12))) + self.assertRaises(Exception, list, f['test']) + + def test_exception_write(self): + + class BrokenBytesIO(io.BytesIO): + allow_write = False + def write(self, b): + if self.allow_write: + return super().write(b) + else: + raise Exception('I am broken') + + bio = BrokenBytesIO() + f = h5py.File(bio, 'w') + try: + self.assertRaises(Exception, f.create_dataset, 'test', + data=list(range(12))) + finally: + # Un-break writing so we can close: errors while closing get messy. + bio.allow_write = True + f.close() + + @ut.skip("Incompletely closed files can cause segfaults") + def test_exception_close(self): + fileobj = io.BytesIO() + f = h5py.File(fileobj, 'w') + fileobj.close() + self.assertRaises(Exception, f.close) + + def test_exception_writeonly(self): + # HDF5 expects read & write access to a file it's writing; + # check that we get the correct exception on a write-only file object. + fileobj = open(os.path.join(self.tempdir, 'a.h5'), 'wb') + with self.assertRaises(io.UnsupportedOperation): + f = h5py.File(fileobj, 'w') + group = f.create_group("group") + group.create_dataset("data", data='foo', dtype=h5py.string_dtype()) + + + def test_method_vanish(self): + fileobj = io.BytesIO() + f = h5py.File(fileobj, 'w') + f.create_dataset('test', data=list(range(12))) + self.assertEqual(list(f['test'][:]), list(range(12))) + fileobj.readinto = None + self.assertRaises(Exception, list, f['test']) + + +class TestTrackOrder(TestCase): + def populate(self, f): + for i in range(100): + # Mix group and dataset creation. 
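+            # Every tenth name ("0", "10", ...) becomes a group; the rest are
+            # one-element datasets, so both link types appear in the order test.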
+ if i % 10 == 0: + f.create_group(str(i)) + else: + f[str(i)] = [i] + + def test_track_order(self): + fname = self.mktemp() + f = h5py.File(fname, 'w', track_order=True) # creation order + self.populate(f) + self.assertEqual(list(f), + [str(i) for i in range(100)]) + + def test_no_track_order(self): + fname = self.mktemp() + f = h5py.File(fname, 'w', track_order=False) # name alphanumeric + self.populate(f) + self.assertEqual(list(f), + sorted([str(i) for i in range(100)])) + + +class TestFileMetaBlockSize(TestCase): + + """ + Feature: The meta block size can be manipulated, changing how metadata + is aggregated and the offset of the first dataset. + """ + + def test_file_create_with_meta_block_size_4096(self): + # Test a large meta block size of 4 kibibytes + meta_block_size = 4096 + with File( + self.mktemp(), 'w', + meta_block_size=meta_block_size, + libver="latest" + ) as f: + f["test"] = 5 + self.assertEqual(f.meta_block_size, meta_block_size) + # Equality is expected for HDF5 1.10 + self.assertGreaterEqual(f["test"].id.get_offset(), meta_block_size) + + def test_file_create_with_meta_block_size_512(self): + # Test a small meta block size of 512 bytes + # The smallest verifiable meta_block_size is 463 + meta_block_size = 512 + libver = "latest" + with File( + self.mktemp(), 'w', + meta_block_size=meta_block_size, + libver=libver + ) as f: + f["test"] = 3 + self.assertEqual(f.meta_block_size, meta_block_size) + # Equality is expected for HDF5 1.10 + self.assertGreaterEqual(f["test"].id.get_offset(), meta_block_size) + # Default meta_block_size is 2048. This should fail if meta_block_size is not set. + self.assertLess(f["test"].id.get_offset(), meta_block_size*2) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_file_alignment.py b/MLPY/Lib/site-packages/h5py/tests/test_file_alignment.py new file mode 100644 index 0000000000000000000000000000000000000000..21b870db166f462b4372acab62321be267aa8911 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_file_alignment.py @@ -0,0 +1,103 @@ +import h5py +from .common import TestCase + + +def is_aligned(dataset, offset=4096): + # Here we check if the dataset is aligned + return dataset.id.get_offset() % offset == 0 + + +def dataset_name(i): + return f"data{i:03}" + + +class TestFileAlignment(TestCase): + """ + Ensure that setting the file alignment has the desired effect + in the internal structure. + """ + def test_no_alignment_set(self): + fname = self.mktemp() + # 881 is a prime number, so hopefully this help randomize the alignment + # enough + # A nice even number might give a pathological case where + # While we don't want the data to be aligned, it ends up aligned... + shape = (881,) + + with h5py.File(fname, 'w') as h5file: + # Create up to 1000 datasets + # At least one of them should be misaligned. + # While this isn't perfect, it seems that there + # The case where 1000 datasets get created is one where the data + # is aligned. Therefore, during correct operation, this test is + # expected to finish quickly + for i in range(1000): + dataset = h5file.create_dataset( + dataset_name(i), shape, dtype='uint8') + # Assign data so that the dataset is instantiated in + # the file + dataset[...] = i + if not is_aligned(dataset): + # Break early asserting that the file is not aligned + break + else: + raise RuntimeError("Data was all found to be aligned to 4096") + + def test_alignment_set_above_threshold(self): + # 2022/01/19 hmaarrfk + # UnitTest (TestCase) doesn't play well with pytest parametrization. 
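+        # With these settings, any dataset of at least alignment_threshold
+        # bytes should start at a multiple of alignment_interval in the file.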
+ alignment_threshold = 1000 + alignment_interval = 4096 + + for shape in [ + (1033,), # A prime number above the threshold + (1000,), # Exactly equal to the threshold + (1001,), # one above the threshold + ]: + fname = self.mktemp() + with h5py.File(fname, 'w', + alignment_threshold=alignment_threshold, + alignment_interval=alignment_interval) as h5file: + # Create up to 1000 datasets + # They are all expected to be aligned + for i in range(1000): + dataset = h5file.create_dataset( + dataset_name(i), shape, dtype='uint8') + # Assign data so that the dataset is instantiated in + # the file + dataset[...] = (i % 256) # Truncate to uint8 + assert is_aligned(dataset, offset=alignment_interval) + + def test_alignment_set_below_threshold(self): + # 2022/01/19 hmaarrfk + # UnitTest (TestCase) doesn't play well with pytest parametrization. + alignment_threshold = 1000 + alignment_interval = 1024 + + for shape in [ + (881,), # A prime number below the threshold + (999,), # Exactly one below the threshold + ]: + fname = self.mktemp() + with h5py.File(fname, 'w', + alignment_threshold=alignment_threshold, + alignment_interval=alignment_interval) as h5file: + # Create up to 1000 datasets + # At least one of them should be misaligned. + # While this isn't perfect, it seems that there + # The case where 1000 datasets get created is one where the + # data is aligned. Therefore, during correct operation, this + # test is expected to finish quickly + for i in range(1000): + dataset = h5file.create_dataset( + dataset_name(i), shape, dtype='uint8') + # Assign data so that the dataset is instantiated in + # the file + dataset[...] = i + if not is_aligned(dataset, offset=alignment_interval): + # Break early asserting that the file is not aligned + break + else: + raise RuntimeError( + "Data was all found to be aligned to " + f"{alignment_interval}. 
This is highly unlikely.") diff --git a/MLPY/Lib/site-packages/h5py/tests/test_file_image.py b/MLPY/Lib/site-packages/h5py/tests/test_file_image.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e8ba036e32d515b0bbedc390f3335e8c4226e4 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_file_image.py @@ -0,0 +1,35 @@ +import h5py +from h5py import h5f, h5p + +from .common import ut, TestCase + +class TestFileImage(TestCase): + def test_load_from_image(self): + from binascii import a2b_base64 + from zlib import decompress + + compressed_image = 'eJzr9HBx4+WS4mIAAQ4OBhYGAQZk8B8KKjhQ+TD5BCjNCKU7oPQKJpg4I1hOAiouCDUfXV1IkKsrSPV/NACzx4AFQnMwjIKRCDxcHQNAdASUD0ulJ5hQ1ZWkFpeAaFh69KDQXkYGNohZjDA+JCUzMkIEmKHqELQAWKkAByytOoBJViAPJM7ExATWyAE0B8RgZkyAJmlYDoEAIahukJoNU6+HMTA0UOgT6oBgP38XUI6G5UMFZrzKR8EoGAUjGMDKYVgxDSsuAHcfMK8=' + + image = decompress(a2b_base64(compressed_image)) + + fapl = h5p.create(h5py.h5p.FILE_ACCESS) + fapl.set_fapl_core() + fapl.set_file_image(image) + + fid = h5f.open(self.mktemp().encode(), h5py.h5f.ACC_RDONLY, fapl=fapl) + f = h5py.File(fid) + + self.assertTrue('test' in f) + + def test_open_from_image(self): + from binascii import a2b_base64 + from zlib import decompress + + compressed_image = 'eJzr9HBx4+WS4mIAAQ4OBhYGAQZk8B8KKjhQ+TD5BCjNCKU7oPQKJpg4I1hOAiouCDUfXV1IkKsrSPV/NACzx4AFQnMwjIKRCDxcHQNAdASUD0ulJ5hQ1ZWkFpeAaFh69KDQXkYGNohZjDA+JCUzMkIEmKHqELQAWKkAByytOoBJViAPJM7ExATWyAE0B8RgZkyAJmlYDoEAIahukJoNU6+HMTA0UOgT6oBgP38XUI6G5UMFZrzKR8EoGAUjGMDKYVgxDSsuAHcfMK8=' + + image = decompress(a2b_base64(compressed_image)) + + fid = h5f.open_file_image(image) + f = h5py.File(fid) + + self.assertTrue('test' in f) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_filters.py b/MLPY/Lib/site-packages/h5py/tests/test_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..634fc1ef93e3353ffa7e48adbbe012bb5f76ae03 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_filters.py @@ -0,0 +1,94 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Tests the h5py._hl.filters module. + +""" +import os +import numpy as np +import h5py + +from .common import ut, TestCase + + +class TestFilters(TestCase): + + def setUp(self): + """ like TestCase.setUp but also store the file path """ + self.path = self.mktemp() + self.f = h5py.File(self.path, 'w') + + @ut.skipUnless(h5py.h5z.filter_avail(h5py.h5z.FILTER_SZIP), 'szip filter required') + def test_wr_szip_fletcher32_64bit(self): + """ test combination of szip, fletcher32, and 64bit arrays + + The fletcher32 checksum must be computed after the szip + compression is applied. + + References: + - GitHub issue #953 + - https://lists.hdfgroup.org/pipermail/ + hdf-forum_lists.hdfgroup.org/2018-January/010753.html + """ + self.f.create_dataset("test_data", + data=np.zeros(10000, dtype=np.float64), + fletcher32=True, + compression="szip", + ) + self.f.close() + + with h5py.File(self.path, "r") as h5: + # Access the data which will compute the fletcher32 + # checksum and raise an OSError if something is wrong. 
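+            # Reading a single element is enough: the fletcher32 check runs on
+            # each chunk as it is read back through the filter pipeline.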
+ h5["test_data"][0] + + def test_wr_scaleoffset_fletcher32(self): + """ make sure that scaleoffset + fletcher32 is prevented + """ + data = np.linspace(0, 1, 100) + with self.assertRaises(ValueError): + self.f.create_dataset("test_data", + data=data, + fletcher32=True, + # retain 3 digits after the decimal point + scaleoffset=3, + ) + + +@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed") +def test_filter_ref_obj(writable_file): + gzip8 = h5py.filters.Gzip(level=8) + # **kwargs unpacking (compatible with earlier h5py versions) + assert dict(**gzip8) == { + 'compression': h5py.h5z.FILTER_DEFLATE, + 'compression_opts': (8,) + } + + # Pass object as compression argument (new in h5py 3.0) + ds = writable_file.create_dataset( + 'x', shape=(100,), dtype=np.uint32, compression=gzip8 + ) + assert ds.compression == 'gzip' + assert ds.compression_opts == 8 + + +def test_filter_ref_obj_eq(): + gzip8 = h5py.filters.Gzip(level=8) + + assert gzip8 == h5py.filters.Gzip(level=8) + assert gzip8 != h5py.filters.Gzip(level=7) + + +@ut.skipIf(not os.getenv('H5PY_TEST_CHECK_FILTERS'), "H5PY_TEST_CHECK_FILTERS not set") +def test_filters_available(): + assert 'gzip' in h5py.filters.decode + assert 'gzip' in h5py.filters.encode + assert 'lzf' in h5py.filters.decode + assert 'lzf' in h5py.filters.encode diff --git a/MLPY/Lib/site-packages/h5py/tests/test_group.py b/MLPY/Lib/site-packages/h5py/tests/test_group.py new file mode 100644 index 0000000000000000000000000000000000000000..446d3681d32914b7ea11b99af20955120b2e4b31 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_group.py @@ -0,0 +1,1137 @@ +# -*- coding: utf-8 -*- +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Group test module. + + Tests all methods and properties of Group objects, with the following + exceptions: + + 1. 
Method create_dataset is tested in module test_dataset +""" + +import numpy as np +import os +import os.path +import sys +from tempfile import mkdtemp + +from collections.abc import MutableMapping + +from .common import ut, TestCase +import h5py +from h5py import File, Group, SoftLink, HardLink, ExternalLink +from h5py import Dataset, Datatype +from h5py import h5t +from h5py._hl.compat import filename_encode + +# If we can't encode unicode filenames, there's not much point failing tests +# which must fail +try: + filename_encode(u"α") +except UnicodeEncodeError: + NO_FS_UNICODE = True +else: + NO_FS_UNICODE = False + + +class BaseGroup(TestCase): + + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + +class TestCreate(BaseGroup): + + """ + Feature: New groups can be created via .create_group method + """ + + def test_create(self): + """ Simple .create_group call """ + grp = self.f.create_group('foo') + self.assertIsInstance(grp, Group) + + grp2 = self.f.create_group(b'bar') + self.assertIsInstance(grp, Group) + + def test_create_intermediate(self): + """ Intermediate groups can be created automatically """ + grp = self.f.create_group('foo/bar/baz') + self.assertEqual(grp.name, '/foo/bar/baz') + + grp2 = self.f.create_group(b'boo/bar/baz') + self.assertEqual(grp2.name, '/boo/bar/baz') + + def test_create_exception(self): + """ Name conflict causes group creation to fail with ValueError """ + self.f.create_group('foo') + with self.assertRaises(ValueError): + self.f.create_group('foo') + + def test_unicode(self): + """ Unicode names are correctly stored """ + name = u"/Name" + chr(0x4500) + group = self.f.create_group(name) + self.assertEqual(group.name, name) + self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_UTF8) + + def test_unicode_default(self): + """ Unicode names convertible to ASCII are stored as ASCII (issue 239) + """ + name = u"/Hello, this is a name" + group = self.f.create_group(name) + self.assertEqual(group.name, name) + self.assertEqual(group.id.links.get_info(name.encode('utf8')).cset, h5t.CSET_ASCII) + + def test_type(self): + """ Names should be strings or bytes """ + with self.assertRaises(TypeError): + self.f.create_group(1.) 
+ + def test_appropriate_low_level_id(self): + " Binding a group to a non-group identifier fails with ValueError " + dset = self.f.create_dataset('foo', [1]) + with self.assertRaises(ValueError): + Group(dset.id) + +class TestDatasetAssignment(BaseGroup): + + """ + Feature: Datasets can be created by direct assignment of data + """ + + def test_ndarray(self): + """ Dataset auto-creation by direct assignment """ + data = np.ones((4,4),dtype='f') + self.f['a'] = data + self.assertIsInstance(self.f['a'], Dataset) + self.assertArrayEqual(self.f['a'][...], data) + + def test_name_bytes(self): + data = np.ones((4, 4), dtype='f') + self.f[b'b'] = data + self.assertIsInstance(self.f[b'b'], Dataset) + +class TestDtypeAssignment(BaseGroup): + + """ + Feature: Named types can be created by direct assignment of dtypes + """ + + def test_dtype(self): + """ Named type creation """ + dtype = np.dtype('|S10') + self.f['a'] = dtype + self.assertIsInstance(self.f['a'], Datatype) + self.assertEqual(self.f['a'].dtype, dtype) + + def test_name_bytes(self): + """ Named type creation """ + dtype = np.dtype('|S10') + self.f[b'b'] = dtype + self.assertIsInstance(self.f[b'b'], Datatype) + + +class TestRequire(BaseGroup): + + """ + Feature: Groups can be auto-created, or opened via .require_group + """ + + def test_open_existing(self): + """ Existing group is opened and returned """ + grp = self.f.create_group('foo') + grp2 = self.f.require_group('foo') + self.assertEqual(grp2, grp) + + grp3 = self.f.require_group(b'foo') + self.assertEqual(grp3, grp) + + def test_create(self): + """ Group is created if it doesn't exist """ + grp = self.f.require_group('foo') + self.assertIsInstance(grp, Group) + self.assertEqual(grp.name, '/foo') + + def test_require_exception(self): + """ Opening conflicting object results in TypeError """ + self.f.create_dataset('foo', (1,), 'f') + with self.assertRaises(TypeError): + self.f.require_group('foo') + + def test_intermediate_create_dataset(self): + """ Intermediate is created if it doesn't exist """ + dt = h5py.string_dtype() + self.f.require_dataset("foo/bar/baz", (1,), dtype=dt) + group = self.f.get('foo') + assert isinstance(group, Group) + group = self.f.get('foo/bar') + assert isinstance(group, Group) + + def test_intermediate_create_group(self): + dt = h5py.string_dtype() + self.f.require_group("foo/bar/baz") + group = self.f.get('foo') + assert isinstance(group, Group) + group = self.f.get('foo/bar') + assert isinstance(group, Group) + group = self.f.get('foo/bar/baz') + assert isinstance(group, Group) + + def test_require_shape(self): + ds = self.f.require_dataset("foo/resizable", shape=(0, 3), maxshape=(None, 3), dtype=int) + ds.resize(20, axis=0) + self.f.require_dataset("foo/resizable", shape=(0, 3), maxshape=(None, 3), dtype=int) + self.f.require_dataset("foo/resizable", shape=(20, 3), dtype=int) + with self.assertRaises(TypeError): + self.f.require_dataset("foo/resizable", shape=(0, 0), maxshape=(3, None), dtype=int) + with self.assertRaises(TypeError): + self.f.require_dataset("foo/resizable", shape=(0, 0), maxshape=(None, 5), dtype=int) + with self.assertRaises(TypeError): + self.f.require_dataset("foo/resizable", shape=(0, 0), maxshape=(None, 5, 2), dtype=int) + with self.assertRaises(TypeError): + self.f.require_dataset("foo/resizable", shape=(10, 3), dtype=int) + + +class TestDelete(BaseGroup): + + """ + Feature: Objects can be unlinked via "del" operator + """ + + def test_delete(self): + """ Object deletion via "del" """ + self.f.create_group('foo') + 
self.assertIn('foo', self.f) + del self.f['foo'] + self.assertNotIn('foo', self.f) + + def test_nonexisting(self): + """ Deleting non-existent object raises KeyError """ + with self.assertRaises(KeyError): + del self.f['foo'] + + def test_readonly_delete_exception(self): + """ Deleting object in readonly file raises KeyError """ + # Note: it is impossible to restore the old behavior (ValueError) + # without breaking the above test (non-existing objects) + fname = self.mktemp() + hfile = File(fname, 'w') + try: + hfile.create_group('foo') + finally: + hfile.close() + + hfile = File(fname, 'r') + try: + with self.assertRaises(KeyError): + del hfile['foo'] + finally: + hfile.close() + +class TestOpen(BaseGroup): + + """ + Feature: Objects can be opened via indexing syntax obj[name] + """ + + def test_open(self): + """ Simple obj[name] opening """ + grp = self.f.create_group('foo') + grp2 = self.f['foo'] + grp3 = self.f['/foo'] + self.assertEqual(grp, grp2) + self.assertEqual(grp, grp3) + + def test_nonexistent(self): + """ Opening missing objects raises KeyError """ + with self.assertRaises(KeyError): + self.f['foo'] + + def test_reference(self): + """ Objects can be opened by HDF5 object reference """ + grp = self.f.create_group('foo') + grp2 = self.f[grp.ref] + self.assertEqual(grp2, grp) + + def test_reference_numpyobj(self): + """ Object can be opened by numpy.object_ containing object ref + + Test for issue 181, issue 202. + """ + g = self.f.create_group('test') + + dt = np.dtype([('a', 'i'),('b', h5py.ref_dtype)]) + dset = self.f.create_dataset('test_dset', (1,), dt) + + dset[0] =(42,g.ref) + data = dset[0] + self.assertEqual(self.f[data[1]], g) + + def test_invalid_ref(self): + """ Invalid region references should raise an exception """ + + ref = h5py.h5r.Reference() + + with self.assertRaises(ValueError): + self.f[ref] + + self.f.create_group('x') + ref = self.f['x'].ref + del self.f['x'] + + with self.assertRaises(Exception): + self.f[ref] + + def test_path_type_validation(self): + """ Access with non bytes or str types should raise an exception """ + self.f.create_group('group') + + with self.assertRaises(TypeError): + self.f[0] + + with self.assertRaises(TypeError): + self.f[...] 
+ + # TODO: check that regionrefs also work with __getitem__ + +class TestRepr(BaseGroup): + """Opened and closed groups provide a useful __repr__ string""" + + def test_repr(self): + """ Opened and closed groups provide a useful __repr__ string """ + g = self.f.create_group('foo') + self.assertIsInstance(repr(g), str) + g.id._close() + self.assertIsInstance(repr(g), str) + g = self.f['foo'] + # Closing the file shouldn't break it + self.f.close() + self.assertIsInstance(repr(g), str) + +class BaseMapping(BaseGroup): + + """ + Base class for mapping tests + """ + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.groups = ('a', 'b', 'c', 'd') + for x in self.groups: + self.f.create_group(x) + self.f['x'] = h5py.SoftLink('/mongoose') + self.groups = self.groups + ('x',) + + def tearDown(self): + if self.f: + self.f.close() + +class TestLen(BaseMapping): + + """ + Feature: The Python len() function returns the number of groups + """ + + def test_len(self): + """ len() returns number of group members """ + self.assertEqual(len(self.f), len(self.groups)) + self.f.create_group('e') + self.assertEqual(len(self.f), len(self.groups)+1) + + +class TestContains(BaseGroup): + + """ + Feature: The Python "in" builtin tests for membership + """ + + def test_contains(self): + """ "in" builtin works for membership (byte and Unicode) """ + self.f.create_group('a') + self.assertIn(b'a', self.f) + self.assertIn('a', self.f) + self.assertIn(b'/a', self.f) + self.assertIn('/a', self.f) + self.assertNotIn(b'mongoose', self.f) + self.assertNotIn('mongoose', self.f) + + def test_exc(self): + """ "in" on closed group returns False (see also issue 174) """ + self.f.create_group('a') + self.f.close() + self.assertFalse(b'a' in self.f) + self.assertFalse('a' in self.f) + + def test_empty(self): + """ Empty strings work properly and aren't contained """ + self.assertNotIn('', self.f) + self.assertNotIn(b'', self.f) + + def test_dot(self): + """ Current group "." is always contained """ + self.assertIn(b'.', self.f) + self.assertIn('.', self.f) + + def test_root(self): + """ Root group (by itself) is contained """ + self.assertIn(b'/', self.f) + self.assertIn('/', self.f) + + def test_trailing_slash(self): + """ Trailing slashes are unconditionally ignored """ + self.f.create_group('group') + self.f['dataset'] = 42 + self.assertIn('/group/', self.f) + self.assertIn('group/', self.f) + self.assertIn('/dataset/', self.f) + self.assertIn('dataset/', self.f) + + def test_softlinks(self): + """ Broken softlinks are contained, but their members are not """ + self.f.create_group('grp') + self.f['/grp/soft'] = h5py.SoftLink('/mongoose') + self.f['/grp/external'] = h5py.ExternalLink('mongoose.hdf5', '/mongoose') + self.assertIn('/grp/soft', self.f) + self.assertNotIn('/grp/soft/something', self.f) + self.assertIn('/grp/external', self.f) + self.assertNotIn('/grp/external/something', self.f) + + def test_oddball_paths(self): + """ Technically legitimate (but odd-looking) paths """ + self.f.create_group('x/y/z') + self.f['dset'] = 42 + self.assertIn('/', self.f) + self.assertIn('//', self.f) + self.assertIn('///', self.f) + self.assertIn('.///', self.f) + self.assertIn('././/', self.f) + grp = self.f['x'] + self.assertIn('.//x/y/z', self.f) + self.assertNotIn('.//x/y/z', grp) + self.assertIn('x///', self.f) + self.assertIn('./x///', self.f) + self.assertIn('dset///', self.f) + self.assertIn('/dset//', self.f) + +class TestIter(BaseMapping): + + """ + Feature: You can iterate over group members via "for x in y", etc. 
+ """ + + def test_iter(self): + """ "for x in y" iteration """ + lst = [x for x in self.f] + self.assertSameElements(lst, self.groups) + + def test_iter_zero(self): + """ Iteration works properly for the case with no group members """ + hfile = File(self.mktemp(), 'w') + try: + lst = [x for x in hfile] + self.assertEqual(lst, []) + finally: + hfile.close() + +class TestTrackOrder(BaseGroup): + def populate(self, g): + for i in range(100): + # Mix group and dataset creation. + if i % 10 == 0: + g.create_group(str(i)) + else: + g[str(i)] = [i] + + def test_track_order(self): + g = self.f.create_group('order', track_order=True) # creation order + self.populate(g) + + ref = [str(i) for i in range(100)] + self.assertEqual(list(g), ref) + self.assertEqual(list(reversed(g)), list(reversed(ref))) + + def test_no_track_order(self): + g = self.f.create_group('order', track_order=False) # name alphanumeric + self.populate(g) + + ref = sorted([str(i) for i in range(100)]) + self.assertEqual(list(g), ref) + self.assertEqual(list(reversed(g)), list(reversed(ref))) + +class TestPy3Dict(BaseMapping): + + def test_keys(self): + """ .keys provides a key view """ + kv = getattr(self.f, 'keys')() + ref = self.groups + self.assertSameElements(list(kv), ref) + self.assertSameElements(list(reversed(kv)), list(reversed(ref))) + + for x in self.groups: + self.assertIn(x, kv) + self.assertEqual(len(kv), len(self.groups)) + + def test_values(self): + """ .values provides a value view """ + vv = getattr(self.f, 'values')() + ref = [self.f.get(x) for x in self.groups] + self.assertSameElements(list(vv), ref) + self.assertSameElements(list(reversed(vv)), list(reversed(ref))) + + self.assertEqual(len(vv), len(self.groups)) + for x in self.groups: + self.assertIn(self.f.get(x), vv) + + def test_items(self): + """ .items provides an item view """ + iv = getattr(self.f, 'items')() + ref = [(x,self.f.get(x)) for x in self.groups] + self.assertSameElements(list(iv), ref) + self.assertSameElements(list(reversed(iv)), list(reversed(ref))) + + self.assertEqual(len(iv), len(self.groups)) + for x in self.groups: + self.assertIn((x, self.f.get(x)), iv) + +class TestAdditionalMappingFuncs(BaseMapping): + """ + Feature: Other dict methods (pop, pop_item, clear, update, setdefault) are + available. 
+ """ + def setUp(self): + self.f = File(self.mktemp(), 'w') + for x in ('/test/a', '/test/b', '/test/c', '/test/d'): + self.f.create_group(x) + self.group = self.f['test'] + + def tearDown(self): + if self.f: + self.f.close() + + def test_pop_item(self): + """.pop_item exists and removes item""" + key, val = self.group.popitem() + self.assertNotIn(key, self.group) + + def test_pop(self): + """.pop exists and removes specified item""" + self.group.pop('a') + self.assertNotIn('a', self.group) + + def test_pop_default(self): + """.pop falls back to default""" + # e shouldn't exist as a group + value = self.group.pop('e', None) + self.assertEqual(value, None) + + def test_pop_raises(self): + """.pop raises KeyError for non-existence""" + # e shouldn't exist as a group + with self.assertRaises(KeyError): + key = self.group.pop('e') + + def test_clear(self): + """.clear removes groups""" + self.group.clear() + self.assertEqual(len(self.group), 0) + + def test_update_dict(self): + """.update works with dict""" + new_items = {'e': np.array([42])} + self.group.update(new_items) + self.assertIn('e', self.group) + + def test_update_iter(self): + """.update works with list""" + new_items = [ + ('e', np.array([42])), + ('f', np.array([42])) + ] + self.group.update(new_items) + self.assertIn('e', self.group) + + def test_update_kwargs(self): + """.update works with kwargs""" + new_items = {'e': np.array([42])} + self.group.update(**new_items) + self.assertIn('e', self.group) + + def test_setdefault(self): + """.setdefault gets group if it exists""" + value = self.group.setdefault('a') + self.assertEqual(value, self.group.get('a')) + + def test_setdefault_with_default(self): + """.setdefault gets default if group doesn't exist""" + # e shouldn't exist as a group + # 42 used as groups should be strings + value = self.group.setdefault('e', np.array([42])) + self.assertEqual(value, 42) + + def test_setdefault_no_default(self): + """ + .setdefault gets None if group doesn't exist, but as None isn't defined + as data for a dataset, this should raise a TypeError. 
+ """ + # e shouldn't exist as a group + with self.assertRaises(TypeError): + self.group.setdefault('e') + + +class TestGet(BaseGroup): + + """ + Feature: The .get method allows access to objects and metadata + """ + + def test_get_default(self): + """ Object is returned, or default if it doesn't exist """ + default = object() + out = self.f.get('mongoose', default) + self.assertIs(out, default) + + grp = self.f.create_group('a') + out = self.f.get(b'a') + self.assertEqual(out, grp) + + def test_get_class(self): + """ Object class is returned with getclass option """ + self.f.create_group('foo') + out = self.f.get('foo', getclass=True) + self.assertEqual(out, Group) + + self.f.create_dataset('bar', (4,)) + out = self.f.get('bar', getclass=True) + self.assertEqual(out, Dataset) + + self.f['baz'] = np.dtype('|S10') + out = self.f.get('baz', getclass=True) + self.assertEqual(out, Datatype) + + def test_get_link_class(self): + """ Get link classes """ + default = object() + + sl = SoftLink('/mongoose') + el = ExternalLink('somewhere.hdf5', 'mongoose') + + self.f.create_group('hard') + self.f['soft'] = sl + self.f['external'] = el + + out_hl = self.f.get('hard', default, getlink=True, getclass=True) + out_sl = self.f.get('soft', default, getlink=True, getclass=True) + out_el = self.f.get('external', default, getlink=True, getclass=True) + + self.assertEqual(out_hl, HardLink) + self.assertEqual(out_sl, SoftLink) + self.assertEqual(out_el, ExternalLink) + + def test_get_link(self): + """ Get link values """ + sl = SoftLink('/mongoose') + el = ExternalLink('somewhere.hdf5', 'mongoose') + + self.f.create_group('hard') + self.f['soft'] = sl + self.f['external'] = el + + out_hl = self.f.get('hard', getlink=True) + out_sl = self.f.get('soft', getlink=True) + out_el = self.f.get('external', getlink=True) + + #TODO: redo with SoftLink/ExternalLink built-in equality + self.assertIsInstance(out_hl, HardLink) + self.assertIsInstance(out_sl, SoftLink) + self.assertEqual(out_sl._path, sl._path) + self.assertIsInstance(out_el, ExternalLink) + self.assertEqual(out_el._path, el._path) + self.assertEqual(out_el._filename, el._filename) + +class TestVisit(TestCase): + + """ + Feature: The .visit and .visititems methods allow iterative access to + group and subgroup members + """ + + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.groups = [ + 'grp1', 'grp1/sg1', 'grp1/sg2', 'grp2', 'grp2/sg1', 'grp2/sg1/ssg1' + ] + for x in self.groups: + self.f.create_group(x) + + def tearDown(self): + self.f.close() + + def test_visit(self): + """ All subgroups are visited """ + l = [] + self.f.visit(l.append) + self.assertSameElements(l, self.groups) + + def test_visititems(self): + """ All subgroups and contents are visited """ + l = [] + comp = [(x, self.f[x]) for x in self.groups] + self.f.visititems(lambda x, y: l.append((x,y))) + self.assertSameElements(comp, l) + + def test_bailout(self): + """ Returning a non-None value immediately aborts iteration """ + x = self.f.visit(lambda x: x) + self.assertEqual(x, self.groups[0]) + x = self.f.visititems(lambda x, y: (x,y)) + self.assertEqual(x, (self.groups[0], self.f[self.groups[0]])) + +class TestVisitLinks(TestCase): + """ + Feature: The .visit_links and .visititems_links methods allow iterative access to + links contained in the group and its subgroups. 
+ """ + + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.groups = [ + 'grp1', 'grp1/grp11', 'grp1/grp12', 'grp2', 'grp2/grp21', 'grp2/grp21/grp211' + ] + self.links = [ + 'linkto_grp1', 'grp1/linkto_grp11', 'grp1/linkto_grp12', 'linkto_grp2', 'grp2/linkto_grp21', 'grp2/grp21/linkto_grp211' + ] + for g, l in zip(self.groups, self.links): + self.f.create_group(g) + self.f[l] = SoftLink(f'/{g}') + + def tearDown(self): + self.f.close() + + def test_visit_links(self): + """ All subgroups and links are visited """ + l = [] + self.f.visit_links(l.append) + self.assertSameElements(l, self.groups + self.links) + + def test_visititems(self): + """ All links are visited """ + l = [] + comp = [(x, type(self.f.get(x, getlink=True))) for x in self.groups + self.links] + self.f.visititems_links(lambda x, y: l.append((x, type(y)))) + self.assertSameElements(comp, l) + + def test_bailout(self): + """ Returning a non-None value immediately aborts iteration """ + x = self.f.visit_links(lambda x: x) + self.assertEqual(x, self.groups[0]) + x = self.f.visititems_links(lambda x, y: (x,type(y))) + self.assertEqual(x, (self.groups[0], type(self.f.get(self.groups[0], getlink=True)))) + + +class TestSoftLinks(BaseGroup): + + """ + Feature: Create and manage soft links with the high-level interface + """ + + def test_spath(self): + """ SoftLink path attribute """ + sl = SoftLink('/foo') + self.assertEqual(sl.path, '/foo') + + def test_srepr(self): + """ SoftLink path repr """ + sl = SoftLink('/foo') + self.assertIsInstance(repr(sl), str) + + def test_create(self): + """ Create new soft link by assignment """ + g = self.f.create_group('new') + sl = SoftLink('/new') + self.f['alias'] = sl + g2 = self.f['alias'] + self.assertEqual(g, g2) + + def test_exc(self): + """ Opening dangling soft link results in KeyError """ + self.f['alias'] = SoftLink('new') + with self.assertRaises(KeyError): + self.f['alias'] + +class TestExternalLinks(TestCase): + + """ + Feature: Create and manage external links + """ + + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.ename = self.mktemp() + self.ef = File(self.ename, 'w') + self.ef.create_group('external') + self.ef.close() + + def tearDown(self): + if self.f: + self.f.close() + if self.ef: + self.ef.close() + + def test_epath(self): + """ External link paths attributes """ + el = ExternalLink('foo.hdf5', '/foo') + self.assertEqual(el.filename, 'foo.hdf5') + self.assertEqual(el.path, '/foo') + + def test_erepr(self): + """ External link repr """ + el = ExternalLink('foo.hdf5','/foo') + self.assertIsInstance(repr(el), str) + + def test_create(self): + """ Creating external links """ + self.f['ext'] = ExternalLink(self.ename, '/external') + grp = self.f['ext'] + self.ef = grp.file + self.assertNotEqual(self.ef, self.f) + self.assertEqual(grp.name, '/external') + + def test_exc(self): + """ KeyError raised when attempting to open broken link """ + self.f['ext'] = ExternalLink(self.ename, '/missing') + with self.assertRaises(KeyError): + self.f['ext'] + + # I would prefer OSError but there's no way to fix this as the exception + # class is determined by HDF5. + def test_exc_missingfile(self): + """ KeyError raised when attempting to open missing file """ + self.f['ext'] = ExternalLink('mongoose.hdf5','/foo') + with self.assertRaises(KeyError): + self.f['ext'] + + def test_close_file(self): + """ Files opened by accessing external links can be closed + + Issue 189. 
+ """ + self.f['ext'] = ExternalLink(self.ename, '/') + grp = self.f['ext'] + f2 = grp.file + f2.close() + self.assertFalse(f2) + + @ut.skipIf(NO_FS_UNICODE, "No unicode filename support") + def test_unicode_encode(self): + """ + Check that external links encode unicode filenames properly + Testing issue #732 + """ + ext_filename = os.path.join(mkdtemp(), u"α.hdf5") + with File(ext_filename, "w") as ext_file: + ext_file.create_group('external') + self.f['ext'] = ExternalLink(ext_filename, '/external') + + @ut.skipIf(NO_FS_UNICODE, "No unicode filename support") + def test_unicode_decode(self): + """ + Check that external links decode unicode filenames properly + Testing issue #732 + """ + ext_filename = os.path.join(mkdtemp(), u"α.hdf5") + with File(ext_filename, "w") as ext_file: + ext_file.create_group('external') + ext_file["external"].attrs["ext_attr"] = "test" + self.f['ext'] = ExternalLink(ext_filename, '/external') + self.assertEqual(self.f["ext"].attrs["ext_attr"], "test") + + def test_unicode_hdf5_path(self): + """ + Check that external links handle unicode hdf5 paths properly + Testing issue #333 + """ + ext_filename = os.path.join(mkdtemp(), "external.hdf5") + with File(ext_filename, "w") as ext_file: + ext_file.create_group('α') + ext_file["α"].attrs["ext_attr"] = "test" + self.f['ext'] = ExternalLink(ext_filename, '/α') + self.assertEqual(self.f["ext"].attrs["ext_attr"], "test") + +class TestExtLinkBugs(TestCase): + + """ + Bugs: Specific regressions for external links + """ + + def test_issue_212(self): + """ Issue 212 + + Fails with: + + AttributeError: 'SharedConfig' object has no attribute 'lapl' + """ + def closer(x): + def w(): + try: + if x: + x.close() + except OSError: + pass + return w + orig_name = self.mktemp() + new_name = self.mktemp() + f = File(orig_name, 'w') + self.addCleanup(closer(f)) + f.create_group('a') + f.close() + + g = File(new_name, 'w') + self.addCleanup(closer(g)) + g['link'] = ExternalLink(orig_name, '/') # note root group + g.close() + + h = File(new_name, 'r') + self.addCleanup(closer(h)) + self.assertIsInstance(h['link']['a'], Group) + + +class TestCopy(TestCase): + + def setUp(self): + self.f1 = File(self.mktemp(), 'w') + self.f2 = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f1: + self.f1.close() + if self.f2: + self.f2.close() + + def test_copy_path_to_path(self): + foo = self.f1.create_group('foo') + foo['bar'] = [1,2,3] + + self.f1.copy('foo', 'baz') + baz = self.f1['baz'] + self.assertIsInstance(baz, Group) + self.assertArrayEqual(baz['bar'], np.array([1,2,3])) + + def test_copy_path_to_group(self): + foo = self.f1.create_group('foo') + foo['bar'] = [1,2,3] + baz = self.f1.create_group('baz') + + self.f1.copy('foo', baz) + baz = self.f1['baz'] + self.assertIsInstance(baz, Group) + self.assertArrayEqual(baz['foo/bar'], np.array([1,2,3])) + + self.f1.copy('foo', self.f2['/']) + self.assertIsInstance(self.f2['/foo'], Group) + self.assertArrayEqual(self.f2['foo/bar'], np.array([1,2,3])) + + def test_copy_group_to_path(self): + + foo = self.f1.create_group('foo') + foo['bar'] = [1,2,3] + + self.f1.copy(foo, 'baz') + baz = self.f1['baz'] + self.assertIsInstance(baz, Group) + self.assertArrayEqual(baz['bar'], np.array([1,2,3])) + + self.f2.copy(foo, 'foo') + self.assertIsInstance(self.f2['/foo'], Group) + self.assertArrayEqual(self.f2['foo/bar'], np.array([1,2,3])) + + def test_copy_group_to_group(self): + + foo = self.f1.create_group('foo') + foo['bar'] = [1,2,3] + baz = self.f1.create_group('baz') + + self.f1.copy(foo, baz) + 
baz = self.f1['baz'] + self.assertIsInstance(baz, Group) + self.assertArrayEqual(baz['foo/bar'], np.array([1,2,3])) + + self.f1.copy(foo, self.f2['/']) + self.assertIsInstance(self.f2['/foo'], Group) + self.assertArrayEqual(self.f2['foo/bar'], np.array([1,2,3])) + + def test_copy_dataset(self): + self.f1['foo'] = [1,2,3] + foo = self.f1['foo'] + grp = self.f1.create_group("grp") + + self.f1.copy(foo, 'bar') + self.assertArrayEqual(self.f1['bar'], np.array([1,2,3])) + + self.f1.copy('foo', 'baz') + self.assertArrayEqual(self.f1['baz'], np.array([1,2,3])) + + self.f1.copy(foo, grp) + self.assertArrayEqual(self.f1['/grp/foo'], np.array([1,2,3])) + + self.f1.copy('foo', self.f2) + self.assertArrayEqual(self.f2['foo'], np.array([1,2,3])) + + self.f2.copy(self.f1['foo'], self.f2, 'bar') + self.assertArrayEqual(self.f2['bar'], np.array([1,2,3])) + + def test_copy_shallow(self): + + foo = self.f1.create_group('foo') + bar = foo.create_group('bar') + foo['qux'] = [1,2,3] + bar['quux'] = [4,5,6] + + self.f1.copy(foo, 'baz', shallow=True) + baz = self.f1['baz'] + self.assertIsInstance(baz, Group) + self.assertIsInstance(baz['bar'], Group) + self.assertEqual(len(baz['bar']), 0) + self.assertArrayEqual(baz['qux'], np.array([1,2,3])) + + self.f2.copy(foo, 'foo', shallow=True) + self.assertIsInstance(self.f2['/foo'], Group) + self.assertIsInstance(self.f2['foo/bar'], Group) + self.assertEqual(len(self.f2['foo/bar']), 0) + self.assertArrayEqual(self.f2['foo/qux'], np.array([1,2,3])) + + def test_copy_without_attributes(self): + + self.f1['foo'] = [1,2,3] + foo = self.f1['foo'] + foo.attrs['bar'] = [4,5,6] + + self.f1.copy(foo, 'baz', without_attrs=True) + self.assertArrayEqual(self.f1['baz'], np.array([1,2,3])) + assert 'bar' not in self.f1['baz'].attrs + + self.f2.copy(foo, 'baz', without_attrs=True) + self.assertArrayEqual(self.f2['baz'], np.array([1,2,3])) + assert 'bar' not in self.f2['baz'].attrs + + def test_copy_soft_links(self): + + self.f1['bar'] = [1, 2, 3] + foo = self.f1.create_group('foo') + foo['baz'] = SoftLink('/bar') + + self.f1.copy(foo, 'qux', expand_soft=True) + self.f2.copy(foo, 'foo', expand_soft=True) + del self.f1['bar'] + + self.assertIsInstance(self.f1['qux'], Group) + self.assertArrayEqual(self.f1['qux/baz'], np.array([1, 2, 3])) + + self.assertIsInstance(self.f2['/foo'], Group) + self.assertArrayEqual(self.f2['foo/baz'], np.array([1, 2, 3])) + + def test_copy_external_links(self): + + filename = self.f1.filename + self.f1['foo'] = [1,2,3] + self.f2['bar'] = ExternalLink(filename, 'foo') + self.f1.close() + self.f1 = None + + self.assertArrayEqual(self.f2['bar'], np.array([1,2,3])) + + self.f2.copy('bar', 'baz', expand_external=True) + os.unlink(filename) + self.assertArrayEqual(self.f2['baz'], np.array([1,2,3])) + + def test_copy_refs(self): + + self.f1['foo'] = [1,2,3] + self.f1['bar'] = [4,5,6] + foo = self.f1['foo'] + bar = self.f1['bar'] + foo.attrs['bar'] = bar.ref + + self.f1.copy(foo, 'baz', expand_refs=True) + self.assertArrayEqual(self.f1['baz'], np.array([1,2,3])) + baz_bar = self.f1['baz'].attrs['bar'] + self.assertArrayEqual(self.f1[baz_bar], np.array([4,5,6])) + # The reference points to a copy of bar, not to bar itself. 
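+        # Editor's note: with expand_refs=True the copy also duplicates the
+        # referenced objects and rewrites the stored references to point at
+        # those duplicates rather than at the originals, hence the name check
+        # below.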
+ self.assertNotEqual(self.f1[baz_bar].name, bar.name) + + self.f1.copy('foo', self.f2, 'baz', expand_refs=True) + self.assertArrayEqual(self.f2['baz'], np.array([1,2,3])) + baz_bar = self.f2['baz'].attrs['bar'] + self.assertArrayEqual(self.f2[baz_bar], np.array([4,5,6])) + + self.f1.copy('/', self.f2, 'root', expand_refs=True) + self.assertArrayEqual(self.f2['root/foo'], np.array([1,2,3])) + self.assertArrayEqual(self.f2['root/bar'], np.array([4,5,6])) + foo_bar = self.f2['root/foo'].attrs['bar'] + self.assertArrayEqual(self.f2[foo_bar], np.array([4,5,6])) + # There's only one copy of bar, which the reference points to. + self.assertEqual(self.f2[foo_bar], self.f2['root/bar']) + + +class TestMove(BaseGroup): + + """ + Feature: Group.move moves links in a file + """ + + def test_move_hardlink(self): + """ Moving an object """ + grp = self.f.create_group("X") + self.f.move("X", "Y") + self.assertEqual(self.f["Y"], grp) + self.f.move("Y", "new/nested/path") + self.assertEqual(self.f['new/nested/path'], grp) + + def test_move_softlink(self): + """ Moving a soft link """ + self.f['soft'] = h5py.SoftLink("relative/path") + self.f.move('soft', 'new_soft') + lnk = self.f.get('new_soft', getlink=True) + self.assertEqual(lnk.path, "relative/path") + + def test_move_conflict(self): + """ Move conflict raises ValueError """ + self.f.create_group("X") + self.f.create_group("Y") + with self.assertRaises(ValueError): + self.f.move("X", "Y") + + def test_short_circuit(self): + ''' Test that a null-move works ''' + self.f.create_group("X") + self.f.move("X", "X") + + +class TestMutableMapping(BaseGroup): + '''Tests if the registration of Group as a MutableMapping + behaves as expected + ''' + def test_resolution(self): + assert issubclass(Group, MutableMapping) + grp = self.f.create_group("K") + assert isinstance(grp, MutableMapping) + + def test_validity(self): + ''' + Test that the required functions are implemented. + ''' + Group.__getitem__ + Group.__setitem__ + Group.__delitem__ + Group.__iter__ + Group.__len__ diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5.py b/MLPY/Lib/site-packages/h5py/tests/test_h5.py new file mode 100644 index 0000000000000000000000000000000000000000..a76cfc6727591ad7731c80e53daa6da8eba20c71 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5.py @@ -0,0 +1,45 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. 
+ +from h5py import h5 + +from .common import TestCase + +def fixnames(): + cfg = h5.get_config() + cfg.complex_names = ('r','i') + +class TestH5(TestCase): + + def test_config(self): + cfg = h5.get_config() + self.assertIsInstance(cfg, h5.H5PYConfig) + cfg2 = h5.get_config() + self.assertIs(cfg, cfg2) + + def test_cnames_get(self): + cfg = h5.get_config() + self.assertEqual(cfg.complex_names, ('r','i')) + + def test_cnames_set(self): + self.addCleanup(fixnames) + cfg = h5.get_config() + cfg.complex_names = ('q','x') + self.assertEqual(cfg.complex_names, ('q','x')) + + def test_cnames_set_exc(self): + self.addCleanup(fixnames) + cfg = h5.get_config() + with self.assertRaises(TypeError): + cfg.complex_names = ('q','i','v') + self.assertEqual(cfg.complex_names, ('r','i')) + + def test_repr(self): + cfg = h5.get_config() + repr(cfg) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5d_direct_chunk.py b/MLPY/Lib/site-packages/h5py/tests/test_h5d_direct_chunk.py new file mode 100644 index 0000000000000000000000000000000000000000..84bce9d044969f58827717b55988a9e83fedd675 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5d_direct_chunk.py @@ -0,0 +1,188 @@ +import h5py +import numpy +import numpy.testing +import pytest + +from .common import ut, TestCase + + +class TestWriteDirectChunk(TestCase): + def test_write_direct_chunk(self): + + filename = self.mktemp().encode() + with h5py.File(filename, "w") as filehandle: + + dataset = filehandle.create_dataset("data", (100, 100, 100), + maxshape=(None, 100, 100), + chunks=(1, 100, 100), + dtype='float32') + + # writing + array = numpy.zeros((10, 100, 100)) + for index in range(10): + a = numpy.random.rand(100, 100).astype('float32') + dataset.id.write_direct_chunk((index, 0, 0), a.tobytes(), filter_mask=1) + array[index] = a + + + # checking + with h5py.File(filename, "r") as filehandle: + for i in range(10): + read_data = filehandle["data"][i] + numpy.testing.assert_array_equal(array[i], read_data) + + +@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed") +class TestReadDirectChunk(TestCase): + def test_read_compressed_offsets(self): + + filename = self.mktemp().encode() + with h5py.File(filename, "w") as filehandle: + + frame = numpy.arange(16).reshape(4, 4) + frame_dataset = filehandle.create_dataset("frame", + data=frame, + compression="gzip", + compression_opts=9) + dataset = filehandle.create_dataset("compressed_chunked", + data=[frame, frame, frame], + compression="gzip", + compression_opts=9, + chunks=(1, ) + frame.shape) + filter_mask, compressed_frame = frame_dataset.id.read_direct_chunk((0, 0)) + # No filter must be disabled + self.assertEqual(filter_mask, 0) + + for i in range(dataset.shape[0]): + filter_mask, data = dataset.id.read_direct_chunk((i, 0, 0)) + self.assertEqual(compressed_frame, data) + # No filter must be disabled + self.assertEqual(filter_mask, 0) + + def test_read_uncompressed_offsets(self): + + filename = self.mktemp().encode() + frame = numpy.arange(16).reshape(4, 4) + with h5py.File(filename, "w") as filehandle: + dataset = filehandle.create_dataset("frame", + maxshape=(1,) + frame.shape, + shape=(1,) + frame.shape, + compression="gzip", + compression_opts=9) + # Write uncompressed data + DISABLE_ALL_FILTERS = 0xFFFFFFFF + dataset.id.write_direct_chunk((0, 0, 0), frame.tobytes(), filter_mask=DISABLE_ALL_FILTERS) + + # FIXME: Here we have to close the file and load it back else + # a runtime error occurs: + # RuntimeError: Can't get storage size of chunk (chunk storage is not allocated) 
+ with h5py.File(filename, "r") as filehandle: + dataset = filehandle["frame"] + filter_mask, compressed_frame = dataset.id.read_direct_chunk((0, 0, 0)) + + # At least 1 filter is supposed to be disabled + self.assertNotEqual(filter_mask, 0) + self.assertEqual(compressed_frame, frame.tobytes()) + + def test_read_write_chunk(self): + + filename = self.mktemp().encode() + with h5py.File(filename, "w") as filehandle: + + # create a reference + frame = numpy.arange(16).reshape(4, 4) + frame_dataset = filehandle.create_dataset("source", + data=frame, + compression="gzip", + compression_opts=9) + # configure an empty dataset + filter_mask, compressed_frame = frame_dataset.id.read_direct_chunk((0, 0)) + dataset = filehandle.create_dataset("created", + shape=frame_dataset.shape, + maxshape=frame_dataset.shape, + chunks=frame_dataset.chunks, + dtype=frame_dataset.dtype, + compression="gzip", + compression_opts=9) + + # copy the data + dataset.id.write_direct_chunk((0, 0), compressed_frame, filter_mask=filter_mask) + + # checking + with h5py.File(filename, "r") as filehandle: + dataset = filehandle["created"][...] + numpy.testing.assert_array_equal(dataset, frame) + + +class TestReadDirectChunkToOut: + + def test_uncompressed_data(self, writable_file): + ref_data = numpy.arange(16).reshape(4, 4) + dataset = writable_file.create_dataset( + "uncompressed", data=ref_data, chunks=ref_data.shape) + + out = bytearray(ref_data.nbytes) + filter_mask, chunk = dataset.id.read_direct_chunk((0, 0), out=out) + + assert numpy.array_equal( + numpy.frombuffer(out, dtype=ref_data.dtype).reshape(ref_data.shape), + ref_data, + ) + assert filter_mask == 0 + assert len(chunk) == ref_data.nbytes + + @pytest.mark.skipif( + h5py.version.hdf5_version_tuple < (1, 10, 5), + reason="chunk info requires HDF5 >= 1.10.5", + ) + @pytest.mark.skipif( + 'gzip' not in h5py.filters.encode, + reason="DEFLATE is not installed", + ) + def test_compressed_data(self, writable_file): + ref_data = numpy.arange(16).reshape(4, 4) + dataset = writable_file.create_dataset( + "gzip", + data=ref_data, + chunks=ref_data.shape, + compression="gzip", + compression_opts=9, + ) + chunk_info = dataset.id.get_chunk_info(0) + + out = bytearray(chunk_info.size) + filter_mask, chunk = dataset.id.read_direct_chunk( + chunk_info.chunk_offset, + out=out, + ) + assert filter_mask == chunk_info.filter_mask + assert len(chunk) == chunk_info.size + assert out == dataset.id.read_direct_chunk(chunk_info.chunk_offset)[1] + + def test_fail_buffer_too_small(self, writable_file): + ref_data = numpy.arange(16).reshape(4, 4) + dataset = writable_file.create_dataset( + "uncompressed", data=ref_data, chunks=ref_data.shape) + + out = bytearray(ref_data.nbytes // 2) + with pytest.raises(ValueError): + dataset.id.read_direct_chunk((0, 0), out=out) + + def test_fail_buffer_readonly(self, writable_file): + ref_data = numpy.arange(16).reshape(4, 4) + dataset = writable_file.create_dataset( + "uncompressed", data=ref_data, chunks=ref_data.shape) + + out = bytes(ref_data.nbytes) + with pytest.raises(BufferError): + dataset.id.read_direct_chunk((0, 0), out=out) + + def test_fail_buffer_not_contiguous(self, writable_file): + ref_data = numpy.arange(16).reshape(4, 4) + dataset = writable_file.create_dataset( + "uncompressed", data=ref_data, chunks=ref_data.shape) + + array = numpy.empty(ref_data.shape + (2,), dtype=ref_data.dtype) + out = array[:, :, ::2] # Array is not contiguous + with pytest.raises(ValueError): + dataset.id.read_direct_chunk((0, 0), out=out) diff --git 
a/MLPY/Lib/site-packages/h5py/tests/test_h5f.py b/MLPY/Lib/site-packages/h5py/tests/test_h5f.py new file mode 100644 index 0000000000000000000000000000000000000000..df9c518a500f19d0c3da6a5f828c4b8a5efbc169 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5f.py @@ -0,0 +1,109 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +import tempfile +import shutil +import os +import numpy as np +from h5py import File, special_dtype +from h5py._hl.files import direct_vfd + +from .common import ut, TestCase +from .common import ut, TestCase, UNICODE_FILENAMES, closed_tempfile + + +class TestFileID(TestCase): + def test_descriptor_core(self): + with File('TestFileID.test_descriptor_core', driver='core', + backing_store=False, mode='x') as f: + assert isinstance(f.id.get_vfd_handle(), int) + + def test_descriptor_sec2(self): + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_sec2') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, driver='sec2', mode='x') as f: + descriptor = f.id.get_vfd_handle() + self.assertNotEqual(descriptor, 0) + os.fsync(descriptor) + finally: + shutil.rmtree(dn_tmp) + + @ut.skipUnless(direct_vfd, + "DIRECT driver is supported on Linux if hdf5 is " + "built with the appriorate flags.") + def test_descriptor_direct(self): + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_direct') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, driver='direct', mode='x') as f: + descriptor = f.id.get_vfd_handle() + self.assertNotEqual(descriptor, 0) + os.fsync(descriptor) + finally: + shutil.rmtree(dn_tmp) + + +class TestCacheConfig(TestCase): + def test_simple_gets(self): + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_simple_gets') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, mode='x') as f: + hit_rate = f._id.get_mdc_hit_rate() + mdc_size = f._id.get_mdc_size() + + finally: + shutil.rmtree(dn_tmp) + + def test_hitrate_reset(self): + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_hitrate_reset') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, mode='x') as f: + hit_rate = f._id.get_mdc_hit_rate() + f._id.reset_mdc_hit_rate_stats() + hit_rate = f._id.get_mdc_hit_rate() + assert hit_rate == 0 + + finally: + shutil.rmtree(dn_tmp) + + def test_mdc_config_get(self): + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_mdc_config_get') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, mode='x') as f: + conf = f._id.get_mdc_config() + f._id.set_mdc_config(conf) + finally: + shutil.rmtree(dn_tmp) + + +class TestVlenData(TestCase): + def test_vlen_strings(self): + # Create file with dataset containing vlen arrays of vlen strings + dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestVlenStrings.test_vlen_strings') + fn_h5 = os.path.join(dn_tmp, 'test.h5') + try: + with File(fn_h5, mode='w') as h: + vlen_str = special_dtype(vlen=str) + vlen_vlen_str = special_dtype(vlen=vlen_str) + + ds = h.create_dataset('/com', (2,), dtype=vlen_vlen_str) + ds[0] = (np.array(["a", "b", "c"], dtype=vlen_vlen_str)) + ds[1] = (np.array(["d", "e", "f","g"], dtype=vlen_vlen_str)) + + with File(fn_h5, "r") as h: + ds = h["com"] + assert ds[0].tolist() == 
[b'a', b'b', b'c'] + assert ds[1].tolist() == [b'd', b'e', b'f', b'g'] + + finally: + shutil.rmtree(dn_tmp) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5o.py b/MLPY/Lib/site-packages/h5py/tests/test_h5o.py new file mode 100644 index 0000000000000000000000000000000000000000..a993ffc2e2165e2897414a97fe78da99989a0b99 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5o.py @@ -0,0 +1,21 @@ +import pytest + +from .common import TestCase +from h5py import File + + +class SampleException(Exception): + pass + +def throwing(name, obj): + print(name, obj) + raise SampleException("throwing exception") + +class TestVisit(TestCase): + def test_visit(self): + fname = self.mktemp() + fid = File(fname, 'w') + fid.create_dataset('foo', (100,), dtype='uint8') + with pytest.raises(SampleException, match='throwing exception'): + fid.visititems(throwing) + fid.close() diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5p.py b/MLPY/Lib/site-packages/h5py/tests/test_h5p.py new file mode 100644 index 0000000000000000000000000000000000000000..8189992637aaa7563dde6f57401c593d230d4d25 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5p.py @@ -0,0 +1,193 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +import unittest as ut + +from h5py import h5p, h5f, version + +from .common import TestCase + + +class TestLibver(TestCase): + + """ + Feature: Setting/getting lib ver bounds + """ + + def test_libver(self): + """ Test libver bounds set/get """ + plist = h5p.create(h5p.FILE_ACCESS) + plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST) + self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST), + plist.get_libver_bounds()) + + def test_libver_v18(self): + """ Test libver bounds set/get for H5F_LIBVER_V18""" + plist = h5p.create(h5p.FILE_ACCESS) + plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_V18) + self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_V18), + plist.get_libver_bounds()) + + def test_libver_v110(self): + """ Test libver bounds set/get for H5F_LIBVER_V110""" + plist = h5p.create(h5p.FILE_ACCESS) + plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V110) + self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V110), + plist.get_libver_bounds()) + + @ut.skipIf(version.hdf5_version_tuple < (1, 11, 4), + 'Requires HDF5 1.11.4 or later') + def test_libver_v112(self): + """ Test libver bounds set/get for H5F_LIBVER_V112""" + plist = h5p.create(h5p.FILE_ACCESS) + plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V112) + self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V112), + plist.get_libver_bounds()) + +class TestDA(TestCase): + ''' + Feature: setting/getting chunk cache size on a dataset access property list + ''' + def test_chunk_cache(self): + '''test get/set chunk cache ''' + dalist = h5p.create(h5p.DATASET_ACCESS) + nslots = 10000 # 40kb hash table + nbytes = 1000000 # 1MB cache size + w0 = .5 # even blend of eviction strategy + + dalist.set_chunk_cache(nslots, nbytes, w0) + self.assertEqual((nslots, nbytes, w0), + dalist.get_chunk_cache()) + + def test_efile_prefix(self): + '''test get/set efile prefix ''' + dalist = h5p.create(h5p.DATASET_ACCESS) + self.assertEqual(dalist.get_efile_prefix().decode(), '') + + efile_prefix = "path/to/external/dataset" + dalist.set_efile_prefix(efile_prefix.encode('utf-8')) + 
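+        # Editor's note: the external-file prefix tells HDF5 where to resolve a
+        # dataset's external storage files; the getter below should echo the
+        # value just set.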
self.assertEqual(dalist.get_efile_prefix().decode(), + efile_prefix) + + efile_prefix = "${ORIGIN}" + dalist.set_efile_prefix(efile_prefix.encode('utf-8')) + self.assertEqual(dalist.get_efile_prefix().decode(), + efile_prefix) + + def test_virtual_prefix(self): + '''test get/set virtual prefix ''' + dalist = h5p.create(h5p.DATASET_ACCESS) + self.assertEqual(dalist.get_virtual_prefix().decode(), '') + + virtual_prefix = "path/to/virtual/dataset" + dalist.set_virtual_prefix(virtual_prefix.encode('utf-8')) + self.assertEqual(dalist.get_virtual_prefix().decode(), + virtual_prefix) + + +class TestFA(TestCase): + ''' + Feature: setting/getting mdc config on a file access property list + ''' + def test_mdc_config(self): + '''test get/set mdc config ''' + falist = h5p.create(h5p.FILE_ACCESS) + + config = falist.get_mdc_config() + falist.set_mdc_config(config) + + def test_set_alignment(self): + '''test get/set chunk cache ''' + falist = h5p.create(h5p.FILE_ACCESS) + threshold = 10 * 1024 # threshold of 10kiB + alignment = 1024 * 1024 # threshold of 1kiB + + falist.set_alignment(threshold, alignment) + self.assertEqual((threshold, alignment), + falist.get_alignment()) + + @ut.skipUnless( + version.hdf5_version_tuple >= (1, 12, 1) or + (version.hdf5_version_tuple[:2] == (1, 10) and version.hdf5_version_tuple[2] >= 7), + 'Requires HDF5 1.12.1 or later or 1.10.x >= 1.10.7') + def test_set_file_locking(self): + '''test get/set file locking''' + falist = h5p.create(h5p.FILE_ACCESS) + use_file_locking = False + ignore_when_disabled = False + + falist.set_file_locking(use_file_locking, ignore_when_disabled) + self.assertEqual((use_file_locking, ignore_when_disabled), + falist.get_file_locking()) + + +class TestPL(TestCase): + def test_obj_track_times(self): + """ + tests if the object track times set/get + """ + # test for groups + gcid = h5p.create(h5p.GROUP_CREATE) + gcid.set_obj_track_times(False) + self.assertEqual(False, gcid.get_obj_track_times()) + + gcid.set_obj_track_times(True) + self.assertEqual(True, gcid.get_obj_track_times()) + # test for datasets + dcid = h5p.create(h5p.DATASET_CREATE) + dcid.set_obj_track_times(False) + self.assertEqual(False, dcid.get_obj_track_times()) + + dcid.set_obj_track_times(True) + self.assertEqual(True, dcid.get_obj_track_times()) + + # test for generic objects + ocid = h5p.create(h5p.OBJECT_CREATE) + ocid.set_obj_track_times(False) + self.assertEqual(False, ocid.get_obj_track_times()) + + ocid.set_obj_track_times(True) + self.assertEqual(True, ocid.get_obj_track_times()) + + def test_link_creation_tracking(self): + """ + tests the link creation order set/get + """ + + gcid = h5p.create(h5p.GROUP_CREATE) + gcid.set_link_creation_order(0) + self.assertEqual(0, gcid.get_link_creation_order()) + + flags = h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED + gcid.set_link_creation_order(flags) + self.assertEqual(flags, gcid.get_link_creation_order()) + + # test for file creation + fcpl = h5p.create(h5p.FILE_CREATE) + fcpl.set_link_creation_order(flags) + self.assertEqual(flags, fcpl.get_link_creation_order()) + + def test_attr_phase_change(self): + """ + test the attribute phase change + """ + + cid = h5p.create(h5p.OBJECT_CREATE) + # test default value + ret = cid.get_attr_phase_change() + self.assertEqual((8,6), ret) + + # max_compact must < 65536 (64kb) + with self.assertRaises(ValueError): + cid.set_attr_phase_change(65536, 6) + + # Using dense attributes storage to avoid 64kb size limitation + # for a single attribute in compact attribute storage. 
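+        # Editor's note: set_attr_phase_change(max_compact, min_dense); passing
+        # (0, 0) forces dense attribute storage from the first attribute.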
+ cid.set_attr_phase_change(0, 0) + self.assertEqual((0,0), cid.get_attr_phase_change()) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5pl.py b/MLPY/Lib/site-packages/h5py/tests/test_h5pl.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb842edee2216fd0e32d466143f7a19859887f0 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5pl.py @@ -0,0 +1,67 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2019 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +import pytest + +from h5py import h5pl +from h5py.tests.common import insubprocess, subproc_env + + +@pytest.mark.mpi_skip +@insubprocess +@subproc_env({'HDF5_PLUGIN_PATH': 'h5py_plugin_test'}) +def test_default(request): + assert h5pl.size() == 1 + assert h5pl.get(0) == b'h5py_plugin_test' + + +@pytest.mark.mpi_skip +@insubprocess +@subproc_env({'HDF5_PLUGIN_PATH': 'h5py_plugin_test'}) +def test_append(request): + h5pl.append(b'/opt/hdf5/vendor-plugin') + assert h5pl.size() == 2 + assert h5pl.get(0) == b'h5py_plugin_test' + assert h5pl.get(1) == b'/opt/hdf5/vendor-plugin' + + +@pytest.mark.mpi_skip +@insubprocess +@subproc_env({'HDF5_PLUGIN_PATH': 'h5py_plugin_test'}) +def test_prepend(request): + h5pl.prepend(b'/opt/hdf5/vendor-plugin') + assert h5pl.size() == 2 + assert h5pl.get(0) == b'/opt/hdf5/vendor-plugin' + assert h5pl.get(1) == b'h5py_plugin_test' + + +@pytest.mark.mpi_skip +@insubprocess +@subproc_env({'HDF5_PLUGIN_PATH': 'h5py_plugin_test'}) +def test_insert(request): + h5pl.insert(b'/opt/hdf5/vendor-plugin', 0) + assert h5pl.size() == 2 + assert h5pl.get(0) == b'/opt/hdf5/vendor-plugin' + assert h5pl.get(1) == b'h5py_plugin_test' + + +@pytest.mark.mpi_skip +@insubprocess +@subproc_env({'HDF5_PLUGIN_PATH': 'h5py_plugin_test'}) +def test_replace(request): + h5pl.replace(b'/opt/hdf5/vendor-plugin', 0) + assert h5pl.size() == 1 + assert h5pl.get(0) == b'/opt/hdf5/vendor-plugin' + + +@pytest.mark.mpi_skip +@insubprocess +def test_remove(request): + h5pl.remove(0) + assert h5pl.size() == 0 diff --git a/MLPY/Lib/site-packages/h5py/tests/test_h5t.py b/MLPY/Lib/site-packages/h5py/tests/test_h5t.py new file mode 100644 index 0000000000000000000000000000000000000000..cd8deecd20a61cdb979a065fe6747c962f408746 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_h5t.py @@ -0,0 +1,188 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. 
+ +import numpy as np + +import h5py +from h5py import h5t + +from .common import TestCase, ut + + +class TestCompound(ut.TestCase): + + """ + Feature: Compound types can be created from Python dtypes + """ + + def test_ref(self): + """ Reference types are correctly stored in compound types (issue 144) + """ + dt = np.dtype([('a', h5py.ref_dtype), ('b', ' all fields + out, format = sel2.read_dtypes(dt, ()) + self.assertEqual(out, format) + self.assertEqual(out, dt) + + # Explicit selection of fields -> requested fields + out, format = sel2.read_dtypes(dt, ('a','b')) + self.assertEqual(out, format) + self.assertEqual(out, np.dtype( [('a','i'), ('b','f')] )) + + # Explicit selection of exactly one field -> no fields + out, format = sel2.read_dtypes(dt, ('a',)) + self.assertEqual(out, np.dtype('i')) + self.assertEqual(format, np.dtype( [('a','i')] )) + + # Field does not appear in named typed + with self.assertRaises(ValueError): + out, format = sel2.read_dtypes(dt, ('j', 'k')) + +class TestScalarSliceRules(BaseSelection): + + """ + Internal feature: selections rules for scalar datasets + """ + + def test_args(self): + """ Permissible arguments for scalar slicing """ + shape, selection = sel2.read_selections_scalar(self.dsid, ()) + self.assertEqual(shape, None) + self.assertEqual(selection.get_select_npoints(), 1) + + shape, selection = sel2.read_selections_scalar(self.dsid, (Ellipsis,)) + self.assertEqual(shape, ()) + self.assertEqual(selection.get_select_npoints(), 1) + + with self.assertRaises(ValueError): + shape, selection = sel2.read_selections_scalar(self.dsid, (1,)) + + dsid = self.f.create_dataset('y', (1,)).id + with self.assertRaises(RuntimeError): + shape, selection = sel2.read_selections_scalar(dsid, (1,)) + +class TestSelection(BaseSelection): + + """ High-level routes to generate a selection + """ + + def test_selection(self): + dset = self.f.create_dataset('dset', (100,100)) + regref = dset.regionref[0:100, 0:100] + + # args is list, return a FancySelection + st = sel.select((10,), list([1,2,3]), dset) + self.assertIsInstance(st, sel.FancySelection) + + # args[0] is tuple, return a FancySelection + st = sel.select((10,), ((1, 2, 3),), dset) + self.assertIsInstance(st, sel.FancySelection) + + # args is a Boolean mask, return a PointSelection + st1 = sel.select((5,), np.array([True,False,False,False,True]), dset) + self.assertIsInstance(st1, sel.PointSelection) + + # args is int, return a SimpleSelection + st2 = sel.select((10,), 1, dset) + self.assertIsInstance(st2, sel.SimpleSelection) + + # args is str, should be rejected + with self.assertRaises(TypeError): + sel.select((100,), "foo", dset) + + # args is RegionReference, return a Selection instance + st3 = sel.select((100,100), regref, dset) + self.assertIsInstance(st3, sel.Selection) + + # args is RegionReference, but dataset is None + with self.assertRaises(TypeError): + sel.select((100,), regref, None) + + # args is RegionReference, but its shape doesn't match dataset shape + with self.assertRaises(TypeError): + sel.select((100,), regref, dset) + + # args is a single Selection instance, return the arg + st4 = sel.select((100,100), st3, dset) + self.assertEqual(st4,st3) + + # args is a single Selection instance, but args shape doesn't match Shape + with self.assertRaises(TypeError): + sel.select((100,), st3, dset) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_slicing.py b/MLPY/Lib/site-packages/h5py/tests/test_slicing.py new file mode 100644 index 
0000000000000000000000000000000000000000..dee2c1b288918df5a1e0cfbac64650aac11d7bfa --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_slicing.py @@ -0,0 +1,416 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Dataset slicing test module. + + Tests all supported slicing operations, including read/write and + broadcasting operations. Does not test type conversion except for + corner cases overlapping with slicing; for example, when selecting + specific fields of a compound type. +""" + +import numpy as np + +from .common import ut, TestCase + +import h5py +from h5py import h5s, h5t, h5d +from h5py import File, MultiBlockSlice + +class BaseSlicing(TestCase): + + def setUp(self): + self.f = File(self.mktemp(), 'w') + + def tearDown(self): + if self.f: + self.f.close() + +class TestSingleElement(BaseSlicing): + + """ + Feature: Retrieving a single element works with NumPy semantics + """ + + def test_single_index(self): + """ Single-element selection with [index] yields array scalar """ + dset = self.f.create_dataset('x', (1,), dtype='i1') + out = dset[0] + self.assertIsInstance(out, np.int8) + + def test_single_null(self): + """ Single-element selection with [()] yields ndarray """ + dset = self.f.create_dataset('x', (1,), dtype='i1') + out = dset[()] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, (1,)) + + def test_scalar_index(self): + """ Slicing with [...] yields scalar ndarray """ + dset = self.f.create_dataset('x', shape=(), dtype='f') + out = dset[...] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, ()) + + def test_scalar_null(self): + """ Slicing with [()] yields array scalar """ + dset = self.f.create_dataset('x', shape=(), dtype='i1') + out = dset[()] + self.assertIsInstance(out, np.int8) + + def test_compound(self): + """ Compound scalar is numpy.void, not tuple (issue 135) """ + dt = np.dtype([('a','i4'),('b','f8')]) + v = np.ones((4,), dtype=dt) + dset = self.f.create_dataset('foo', (4,), data=v) + self.assertEqual(dset[0], v[0]) + self.assertIsInstance(dset[0], np.void) + +class TestObjectIndex(BaseSlicing): + + """ + Feature: numpy.object_ subtypes map to real Python objects + """ + + def test_reference(self): + """ Indexing a reference dataset returns a h5py.Reference instance """ + dset = self.f.create_dataset('x', (1,), dtype=h5py.ref_dtype) + dset[0] = self.f.ref + self.assertEqual(type(dset[0]), h5py.Reference) + + def test_regref(self): + """ Indexing a region reference dataset returns a h5py.RegionReference + """ + dset1 = self.f.create_dataset('x', (10,10)) + regref = dset1.regionref[...] 
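+        # Editor's note: dset.regionref[...] builds an h5py.RegionReference for
+        # the selected region, which can then be stored in a dataset created
+        # with h5py.regionref_dtype, as done below.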
+ dset2 = self.f.create_dataset('y', (1,), dtype=h5py.regionref_dtype) + dset2[0] = regref + self.assertEqual(type(dset2[0]), h5py.RegionReference) + + def test_reference_field(self): + """ Compound types of which a reference is an element work right """ + dt = np.dtype([('a', 'i'),('b', h5py.ref_dtype)]) + + dset = self.f.create_dataset('x', (1,), dtype=dt) + dset[0] = (42, self.f['/'].ref) + + out = dset[0] + self.assertEqual(type(out[1]), h5py.Reference) # isinstance does NOT work + + def test_scalar(self): + """ Indexing returns a real Python object on scalar datasets """ + dset = self.f.create_dataset('x', (), dtype=h5py.ref_dtype) + dset[()] = self.f.ref + self.assertEqual(type(dset[()]), h5py.Reference) + + def test_bytestr(self): + """ Indexing a byte string dataset returns a real python byte string + """ + dset = self.f.create_dataset('x', (1,), dtype=h5py.string_dtype(encoding='ascii')) + dset[0] = b"Hello there!" + self.assertEqual(type(dset[0]), bytes) + +class TestSimpleSlicing(TestCase): + + """ + Feature: Simple NumPy-style slices (start:stop:step) are supported. + """ + + def setUp(self): + self.f = File(self.mktemp(), 'w') + self.arr = np.arange(10) + self.dset = self.f.create_dataset('x', data=self.arr) + + def tearDown(self): + if self.f: + self.f.close() + + def test_negative_stop(self): + """ Negative stop indexes work as they do in NumPy """ + self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2]) + + def test_write(self): + """Assigning to a 1D slice of a 2D dataset + """ + dset = self.f.create_dataset('x2', (10, 2)) + + x = np.zeros((10, 1)) + dset[:, 0] = x[:, 0] + with self.assertRaises(TypeError): + dset[:, 1] = x + +class TestArraySlicing(BaseSlicing): + + """ + Feature: Array types are handled appropriately + """ + + def test_read(self): + """ Read arrays tack array dimensions onto end of shape tuple """ + dt = np.dtype('(3,)f8') + dset = self.f.create_dataset('x',(10,),dtype=dt) + self.assertEqual(dset.shape, (10,)) + self.assertEqual(dset.dtype, dt) + + # Full read + out = dset[...] + self.assertEqual(out.dtype, np.dtype('f8')) + self.assertEqual(out.shape, (10,3)) + + # Single element + out = dset[0] + self.assertEqual(out.dtype, np.dtype('f8')) + self.assertEqual(out.shape, (3,)) + + # Range + out = dset[2:8:2] + self.assertEqual(out.dtype, np.dtype('f8')) + self.assertEqual(out.shape, (3,3)) + + def test_write_broadcast(self): + """ Array fill from constant is not supported (issue 211). + """ + dt = np.dtype('(3,)i') + + dset = self.f.create_dataset('x', (10,), dtype=dt) + + with self.assertRaises(TypeError): + dset[...] = 42 + + def test_write_element(self): + """ Write a single element to the array + + Issue 211. + """ + dt = np.dtype('(3,)f8') + dset = self.f.create_dataset('x', (10,), dtype=dt) + + data = np.array([1,2,3.0]) + dset[4] = data + + out = dset[4] + self.assertTrue(np.all(out == data)) + + def test_write_slices(self): + """ Write slices to array type """ + dt = np.dtype('(3,)i') + + data1 = np.ones((2,), dtype=dt) + data2 = np.ones((4,5), dtype=dt) + + dset = self.f.create_dataset('x', (10,9,11), dtype=dt) + + dset[0,0,2:4] = data1 + self.assertArrayEqual(dset[0,0,2:4], data1) + + dset[3, 1:5, 6:11] = data2 + self.assertArrayEqual(dset[3, 1:5, 6:11], data2) + + + def test_roundtrip(self): + """ Read the contents of an array and write them back + + Issue 211. + """ + dt = np.dtype('(3,)f8') + dset = self.f.create_dataset('x', (10,), dtype=dt) + + out = dset[...] + dset[...] = out + + self.assertTrue(np.all(dset[...] 
== out)) + + +class TestZeroLengthSlicing(BaseSlicing): + + """ + Slices resulting in empty arrays + """ + + def test_slice_zero_length_dimension(self): + """ Slice a dataset with a zero in its shape vector + along the zero-length dimension """ + for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]): + dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape)) + self.assertEqual(dset.shape, shape) + out = dset[...] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, shape) + out = dset[:] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, shape) + if len(shape) > 1: + out = dset[:, :1] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape[:2], (0, 1)) + + def test_slice_other_dimension(self): + """ Slice a dataset with a zero in its shape vector + along a non-zero-length dimension """ + for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]): + dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape)) + self.assertEqual(dset.shape, shape) + out = dset[:1] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, (1,)+shape[1:]) + + def test_slice_of_length_zero(self): + """ Get a slice of length zero from a non-empty dataset """ + for i, shape in enumerate([(3,), (2, 2,), (2, 1, 5)]): + dset = self.f.create_dataset('x%d'%i, data=np.zeros(shape, int), maxshape=(None,)*len(shape)) + self.assertEqual(dset.shape, shape) + out = dset[1:1] + self.assertIsInstance(out, np.ndarray) + self.assertEqual(out.shape, (0,)+shape[1:]) + +class TestFieldNames(BaseSlicing): + + """ + Field names for read & write + """ + + dt = np.dtype([('a', 'f'), ('b', 'i'), ('c', 'f4')]) + data = np.ones((100,), dtype=dt) + + def setUp(self): + BaseSlicing.setUp(self) + self.dset = self.f.create_dataset('x', (100,), dtype=self.dt) + self.dset[...] = self.data + + def test_read(self): + """ Test read with field selections """ + self.assertArrayEqual(self.dset['a'], self.data['a']) + + def test_unicode_names(self): + """ Unicode field names for for read and write """ + self.assertArrayEqual(self.dset['a'], self.data['a']) + self.dset['a'] = 42 + data = self.data.copy() + data['a'] = 42 + self.assertArrayEqual(self.dset['a'], data['a']) + + def test_write(self): + """ Test write with field selections """ + data2 = self.data.copy() + data2['a'] *= 2 + self.dset['a'] = data2 + self.assertTrue(np.all(self.dset[...] == data2)) + data2['b'] *= 4 + self.dset['b'] = data2 + self.assertTrue(np.all(self.dset[...] == data2)) + data2['a'] *= 3 + data2['c'] *= 3 + self.dset['a','c'] = data2 + self.assertTrue(np.all(self.dset[...] == data2)) + + def test_write_noncompound(self): + """ Test write with non-compound source (single-field) """ + data2 = self.data.copy() + data2['b'] = 1.0 + self.dset['b'] = 1.0 + self.assertTrue(np.all(self.dset[...] 
== data2)) + + +class TestMultiBlockSlice(BaseSlicing): + + def setUp(self): + super().setUp() + self.arr = np.arange(10) + self.dset = self.f.create_dataset('x', data=self.arr) + + def test_default(self): + # Default selects entire dataset as one block + mbslice = MultiBlockSlice() + + self.assertEqual(mbslice.indices(10), (0, 1, 10, 1)) + np.testing.assert_array_equal(self.dset[mbslice], self.arr) + + def test_default_explicit(self): + mbslice = MultiBlockSlice(start=0, count=10, stride=1, block=1) + + self.assertEqual(mbslice.indices(10), (0, 1, 10, 1)) + np.testing.assert_array_equal(self.dset[mbslice], self.arr) + + def test_start(self): + mbslice = MultiBlockSlice(start=4) + + self.assertEqual(mbslice.indices(10), (4, 1, 6, 1)) + np.testing.assert_array_equal(self.dset[mbslice], np.array([4, 5, 6, 7, 8, 9])) + + def test_count(self): + mbslice = MultiBlockSlice(count=7) + + self.assertEqual(mbslice.indices(10), (0, 1, 7, 1)) + np.testing.assert_array_equal( + self.dset[mbslice], np.array([0, 1, 2, 3, 4, 5, 6]) + ) + + def test_count_more_than_length_error(self): + mbslice = MultiBlockSlice(count=11) + with self.assertRaises(ValueError): + mbslice.indices(10) + + def test_stride(self): + mbslice = MultiBlockSlice(stride=2) + + self.assertEqual(mbslice.indices(10), (0, 2, 5, 1)) + np.testing.assert_array_equal(self.dset[mbslice], np.array([0, 2, 4, 6, 8])) + + def test_stride_zero_error(self): + with self.assertRaises(ValueError): + # This would cause a ZeroDivisionError if not caught + MultiBlockSlice(stride=0, block=0).indices(10) + + def test_stride_block_equal(self): + mbslice = MultiBlockSlice(stride=2, block=2) + + self.assertEqual(mbslice.indices(10), (0, 2, 5, 2)) + np.testing.assert_array_equal(self.dset[mbslice], self.arr) + + def test_block_more_than_stride_error(self): + with self.assertRaises(ValueError): + MultiBlockSlice(block=3) + + with self.assertRaises(ValueError): + MultiBlockSlice(stride=2, block=3) + + def test_stride_more_than_block(self): + mbslice = MultiBlockSlice(stride=3, block=2) + + self.assertEqual(mbslice.indices(10), (0, 3, 3, 2)) + np.testing.assert_array_equal(self.dset[mbslice], np.array([0, 1, 3, 4, 6, 7])) + + def test_block_overruns_extent_error(self): + # If fully described then must fit within extent + mbslice = MultiBlockSlice(start=2, count=2, stride=5, block=4) + with self.assertRaises(ValueError): + mbslice.indices(10) + + def test_fully_described(self): + mbslice = MultiBlockSlice(start=1, count=2, stride=5, block=4) + + self.assertEqual(mbslice.indices(10), (1, 5, 2, 4)) + np.testing.assert_array_equal( + self.dset[mbslice], np.array([1, 2, 3, 4, 6, 7, 8, 9]) + ) + + def test_count_calculated(self): + # If not given, count should be calculated to select as many full blocks as possible + mbslice = MultiBlockSlice(start=1, stride=3, block=2) + + self.assertEqual(mbslice.indices(10), (1, 3, 3, 2)) + np.testing.assert_array_equal(self.dset[mbslice], np.array([1, 2, 4, 5, 7, 8])) + + def test_zero_count_calculated_error(self): + # In this case, there is no possible count to select even one block, so error + mbslice = MultiBlockSlice(start=8, stride=4, block=3) + + with self.assertRaises(ValueError): + mbslice.indices(10) diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/__init__.py b/MLPY/Lib/site-packages/h5py/tests/test_vds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1587df12c0d931a843b4316e05d95e04a80e528a --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_vds/__init__.py @@ -0,0 +1,4 @@ + 
+from .test_virtual_source import * +from .test_highlevel_vds import * +from .test_lowlevel_vds import * diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6c163a03e1c0012167d1b42b42529ddb349261a Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_highlevel_vds.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_highlevel_vds.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..239018529d9a896cdeb50a0f2e4a249507b966cc Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_highlevel_vds.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_lowlevel_vds.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_lowlevel_vds.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a265cdc074939488926766aeef732ced487acdc0 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_lowlevel_vds.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_virtual_source.cpython-39.pyc b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_virtual_source.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34f120fa37c4dc0b0de959476edb933cd0e9436e Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/tests/test_vds/__pycache__/test_virtual_source.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/test_highlevel_vds.py b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_highlevel_vds.py new file mode 100644 index 0000000000000000000000000000000000000000..b9a084a1f73b81c742fb92fc594e47e2e32b6b49 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_highlevel_vds.py @@ -0,0 +1,465 @@ +''' +Unit test for the high level vds interface for eiger +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' +import numpy as np +from numpy.testing import assert_array_equal +import os +import os.path as osp +import shutil +import tempfile + +import h5py as h5 +from ..common import ut +from ..._hl.vds import vds_support + + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class TestEigerHighLevel(ut.TestCase): + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ['raw_file_1.h5', 'raw_file_2.h5', 'raw_file_3.h5'] + for k, outfile in enumerate(self.fname): + filename = osp.join(self.working_dir, outfile) + f = h5.File(filename, 'w') + f['data'] = np.ones((20, 200, 200)) * k + f.close() + + f = h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w') + f['data'] = np.ones((18, 200, 200)) * 3 + self.fname.append('raw_file_4.h5') + self.fname = [osp.join(self.working_dir, ix) for ix in self.fname] + f.close() + + def test_eiger_high_level(self): + outfile = osp.join(self.working_dir, 'eiger.h5') + layout = h5.VirtualLayout(shape=(78, 200, 200), dtype=float) + + M_minus_1 = 0 + # Create the virtual dataset file + with h5.File(outfile, 'w', libver='latest') as f: + for foo in self.fname: + in_data = h5.File(foo, 'r')['data'] + src_shape = in_data.shape + in_data.file.close() + M = M_minus_1 + 
src_shape[0] + vsource = h5.VirtualSource(foo, 'data', shape=src_shape) + layout[M_minus_1:M, :, :] = vsource + M_minus_1 = M + f.create_virtual_dataset('data', layout, fillvalue=45) + + f = h5.File(outfile, 'r')['data'] + self.assertEqual(f[10, 100, 10], 0.0) + self.assertEqual(f[30, 100, 100], 1.0) + self.assertEqual(f[50, 100, 100], 2.0) + self.assertEqual(f[70, 100, 100], 3.0) + f.file.close() + + def tearDown(self): + shutil.rmtree(self.working_dir) + +''' +Unit test for the high level vds interface for excalibur +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' + +class ExcaliburData: + FEM_PIXELS_PER_CHIP_X = 256 + FEM_PIXELS_PER_CHIP_Y = 256 + FEM_CHIPS_PER_STRIPE_X = 8 + FEM_CHIPS_PER_STRIPE_Y = 1 + FEM_STRIPES_PER_MODULE = 2 + + @property + def sensor_module_dimensions(self): + x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X + y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y * self.FEM_STRIPES_PER_MODULE + return y_pixels, x_pixels, + + @property + def fem_stripe_dimensions(self): + x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X + y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y + return y_pixels, x_pixels, + + def generate_sensor_module_image(self, value, dtype='uint16'): + dset = np.empty(shape=self.sensor_module_dimensions, dtype=dtype) + dset.fill(value) + return dset + + def generate_fem_stripe_image(self, value, dtype='uint16'): + dset = np.empty(shape=self.fem_stripe_dimensions, dtype=dtype) + dset.fill(value) + return dset + + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class TestExcaliburHighLevel(ut.TestCase): + def create_excalibur_fem_stripe_datafile(self, fname, nframes, excalibur_data,scale): + shape = (nframes,) + excalibur_data.fem_stripe_dimensions + max_shape = shape#(None,) + excalibur_data.fem_stripe_dimensions + chunk = (1,) + excalibur_data.fem_stripe_dimensions + with h5.File(fname, 'w', libver='latest') as f: + dset = f.create_dataset('data', shape=shape, maxshape=max_shape, chunks=chunk, dtype='uint16') + for data_value_index in np.arange(nframes): + dset[data_value_index] = excalibur_data.generate_fem_stripe_image(data_value_index*scale) + + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ["stripe_%d.h5" % stripe for stripe in range(1,7)] + self.fname = [osp.join(self.working_dir, f) for f in self.fname] + nframes = 5 + self.edata = ExcaliburData() + for k, raw_file in enumerate(self.fname): + self.create_excalibur_fem_stripe_datafile(raw_file, nframes, self.edata,k) + + def test_excalibur_high_level(self): + outfile = osp.join(self.working_dir, 'excalibur.h5') + f = h5.File(outfile,'w',libver='latest') # create an output file. + in_key = 'data' # where is the data at the input? 
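+        # The six stripe files are stacked vertically in the virtual layout with a
+        # 10-pixel gap between stripes, so the output height computed below is
+        # nfiles * stripe_height + (nfiles - 1) * vertical_gap.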
+ in_sh = h5.File(self.fname[0],'r')[in_key].shape # get the input shape + dtype = h5.File(self.fname[0],'r')[in_key].dtype # get the datatype + + # now generate the output shape + vertical_gap = 10 # pixels spacing in the vertical + nfiles = len(self.fname) + nframes = in_sh[0] + width = in_sh[2] + height = (in_sh[1]*nfiles) + (vertical_gap*(nfiles-1)) + out_sh = (nframes, height, width) + + # Virtual layout is a representation of the output dataset + layout = h5.VirtualLayout(shape=out_sh, dtype=dtype) + offset = 0 # initial offset + for i, filename in enumerate(self.fname): + # A representation of the input dataset + vsource = h5.VirtualSource(filename, in_key, shape=in_sh) + layout[:, offset:(offset + in_sh[1]), :] = vsource # map them with indexing + offset += in_sh[1] + vertical_gap # increment the offset + + # pass the fill value and list of maps + f.create_virtual_dataset('data', layout, fillvalue=0x1) + f.close() + + f = h5.File(outfile,'r')['data'] + self.assertEqual(f[3,100,0], 0.0) + self.assertEqual(f[3,260,0], 1.0) + self.assertEqual(f[3,350,0], 3.0) + self.assertEqual(f[3,650,0], 6.0) + self.assertEqual(f[3,900,0], 9.0) + self.assertEqual(f[3,1150,0], 12.0) + self.assertEqual(f[3,1450,0], 15.0) + f.file.close() + + def tearDown(self): + shutil.rmtree(self.working_dir) + + +''' +Unit test for the high level vds interface for percival +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' + + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class TestPercivalHighLevel(ut.TestCase): + + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ['raw_file_1.h5','raw_file_2.h5','raw_file_3.h5'] + k = 0 + for outfile in self.fname: + filename = osp.join(self.working_dir, outfile) + f = h5.File(filename,'w') + f['data'] = np.ones((20,200,200))*k + k +=1 + f.close() + + f = h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w') + f['data'] = np.ones((19,200,200))*3 + self.fname.append('raw_file_4.h5') + self.fname = [osp.join(self.working_dir, ix) for ix in self.fname] + f.close() + + def test_percival_high_level(self): + outfile = osp.join(self.working_dir, 'percival.h5') + + # Virtual layout is a representation of the output dataset + layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=np.float64) + for k, filename in enumerate(self.fname): + dim1 = 19 if k == 3 else 20 + vsource = h5.VirtualSource(filename, 'data',shape=(dim1, 200, 200)) + layout[k:79:4, :, :] = vsource[:, :, :] + + # Create the virtual dataset file + with h5.File(outfile, 'w', libver='latest') as f: + f.create_virtual_dataset('data', layout, fillvalue=-5) + + foo = np.array(2 * list(range(4))) + with h5.File(outfile,'r') as f: + ds = f['data'] + line = ds[:8,100,100] + self.assertEqual(ds.shape, (79,200,200),) + assert_array_equal(line, foo) + + def test_percival_source_from_dataset(self): + outfile = osp.join(self.working_dir, 'percival.h5') + + # Virtual layout is a representation of the output dataset + layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=np.float64) + for k, filename in enumerate(self.fname): + with h5.File(filename, 'r') as f: + vsource = h5.VirtualSource(f['data']) + layout[k:79:4, :, :] = vsource + + # Create the virtual dataset file + with h5.File(outfile, 'w', libver='latest') as f: + f.create_virtual_dataset('data', layout, fillvalue=-5) + + foo = np.array(2 * list(range(4))) + with h5.File(outfile,'r') as f: + ds = f['data'] + line = ds[:8,100,100] + self.assertEqual(ds.shape, (79,200,200),) + 
assert_array_equal(line, foo) + + def tearDown(self): + shutil.rmtree(self.working_dir) + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class SlicingTestCase(ut.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + # Create source files (1.h5 to 4.h5) + for n in range(1, 5): + with h5.File(osp.join(self.tmpdir, '{}.h5'.format(n)), 'w') as f: + d = f.create_dataset('data', (100,), 'i4') + d[:] = np.arange(100) + n + + def make_virtual_ds(self): + # Assemble virtual dataset + layout = h5.VirtualLayout((4, 100), 'i4', maxshape=(4, None)) + + for n in range(1, 5): + filename = osp.join(self.tmpdir, "{}.h5".format(n)) + vsource = h5.VirtualSource(filename, 'data', shape=(100,)) + # Fill the first half with positions 0, 2, 4... from the source + layout[n - 1, :50] = vsource[0:100:2] + # Fill the second half with places 1, 3, 5... from the source + layout[n - 1, 50:] = vsource[1:100:2] + + outfile = osp.join(self.tmpdir, 'VDS.h5') + + # Add virtual dataset to output file + with h5.File(outfile, 'w', libver='latest') as f: + f.create_virtual_dataset('/group/data', layout, fillvalue=-5) + + return outfile + + def test_slice_source(self): + outfile = self.make_virtual_ds() + + with h5.File(outfile, 'r') as f: + assert_array_equal(f['/group/data'][0][:3], [1, 3, 5]) + assert_array_equal(f['/group/data'][0][50:53], [2, 4, 6]) + assert_array_equal(f['/group/data'][3][:3], [4, 6, 8]) + assert_array_equal(f['/group/data'][3][50:53], [5, 7, 9]) + + def test_inspection(self): + with h5.File(osp.join(self.tmpdir, '1.h5'), 'r') as f: + assert not f['data'].is_virtual + + outfile = self.make_virtual_ds() + + with h5.File(outfile, 'r') as f: + ds = f['/group/data'] + assert ds.is_virtual + + src_files = {osp.join(self.tmpdir, '{}.h5'.format(n)) + for n in range(1, 5)} + assert {s.file_name for s in ds.virtual_sources()} == src_files + + def test_mismatched_selections(self): + layout = h5.VirtualLayout((4, 100), 'i4', maxshape=(4, None)) + + filename = osp.join(self.tmpdir, "1.h5") + vsource = h5.VirtualSource(filename, 'data', shape=(100,)) + with self.assertRaisesRegex(ValueError, r'different number'): + layout[0, :49] = vsource[0:100:2] + + def tearDown(self): + shutil.rmtree(self.tmpdir) + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class IndexingTestCase(ut.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + # Create source file (1.h5) + with h5.File(osp.join(self.tmpdir, '1.h5'), 'w') as f: + d = f.create_dataset('data', (10,), 'i4') + d[:] = np.arange(10)*10 + + def test_index_layout(self): + # Assemble virtual dataset (indexing target) + layout = h5.VirtualLayout((100,), 'i4') + + inds = [3,6,20,25,33,47,70,75,96,98] + + filename = osp.join(self.tmpdir, "1.h5") + vsource = h5.VirtualSource(filename, 'data', shape=(10,)) + layout[inds] = vsource + + outfile = osp.join(self.tmpdir, 'VDS.h5') + + # Assembly virtual dataset (indexing source) + layout2 = h5.VirtualLayout((6,), 'i4') + + inds2 = [0,1,4,5,8] + layout2[1:] = vsource[inds2] + + # Add virtual datasets to output file and close + with h5.File(outfile, 'w', libver='latest') as f: + f.create_virtual_dataset('/data', layout, fillvalue=-5) + f.create_virtual_dataset(b'/data2', layout2, fillvalue=-3) + + # Read data from virtual datasets + with h5.File(outfile, 'r') as f: + data = f['/data'][()] + data2 = f['/data2'][()] + + # Verify + assert_array_equal(data[inds], np.arange(10)*10) + assert_array_equal(data2[1:], [0,10,40,50,80]) + + mask = np.zeros(100) + mask[inds] = 1 + 
self.assertEqual(data[mask == 0].min(), -5) + self.assertEqual(data[mask == 0].max(), -5) + self.assertEqual(data2[0], -3) + + def tearDown(self): + shutil.rmtree(self.tmpdir) + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class RelativeLinkTestCase(ut.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + self.f1 = osp.join(self.tmpdir, 'testfile1.h5') + self.f2 = osp.join(self.tmpdir, 'testfile2.h5') + + self.data1 = np.arange(10) + self.data2 = np.arange(10) * -1 + + with h5.File(self.f1, 'w') as f: + # dataset + ds = f.create_dataset('data', (10,), 'f4') + ds[:] = self.data1 + + with h5.File(self.f2, 'w') as f: + # dataset + ds = f.create_dataset('data', (10,), 'f4') + ds[:] = self.data2 + self.make_vds(f) + + def make_vds(self, f): + # virtual dataset + layout = h5.VirtualLayout((2, 10), 'f4') + vsource1 = h5.VirtualSource(self.f1, 'data', shape=(10,)) + vsource2 = h5.VirtualSource(self.f2, 'data', shape=(10,)) + layout[0] = vsource1 + layout[1] = vsource2 + f.create_virtual_dataset('virtual', layout) + + def test_relative_vds(self): + with h5.File(self.f2) as f: + data = f['virtual'][:] + np.testing.assert_array_equal(data[0], self.data1) + np.testing.assert_array_equal(data[1], self.data2) + + # move f2 -> f3 + f3 = osp.join(self.tmpdir, 'testfile3.h5') + os.rename(self.f2, f3) + + with h5.File(f3) as f: + data = f['virtual'][:] + assert data.dtype == 'f4' + np.testing.assert_array_equal(data[0], self.data1) + np.testing.assert_array_equal(data[1], self.data2) + + # moving other file + f4 = osp.join(self.tmpdir, 'testfile4.h5') + os.rename(self.f1, f4) + + with h5.File(f3) as f: + data = f['virtual'][:] + assert data.dtype == 'f4' + # unavailable data is silently converted to default value + np.testing.assert_array_equal(data[0], 0) + np.testing.assert_array_equal(data[1], self.data2) + + def tearDown(self): + shutil.rmtree(self.tmpdir) + +class RelativeLinkBuildVDSTestCase(RelativeLinkTestCase): + # Test a link to the same file with the virtual dataset created by + # File.build_virtual_dataset() + def make_vds(self, f): + with f.build_virtual_dataset('virtual', (2, 10), dtype='f4') as layout: + layout[0] = h5.VirtualSource(self.f1, 'data', shape=(10,)) + layout[1] = h5.VirtualSource(self.f2, 'data', shape=(10,)) + +@ut.skipUnless(vds_support, + 'VDS requires HDF5 >= 1.9.233') +class VDSUnlimitedTestCase(ut.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + self.path = osp.join(self.tmpdir, "resize.h5") + with h5.File(self.path, "w") as f: + source_dset = f.create_dataset( + "source", + data=np.arange(20), + shape=(10, 2), + maxshape=(None, 2), + chunks=(10, 1), + fillvalue=-1 + ) + self.layout = h5.VirtualLayout((10, 1), int, maxshape=(None, 1)) + layout_source = h5.VirtualSource(source_dset) + self.layout[:h5.UNLIMITED, 0] = layout_source[:h5.UNLIMITED, 1] + + f.create_virtual_dataset("virtual", self.layout) + + def test_unlimited_axis(self): + comp1 = np.arange(1, 20, 2).reshape(10, 1) + comp2 = np.vstack(( + comp1, + np.full(shape=(10, 1), fill_value=-1) + )) + comp3 = np.vstack(( + comp1, + np.full(shape=(10, 1), fill_value=0) + )) + with h5.File(self.path, "a") as f: + source_dset = f['source'] + virtual_dset = f['virtual'] + np.testing.assert_array_equal(comp1, virtual_dset) + source_dset.resize(20, axis=0) + np.testing.assert_array_equal(comp2, virtual_dset) + source_dset[10:, 1] = np.zeros((10,), dtype=int) + np.testing.assert_array_equal(comp3, virtual_dset) + + def tearDown(self): + shutil.rmtree(self.tmpdir) + +if 
__name__ == "__main__": + ut.main() diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/test_lowlevel_vds.py b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_lowlevel_vds.py new file mode 100644 index 0000000000000000000000000000000000000000..c48c9dda112372122aa9babdbc1c377f092b2dc0 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_lowlevel_vds.py @@ -0,0 +1,296 @@ +''' +Unit test for the low level vds interface for eiger +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' + + +from ..common import ut +import numpy as np +import h5py as h5 +import tempfile + + +class TestEigerLowLevel(ut.TestCase): + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ['raw_file_1.h5', 'raw_file_2.h5', 'raw_file_3.h5'] + k = 0 + for outfile in self.fname: + filename = self.working_dir + outfile + f = h5.File(filename, 'w') + f['data'] = np.ones((20, 200, 200))*k + k += 1 + f.close() + + f = h5.File(self.working_dir+'raw_file_4.h5', 'w') + f['data'] = np.ones((18, 200, 200))*3 + self.fname.append('raw_file_4.h5') + self.fname = [self.working_dir+ix for ix in self.fname] + f.close() + + def test_eiger_low_level(self): + self.outfile = self.working_dir + 'eiger.h5' + with h5.File(self.outfile, 'w', libver='latest') as f: + vdset_shape = (78, 200, 200) + vdset_max_shape = vdset_shape + virt_dspace = h5.h5s.create_simple(vdset_shape, vdset_max_shape) + dcpl = h5.h5p.create(h5.h5p.DATASET_CREATE) + dcpl.set_fill_value(np.array([-1])) + # Create the source dataset dataspace + k = 0 + for foo in self.fname: + in_data = h5.File(foo, 'r')['data'] + src_shape = in_data.shape + max_src_shape = src_shape + in_data.file.close() + src_dspace = h5.h5s.create_simple(src_shape, max_src_shape) + # Select the source dataset hyperslab + src_dspace.select_hyperslab(start=(0, 0, 0), + stride=(1, 1, 1), + count=(1, 1, 1), + block=src_shape) + + virt_dspace.select_hyperslab(start=(k, 0, 0), + stride=(1, 1, 1), + count=(1, 1, 1), + block=src_shape) + + dcpl.set_virtual(virt_dspace, foo.encode('utf-8'), + b'data', src_dspace) + k += src_shape[0] + + # Create the virtual dataset + h5.h5d.create(f.id, name=b"data", tid=h5.h5t.NATIVE_INT16, + space=virt_dspace, dcpl=dcpl) + + f = h5.File(self.outfile, 'r')['data'] + self.assertEqual(f[10, 100, 10], 0.0) + self.assertEqual(f[30, 100, 100], 1.0) + self.assertEqual(f[50, 100, 100], 2.0) + self.assertEqual(f[70, 100, 100], 3.0) + f.file.close() + + def tearDown(self): + import os + for f in self.fname: + os.remove(f) + os.remove(self.outfile) + + +if __name__ == "__main__": + ut.main() +''' +Unit test for the low level vds interface for excalibur +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' + + +class ExcaliburData: + FEM_PIXELS_PER_CHIP_X = 256 + FEM_PIXELS_PER_CHIP_Y = 256 + FEM_CHIPS_PER_STRIPE_X = 8 + FEM_CHIPS_PER_STRIPE_Y = 1 + FEM_STRIPES_PER_MODULE = 2 + + @property + def sensor_module_dimensions(self): + x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X + y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y * self.FEM_STRIPES_PER_MODULE + return y_pixels, x_pixels, + + @property + def fem_stripe_dimensions(self): + x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X + y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y + return y_pixels, x_pixels, + + def generate_sensor_module_image(self, value, dtype='uint16'): + dset = 
np.empty(shape=self.sensor_module_dimensions, dtype=dtype) + dset.fill(value) + return dset + + def generate_fem_stripe_image(self, value, dtype='uint16'): + dset = np.empty(shape=self.fem_stripe_dimensions, dtype=dtype) + dset.fill(value) + return dset + + +class TestExcaliburLowLevel(ut.TestCase): + def create_excalibur_fem_stripe_datafile(self, fname, nframes, excalibur_data,scale): + shape = (nframes,) + excalibur_data.fem_stripe_dimensions + max_shape = (nframes,) + excalibur_data.fem_stripe_dimensions + chunk = (1,) + excalibur_data.fem_stripe_dimensions + with h5.File(fname, 'w', libver='latest') as f: + dset = f.create_dataset('data', shape=shape, maxshape=max_shape, chunks=chunk, dtype='uint16') + for data_value_index in np.arange(nframes): + dset[data_value_index] = excalibur_data.generate_fem_stripe_image(data_value_index*scale) + + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ["stripe_%d.h5" % stripe for stripe in range(1,7)] + self.fname = [self.working_dir+ix for ix in self.fname] + nframes = 5 + self.edata = ExcaliburData() + k=0 + for raw_file in self.fname: + self.create_excalibur_fem_stripe_datafile(raw_file, nframes, self.edata,k) + k+=1 + + def test_excalibur_low_level(self): + + excalibur_data = self.edata + self.outfile = self.working_dir+'excalibur.h5' + vdset_stripe_shape = (1,) + excalibur_data.fem_stripe_dimensions + vdset_stripe_max_shape = (5, ) + excalibur_data.fem_stripe_dimensions + vdset_shape = (5, + excalibur_data.fem_stripe_dimensions[0] * len(self.fname) + (10 * (len(self.fname)-1)), + excalibur_data.fem_stripe_dimensions[1]) + vdset_max_shape = (5, + excalibur_data.fem_stripe_dimensions[0] * len(self.fname) + (10 * (len(self.fname)-1)), + excalibur_data.fem_stripe_dimensions[1]) + vdset_y_offset = 0 + + # Create the virtual dataset file + with h5.File(self.outfile, 'w', libver='latest') as f: + + # Create the source dataset dataspace + src_dspace = h5.h5s.create_simple(vdset_stripe_shape, vdset_stripe_max_shape) + # Create the virtual dataset dataspace + virt_dspace = h5.h5s.create_simple(vdset_shape, vdset_max_shape) + + # Create the virtual dataset property list + dcpl = h5.h5p.create(h5.h5p.DATASET_CREATE) + dcpl.set_fill_value(np.array([0x01])) + + # Select the source dataset hyperslab + src_dspace.select_hyperslab(start=(0, 0, 0), count=(1, 1, 1), block=vdset_stripe_max_shape) + + for raw_file in self.fname: + # Select the virtual dataset hyperslab (for the source dataset) + virt_dspace.select_hyperslab(start=(0, vdset_y_offset, 0), + count=(1, 1, 1), + block=vdset_stripe_max_shape) + # Set the virtual dataset hyperslab to point to the real first dataset + dcpl.set_virtual(virt_dspace, raw_file.encode('utf-8'), + b"/data", src_dspace) + vdset_y_offset += vdset_stripe_shape[1] + 10 + + # Create the virtual dataset + dset = h5.h5d.create(f.id, name=b"data", + tid=h5.h5t.NATIVE_INT16, space=virt_dspace, dcpl=dcpl) + assert(f['data'].fillvalue == 0x01) + + f = h5.File(self.outfile,'r')['data'] + self.assertEqual(f[3,100,0], 0.0) + self.assertEqual(f[3,260,0], 1.0) + self.assertEqual(f[3,350,0], 3.0) + self.assertEqual(f[3,650,0], 6.0) + self.assertEqual(f[3,900,0], 9.0) + self.assertEqual(f[3,1150,0], 12.0) + self.assertEqual(f[3,1450,0], 15.0) + f.file.close() + + def tearDown(self): + import os + for f in self.fname: + os.remove(f) + os.remove(self.outfile) + +''' +Unit test for the low level vds interface for percival +https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf +''' 
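+# The Percival case interleaves frames from four source files into a single
+# virtual dataset: file k contributes every fourth frame starting at frame k.
+# The stride-4 hyperslab selection on the virtual dataspace below expresses
+# this mapping directly through the low-level h5py.h5s API.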
+ + +class TestPercivalLowLevel(ut.TestCase): + + def setUp(self): + self.working_dir = tempfile.mkdtemp() + self.fname = ['raw_file_1.h5','raw_file_2.h5','raw_file_3.h5'] + k = 0 + for outfile in self.fname: + filename = self.working_dir + outfile + f = h5.File(filename,'w') + f['data'] = np.ones((20,200,200))*k + k +=1 + f.close() + + f = h5.File(self.working_dir+'raw_file_4.h5','w') + f['data'] = np.ones((19,200,200))*3 + self.fname.append('raw_file_4.h5') + self.fname = [self.working_dir+ix for ix in self.fname] + f.close() + + def test_percival_low_level(self): + self.outfile = self.working_dir + 'percival.h5' + with h5.File(self.outfile, 'w', libver='latest') as f: + vdset_shape = (1,200,200) + num = h5.h5s.UNLIMITED + vdset_max_shape = (num,)+vdset_shape[1:] + virt_dspace = h5.h5s.create_simple(vdset_shape, vdset_max_shape) + dcpl = h5.h5p.create(h5.h5p.DATASET_CREATE) + dcpl.set_fill_value(np.array([-1])) + # Create the source dataset dataspace + k = 0 + for foo in self.fname: + in_data = h5.File(foo, 'r')['data'] + src_shape = in_data.shape + max_src_shape = (num,)+src_shape[1:] + in_data.file.close() + src_dspace = h5.h5s.create_simple(src_shape, max_src_shape) + # Select the source dataset hyperslab + src_dspace.select_hyperslab(start=(0, 0, 0), + stride=(1,1,1), + count=(num, 1, 1), + block=(1,)+src_shape[1:]) + + virt_dspace.select_hyperslab(start=(k, 0, 0), + stride=(4,1,1), + count=(num, 1, 1), + block=(1,)+src_shape[1:]) + + dcpl.set_virtual(virt_dspace, foo.encode('utf-8'), b'data', src_dspace) + k+=1 + + # Create the virtual dataset + dset = h5.h5d.create(f.id, name=b"data", tid=h5.h5t.NATIVE_INT16, space=virt_dspace, dcpl=dcpl) + + f = h5.File(self.outfile,'r') + sh = f['data'].shape + line = f['data'][:8,100,100] + foo = np.array(2*list(range(4))) + f.close() + self.assertEqual(sh,(79,200,200),) + np.testing.assert_array_equal(line,foo) + + def tearDown(self): + import os + for f in self.fname: + os.remove(f) + os.remove(self.outfile) + + +def test_virtual_prefix(tmp_path): + (tmp_path / 'a').mkdir() + (tmp_path / 'b').mkdir() + src_file = h5.File(tmp_path / 'a' / 'src.h5', 'w') + src_file['data'] = np.arange(10) + + vds_file = h5.File(tmp_path / 'b' / 'vds.h5', 'w') + layout = h5.VirtualLayout(shape=(10,), dtype=np.int64) + layout[:] = h5.VirtualSource('src.h5', 'data', shape=(10,)) + vds_file.create_virtual_dataset('data', layout, fillvalue=-1) + + # Path doesn't resolve + np.testing.assert_array_equal(vds_file['data'], np.full(10, fill_value=-1)) + + path_a = bytes(tmp_path / 'a') + dapl = h5.h5p.create(h5.h5p.DATASET_ACCESS) + dapl.set_virtual_prefix(path_a) + vds_id = h5.h5d.open(vds_file.id, b'data', dapl=dapl) + vds = h5.Dataset(vds_id) + + # Now it should find the source file and read the data correctly + np.testing.assert_array_equal(vds[:], np.arange(10)) + # Check that get_virtual_prefix gives back what we put in + assert vds.id.get_access_plist().get_virtual_prefix() == path_a diff --git a/MLPY/Lib/site-packages/h5py/tests/test_vds/test_virtual_source.py b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_virtual_source.py new file mode 100644 index 0000000000000000000000000000000000000000..bac2439f6788bb7c0319052520dfb841abad4c86 --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/tests/test_vds/test_virtual_source.py @@ -0,0 +1,166 @@ +from ..common import ut +import h5py as h5 +import numpy as np + + +class TestVirtualSource(ut.TestCase): + def test_full_slice(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[:,:,:] + 
self.assertEqual(dataset.shape,sliced.shape) + + # def test_full_slice_inverted(self): + # dataset = h5.VirtualSource('test','test',(20,30,30)) + # sliced = dataset[:,:,::-1] + # self.assertEqual(dataset.shape,sliced.shape) + # + # def test_subsampled_slice_inverted(self): + # dataset = h5.VirtualSource('test','test',(20,30,30)) + # sliced = dataset[:,:,::-2] + # self.assertEqual((20,30,15),sliced.shape) + + def test_integer_indexed(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[5,:,:] + self.assertEqual((30,30),sliced.shape) + + def test_integer_single_indexed(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[5] + self.assertEqual((30,30),sliced.shape) + + def test_two_integer_indexed(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[5,:,10] + self.assertEqual((30,),sliced.shape) + + def test_single_range(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[5:10,:,:] + self.assertEqual((5,)+dataset.shape[1:],sliced.shape) + + def test_shape_calculation_positive_step(self): + dataset = h5.VirtualSource('test','test',(20,)) + cmp = [] + for i in range(5): + d = dataset[2:12+i:3].shape[0] + ref = np.arange(20)[2:12+i:3].size + cmp.append(ref==d) + self.assertEqual(5, sum(cmp)) + + # def test_shape_calculation_positive_step_switched_start_stop(self): + # dataset = h5.VirtualSource('test','test',(20,)) + # cmp = [] + # for i in range(5): + # d = dataset[12+i:2:3].shape[0] + # ref = np.arange(20)[12+i:2:3].size + # cmp.append(ref==d) + # self.assertEqual(5, sum(cmp)) + # + # + # def test_shape_calculation_negative_step(self): + # dataset = h5.VirtualSource('test','test',(20,)) + # cmp = [] + # for i in range(5): + # d = dataset[12+i:2:-3].shape[0] + # ref = np.arange(20)[12+i:2:-3].size + # cmp.append(ref==d) + # self.assertEqual(5, sum(cmp)) + # + # def test_shape_calculation_negative_step_switched_start_stop(self): + # dataset = h5.VirtualSource('test','test',(20,)) + # cmp = [] + # for i in range(5): + # d = dataset[2:12+i:-3].shape[0] + # ref = np.arange(20)[2:12+i:-3].size + # cmp.append(ref==d) + # self.assertEqual(5, sum(cmp)) + + + def test_double_range(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[5:10,:,20:25] + self.assertEqual((5,30,5),sliced.shape) + + def test_double_strided_range(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[6:12:2,:,20:26:3] + self.assertEqual((3,30,2,),sliced.shape) + + # def test_double_strided_range_inverted(self): + # dataset = h5.VirtualSource('test','test',(20,30,30)) + # sliced = dataset[12:6:-2,:,26:20:-3] + # self.assertEqual((3,30,2),sliced.shape) + + def test_negative_start_index(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[-10:16] + self.assertEqual((6,30,30),sliced.shape) + + def test_negative_stop_index(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[10:-4] + self.assertEqual((6,30,30),sliced.shape) + + def test_negative_start_and_stop_index(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[-10:-4] + self.assertEqual((6,30,30),sliced.shape) + + # def test_negative_start_and_stop_and_stride_index(self): + # dataset = h5.VirtualSource('test','test',(20,30,30)) + # sliced = dataset[-4:-10:-2] + # self.assertEqual((3,30,30),sliced.shape) +# + def test_ellipsis(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[...] 
+ self.assertEqual(dataset.shape,sliced.shape) + + def test_ellipsis_end(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[0:1,...] + self.assertEqual((1,)+dataset.shape[1:],sliced.shape) + + def test_ellipsis_start(self): + dataset = h5.VirtualSource('test','test',(20,30,30)) + sliced = dataset[...,0:1] + self.assertEqual(dataset.shape[:-1]+(1,),sliced.shape) + + def test_ellipsis_sandwich(self): + dataset = h5.VirtualSource('test','test',(20,30,30,40)) + sliced = dataset[0:1,...,5:6] + self.assertEqual((1,)+dataset.shape[1:-1]+(1,),sliced.shape) + + def test_integer_shape(self): + dataset = h5.VirtualSource('test','test', 20) + self.assertEqual(dataset.shape, (20,)) + + def test_integer_maxshape(self): + dataset = h5.VirtualSource('test','test', 20, maxshape=30) + self.assertEqual(dataset.maxshape, (30,)) + + def test_extra_args(self): + with h5.File(name='f1', driver='core', + backing_store=False, mode='w') as ftest: + ftest['a'] = [1, 2, 3] + a = ftest['a'] + + with self.assertRaises(TypeError): + h5.VirtualSource(a, 'b') + with self.assertRaises(TypeError): + h5.VirtualSource(a, shape=(1, )) + with self.assertRaises(TypeError): + h5.VirtualSource(a, maxshape=(None,)) + with self.assertRaises(TypeError): + h5.VirtualSource(a, dtype=int) + + def test_repeated_slice(self): + dataset = h5.VirtualSource('test', 'test', (20, 30, 30)) + sliced = dataset[5:10, :, :] + with self.assertRaises(RuntimeError): + sliced[:, :4] + + +if __name__ == "__main__": + ut.main() diff --git a/MLPY/Lib/site-packages/h5py/utils.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/h5py/utils.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5d72f2cebbde64f594751e8925e27185ff4ee537 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/utils.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/h5py/version.py b/MLPY/Lib/site-packages/h5py/version.py new file mode 100644 index 0000000000000000000000000000000000000000..36b0779585fdff8b966a24bfba7e46e81022a9fd --- /dev/null +++ b/MLPY/Lib/site-packages/h5py/version.py @@ -0,0 +1,65 @@ +# This file is part of h5py, a Python interface to the HDF5 library. +# +# http://www.h5py.org +# +# Copyright 2008-2013 Andrew Collette and contributors +# +# License: Standard 3-clause BSD; see "license.txt" for full license terms +# and contributor agreement. + +""" + Versioning module for h5py. +""" + +from collections import namedtuple +from . 
import h5 as _h5 +import sys +import numpy + +# All should be integers, except pre, as validating versions is more than is +# needed for our use case +_H5PY_VERSION_CLS = namedtuple("_H5PY_VERSION_CLS", + "major minor bugfix pre post dev") + +hdf5_built_version_tuple = _h5.HDF5_VERSION_COMPILED_AGAINST + +version_tuple = _H5PY_VERSION_CLS(3, 11, 0, None, None, None) + +version = "{0.major:d}.{0.minor:d}.{0.bugfix:d}".format(version_tuple) +if version_tuple.pre is not None: + version += version_tuple.pre +if version_tuple.post is not None: + version += ".post{0.post:d}".format(version_tuple) +if version_tuple.dev is not None: + version += ".dev{0.dev:d}".format(version_tuple) + +hdf5_version_tuple = _h5.get_libversion() +hdf5_version = "%d.%d.%d" % hdf5_version_tuple + +api_version_tuple = (1,8) +api_version = "%d.%d" % api_version_tuple + +info = """\ +Summary of the h5py configuration +--------------------------------- + +h5py %(h5py)s +HDF5 %(hdf5)s +Python %(python)s +sys.platform %(platform)s +sys.maxsize %(maxsize)s +numpy %(numpy)s +cython (built with) %(cython_version)s +numpy (built against) %(numpy_build_version)s +HDF5 (built against) %(hdf5_build_version)s +""" % { + 'h5py': version, + 'hdf5': hdf5_version, + 'python': sys.version, + 'platform': sys.platform, + 'maxsize': sys.maxsize, + 'numpy': numpy.__version__, + 'cython_version': _h5.CYTHON_VERSION_COMPILED_WITH, + 'numpy_build_version': _h5.NUMPY_VERSION_COMPILED_AGAINST, + 'hdf5_build_version': "%d.%d.%d" % hdf5_built_version_tuple, +} diff --git a/MLPY/Lib/site-packages/h5py/zlib.dll b/MLPY/Lib/site-packages/h5py/zlib.dll new file mode 100644 index 0000000000000000000000000000000000000000..eadc847b56035c0f740a4289673a8fd415530b97 Binary files /dev/null and b/MLPY/Lib/site-packages/h5py/zlib.dll differ diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/LICENSE b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/METADATA b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..85513e8a9f651c99f4a1fa372ba7768255bfa1c9 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/METADATA @@ -0,0 +1,129 @@ +Metadata-Version: 2.1 +Name: importlib_metadata +Version: 8.0.0 +Summary: Read metadata from Python packages +Author-email: "Jason R. Coombs" +Project-URL: Source, https://github.com/python/importlib_metadata +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: zipp >=0.5 +Requires-Dist: typing-extensions >=3.6.4 ; python_version < "3.8" +Provides-Extra: doc +Requires-Dist: sphinx >=3.5 ; extra == 'doc' +Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc' +Requires-Dist: rst.linker >=1.9 ; extra == 'doc' +Requires-Dist: furo ; extra == 'doc' +Requires-Dist: sphinx-lint ; extra == 'doc' +Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' +Provides-Extra: perf +Requires-Dist: ipython ; extra == 'perf' +Provides-Extra: test +Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' +Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test' +Requires-Dist: pytest-cov ; extra == 'test' +Requires-Dist: pytest-mypy ; extra == 'test' +Requires-Dist: pytest-enabler >=2.2 ; extra == 'test' +Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test' +Requires-Dist: packaging ; extra == 'test' +Requires-Dist: pyfakefs ; extra == 'test' +Requires-Dist: flufl.flake8 ; extra == 'test' +Requires-Dist: pytest-perf >=0.9.2 ; extra == 'test' +Requires-Dist: jaraco.test >=5.4 ; extra == 'test' +Requires-Dist: importlib-resources >=1.3 ; (python_version < "3.9") and extra == 'test' + +.. image:: https://img.shields.io/pypi/v/importlib_metadata.svg + :target: https://pypi.org/project/importlib_metadata + +.. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg + +.. image:: https://github.com/python/importlib_metadata/actions/workflows/main.yml/badge.svg + :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22 + :alt: tests + +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json + :target: https://github.com/astral-sh/ruff + :alt: Ruff + +.. image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest + :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest + +.. image:: https://img.shields.io/badge/skeleton-2024-informational + :target: https://blog.jaraco.com/skeleton + +.. image:: https://tidelift.com/badges/package/pypi/importlib-metadata + :target: https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme + +Library to access the metadata for a Python package. + +This package supplies third-party access to the functionality of +`importlib.metadata `_ +including improvements added to subsequent Python versions. + + +Compatibility +============= + +New features are introduced in this third-party library and later merged +into CPython. 
The following table indicates which versions of this library +were contributed to different versions in the standard library: + +.. list-table:: + :header-rows: 1 + + * - importlib_metadata + - stdlib + * - 7.0 + - 3.13 + * - 6.5 + - 3.12 + * - 4.13 + - 3.11 + * - 4.6 + - 3.10 + * - 1.4 + - 3.8 + + +Usage +===== + +See the `online documentation `_ +for usage details. + +`Finder authors +`_ can +also add support for custom package installers. See the above documentation +for details. + + +Caveats +======= + +This project primarily supports third-party packages installed by PyPA +tools (or other conforming packages). It does not support: + +- Packages in the stdlib. +- Packages installed without metadata. + +Project details +=============== + + * Project home: https://github.com/python/importlib_metadata + * Report bugs at: https://github.com/python/importlib_metadata/issues + * Code hosting: https://github.com/python/importlib_metadata + * Documentation: https://importlib-metadata.readthedocs.io/ + +For Enterprise +============== + +Available as part of the Tidelift Subscription. + +This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. + +`Learn more `_. diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/RECORD b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..cdeb760f06d1b557c21cbcd3d0b5836fe5b98ef1 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/RECORD @@ -0,0 +1,31 @@ +importlib_metadata-8.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +importlib_metadata-8.0.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +importlib_metadata-8.0.0.dist-info/METADATA,sha256=anuQ7_7h4J1bSEzfcjIBakPi2cyVQ7y7jklLHsBeH1k,4648 +importlib_metadata-8.0.0.dist-info/RECORD,, +importlib_metadata-8.0.0.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91 +importlib_metadata-8.0.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 +importlib_metadata/__init__.py,sha256=tZNB-23h8Bixi9uCrQqj9Yf0aeC--Josdy3IZRIQeB0,33798 +importlib_metadata/__pycache__/__init__.cpython-39.pyc,, +importlib_metadata/__pycache__/_adapters.cpython-39.pyc,, +importlib_metadata/__pycache__/_collections.cpython-39.pyc,, +importlib_metadata/__pycache__/_compat.cpython-39.pyc,, +importlib_metadata/__pycache__/_functools.cpython-39.pyc,, +importlib_metadata/__pycache__/_itertools.cpython-39.pyc,, +importlib_metadata/__pycache__/_meta.cpython-39.pyc,, +importlib_metadata/__pycache__/_text.cpython-39.pyc,, +importlib_metadata/__pycache__/diagnose.cpython-39.pyc,, +importlib_metadata/_adapters.py,sha256=rIhWTwBvYA1bV7i-5FfVX38qEXDTXFeS5cb5xJtP3ks,2317 +importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743 +importlib_metadata/_compat.py,sha256=73QKrN9KNoaZzhbX5yPCCZa-FaALwXe8TPlDR72JgBU,1314 +importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895 +importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068 +importlib_metadata/_meta.py,sha256=nxZ7C8GVlcBFAKWyVOn_dn7ot_twBcbm1NmvjIetBHI,1801 +importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 +importlib_metadata/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+importlib_metadata/compat/__pycache__/__init__.cpython-39.pyc,, +importlib_metadata/compat/__pycache__/py311.cpython-39.pyc,, +importlib_metadata/compat/__pycache__/py39.cpython-39.pyc,, +importlib_metadata/compat/py311.py,sha256=uqm-K-uohyj1042TH4a9Er_I5o7667DvulcD-gC_fSA,608 +importlib_metadata/compat/py39.py,sha256=cPkMv6-0ilK-0Jw_Tkn0xYbOKJZc4WJKQHow0c2T44w,1102 +importlib_metadata/diagnose.py,sha256=nkSRMiowlmkhLYhKhvCg9glmt_11Cox-EmLzEbqYTa8,379 +importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/WHEEL b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..edf4ec7c70d7dbfc16600ff1b368daf1097c5dc7 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (70.1.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..bbb07547a19c30031d13c45cf01cba61dc434e47 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata-8.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +importlib_metadata diff --git a/MLPY/Lib/site-packages/importlib_metadata/__init__.py b/MLPY/Lib/site-packages/importlib_metadata/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed4813551ac238bfb9b5a48f4476463355415d27 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/__init__.py @@ -0,0 +1,1083 @@ +from __future__ import annotations + +import os +import re +import abc +import sys +import json +import zipp +import email +import types +import inspect +import pathlib +import operator +import textwrap +import functools +import itertools +import posixpath +import collections + +from . import _meta +from .compat import py39, py311 +from ._collections import FreezableDefaultDict, Pair +from ._compat import ( + NullFinder, + install, +) +from ._functools import method_cache, pass_none +from ._itertools import always_iterable, unique_everseen +from ._meta import PackageMetadata, SimplePath + +from contextlib import suppress +from importlib import import_module +from importlib.abc import MetaPathFinder +from itertools import starmap +from typing import Any, Iterable, List, Mapping, Match, Optional, Set, cast + +__all__ = [ + 'Distribution', + 'DistributionFinder', + 'PackageMetadata', + 'PackageNotFoundError', + 'distribution', + 'distributions', + 'entry_points', + 'files', + 'metadata', + 'packages_distributions', + 'requires', + 'version', +] + + +class PackageNotFoundError(ModuleNotFoundError): + """The package was not found.""" + + def __str__(self) -> str: + return f"No package metadata was found for {self.name}" + + @property + def name(self) -> str: # type: ignore[override] + (name,) = self.args + return name + + +class Sectioned: + """ + A simple entry point config parser for performance + + >>> for item in Sectioned.read(Sectioned._sample): + ... 
print(item) + Pair(name='sec1', value='# comments ignored') + Pair(name='sec1', value='a = 1') + Pair(name='sec1', value='b = 2') + Pair(name='sec2', value='a = 2') + + >>> res = Sectioned.section_pairs(Sectioned._sample) + >>> item = next(res) + >>> item.name + 'sec1' + >>> item.value + Pair(name='a', value='1') + >>> item = next(res) + >>> item.value + Pair(name='b', value='2') + >>> item = next(res) + >>> item.name + 'sec2' + >>> item.value + Pair(name='a', value='2') + >>> list(res) + [] + """ + + _sample = textwrap.dedent( + """ + [sec1] + # comments ignored + a = 1 + b = 2 + + [sec2] + a = 2 + """ + ).lstrip() + + @classmethod + def section_pairs(cls, text): + return ( + section._replace(value=Pair.parse(section.value)) + for section in cls.read(text, filter_=cls.valid) + if section.name is not None + ) + + @staticmethod + def read(text, filter_=None): + lines = filter(filter_, map(str.strip, text.splitlines())) + name = None + for value in lines: + section_match = value.startswith('[') and value.endswith(']') + if section_match: + name = value.strip('[]') + continue + yield Pair(name, value) + + @staticmethod + def valid(line: str): + return line and not line.startswith('#') + + +class EntryPoint: + """An entry point as defined by Python packaging conventions. + + See `the packaging docs on entry points + `_ + for more information. + + >>> ep = EntryPoint( + ... name=None, group=None, value='package.module:attr [extra1, extra2]') + >>> ep.module + 'package.module' + >>> ep.attr + 'attr' + >>> ep.extras + ['extra1', 'extra2'] + """ + + pattern = re.compile( + r'(?P<module>[\w.]+)\s*' + r'(:\s*(?P<attr>[\w.]+)\s*)?' + r'((?P<extras>\[.*\])\s*)?$' + ) + """ + A regular expression describing the syntax for an entry point, + which might look like: + + - module + - package.module + - package.module:attribute + - package.module:object.attribute + - package.module:attr [extra1, extra2] + + Other combinations are possible as well. + + The expression is lenient about whitespace around the ':', + following the attr, and following any extras. + """ + + name: str + value: str + group: str + + dist: Optional[Distribution] = None + + def __init__(self, name: str, value: str, group: str) -> None: + vars(self).update(name=name, value=value, group=group) + + def load(self) -> Any: + """Load the entry point from its definition. If only a module + is indicated by the value, return that module. Otherwise, + return the named object. + """ + match = cast(Match, self.pattern.match(self.value)) + module = import_module(match.group('module')) + attrs = filter(None, (match.group('attr') or '').split('.')) + return functools.reduce(getattr, attrs, module) + + @property + def module(self) -> str: + match = self.pattern.match(self.value) + assert match is not None + return match.group('module') + + @property + def attr(self) -> str: + match = self.pattern.match(self.value) + assert match is not None + return match.group('attr') + + @property + def extras(self) -> List[str]: + match = self.pattern.match(self.value) + assert match is not None + return re.findall(r'\w+', match.group('extras') or '') + + def _for(self, dist): + vars(self).update(dist=dist) + return self + + def matches(self, **params): + """ + EntryPoint matches the given parameters.
+ + >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]') + >>> ep.matches(group='foo') + True + >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]') + True + >>> ep.matches(group='foo', name='other') + False + >>> ep.matches() + True + >>> ep.matches(extras=['extra1', 'extra2']) + True + >>> ep.matches(module='bing') + True + >>> ep.matches(attr='bong') + True + """ + attrs = (getattr(self, param) for param in params) + return all(map(operator.eq, params.values(), attrs)) + + def _key(self): + return self.name, self.value, self.group + + def __lt__(self, other): + return self._key() < other._key() + + def __eq__(self, other): + return self._key() == other._key() + + def __setattr__(self, name, value): + raise AttributeError("EntryPoint objects are immutable.") + + def __repr__(self): + return ( + f'EntryPoint(name={self.name!r}, value={self.value!r}, ' + f'group={self.group!r})' + ) + + def __hash__(self) -> int: + return hash(self._key()) + + +class EntryPoints(tuple): + """ + An immutable collection of selectable EntryPoint objects. + """ + + __slots__ = () + + def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override] + """ + Get the EntryPoint in self matching name. + """ + try: + return next(iter(self.select(name=name))) + except StopIteration: + raise KeyError(name) + + def __repr__(self): + """ + Repr with classname and tuple constructor to + signal that we deviate from regular tuple behavior. + """ + return '%s(%r)' % (self.__class__.__name__, tuple(self)) + + def select(self, **params) -> EntryPoints: + """ + Select entry points from self that match the + given parameters (typically group and/or name). + """ + return EntryPoints(ep for ep in self if py39.ep_matches(ep, **params)) + + @property + def names(self) -> Set[str]: + """ + Return the set of all names of all entry points. + """ + return {ep.name for ep in self} + + @property + def groups(self) -> Set[str]: + """ + Return the set of all groups of all entry points. + """ + return {ep.group for ep in self} + + @classmethod + def _from_text_for(cls, text, dist): + return cls(ep._for(dist) for ep in cls._from_text(text)) + + @staticmethod + def _from_text(text): + return ( + EntryPoint(name=item.value.name, value=item.value.value, group=item.name) + for item in Sectioned.section_pairs(text or '') + ) + + +class PackagePath(pathlib.PurePosixPath): + """A reference to a path in a package""" + + hash: Optional[FileHash] + size: int + dist: Distribution + + def read_text(self, encoding: str = 'utf-8') -> str: # type: ignore[override] + return self.locate().read_text(encoding=encoding) + + def read_binary(self) -> bytes: + return self.locate().read_bytes() + + def locate(self) -> SimplePath: + """Return a path-like object for this path""" + return self.dist.locate_file(self) + + +class FileHash: + def __init__(self, spec: str) -> None: + self.mode, _, self.value = spec.partition('=') + + def __repr__(self) -> str: + return f'' + + +class Distribution(metaclass=abc.ABCMeta): + """ + An abstract Python distribution package. + + Custom providers may derive from this class and define + the abstract methods to provide a concrete implementation + for their environment. Some providers may opt to override + the default implementation of some properties to bypass + the file-reading mechanism. + """ + + @abc.abstractmethod + def read_text(self, filename) -> Optional[str]: + """Attempt to load metadata file given by the name. 
+ + Python distribution metadata is organized by blobs of text + typically represented as "files" in the metadata directory + (e.g. package-1.0.dist-info). These files include things + like: + + - METADATA: The distribution metadata including fields + like Name and Version and Description. + - entry_points.txt: A series of entry points as defined in + `the entry points spec `_. + - RECORD: A record of files according to + `this recording spec `_. + + A package may provide any set of files, including those + not listed here or none at all. + + :param filename: The name of the file in the distribution info. + :return: The text if found, otherwise None. + """ + + @abc.abstractmethod + def locate_file(self, path: str | os.PathLike[str]) -> SimplePath: + """ + Given a path to a file in this distribution, return a SimplePath + to it. + """ + + @classmethod + def from_name(cls, name: str) -> Distribution: + """Return the Distribution for the given package name. + + :param name: The name of the distribution package to search for. + :return: The Distribution instance (or subclass thereof) for the named + package, if found. + :raises PackageNotFoundError: When the named package's distribution + metadata cannot be found. + :raises ValueError: When an invalid value is supplied for name. + """ + if not name: + raise ValueError("A distribution name is required.") + try: + return next(iter(cls.discover(name=name))) + except StopIteration: + raise PackageNotFoundError(name) + + @classmethod + def discover( + cls, *, context: Optional[DistributionFinder.Context] = None, **kwargs + ) -> Iterable[Distribution]: + """Return an iterable of Distribution objects for all packages. + + Pass a ``context`` or pass keyword arguments for constructing + a context. + + :context: A ``DistributionFinder.Context`` object. + :return: Iterable of Distribution objects for packages matching + the context. + """ + if context and kwargs: + raise ValueError("cannot accept context and kwargs") + context = context or DistributionFinder.Context(**kwargs) + return itertools.chain.from_iterable( + resolver(context) for resolver in cls._discover_resolvers() + ) + + @staticmethod + def at(path: str | os.PathLike[str]) -> Distribution: + """Return a Distribution for the indicated metadata path. + + :param path: a string or path-like object + :return: a concrete Distribution instance for the path + """ + return PathDistribution(pathlib.Path(path)) + + @staticmethod + def _discover_resolvers(): + """Search the meta_path for resolvers (MetadataPathFinders).""" + declared = ( + getattr(finder, 'find_distributions', None) for finder in sys.meta_path + ) + return filter(None, declared) + + @property + def metadata(self) -> _meta.PackageMetadata: + """Return the parsed metadata for this Distribution. + + The returned object will have keys that name the various bits of + metadata per the + `Core metadata specifications `_. + + Custom providers may provide the METADATA file or override this + property. + """ + # deferred for performance (python/cpython#109829) + from . import _adapters + + opt_text = ( + self.read_text('METADATA') + or self.read_text('PKG-INFO') + # This last clause is here to support old egg-info files. Its + # effect is to just end up using the PathDistribution's self._path + # (which points to the egg-info file) attribute unchanged. 
+ or self.read_text('') + ) + text = cast(str, opt_text) + return _adapters.Message(email.message_from_string(text)) + + @property + def name(self) -> str: + """Return the 'Name' metadata for the distribution package.""" + return self.metadata['Name'] + + @property + def _normalized_name(self): + """Return a normalized version of the name.""" + return Prepared.normalize(self.name) + + @property + def version(self) -> str: + """Return the 'Version' metadata for the distribution package.""" + return self.metadata['Version'] + + @property + def entry_points(self) -> EntryPoints: + """ + Return EntryPoints for this distribution. + + Custom providers may provide the ``entry_points.txt`` file + or override this property. + """ + return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self) + + @property + def files(self) -> Optional[List[PackagePath]]: + """Files in this distribution. + + :return: List of PackagePath for this distribution or None + + Result is `None` if the metadata file that enumerates files + (i.e. RECORD for dist-info, or installed-files.txt or + SOURCES.txt for egg-info) is missing. + Result may be empty if the metadata exists but is empty. + + Custom providers are recommended to provide a "RECORD" file (in + ``read_text``) or override this property to allow for callers to be + able to resolve filenames provided by the package. + """ + + def make_file(name, hash=None, size_str=None): + result = PackagePath(name) + result.hash = FileHash(hash) if hash else None + result.size = int(size_str) if size_str else None + result.dist = self + return result + + @pass_none + def make_files(lines): + # Delay csv import, since Distribution.files is not as widely used + # as other parts of importlib.metadata + import csv + + return starmap(make_file, csv.reader(lines)) + + @pass_none + def skip_missing_files(package_paths): + return list(filter(lambda path: path.locate().exists(), package_paths)) + + return skip_missing_files( + make_files( + self._read_files_distinfo() + or self._read_files_egginfo_installed() + or self._read_files_egginfo_sources() + ) + ) + + def _read_files_distinfo(self): + """ + Read the lines of RECORD. + """ + text = self.read_text('RECORD') + return text and text.splitlines() + + def _read_files_egginfo_installed(self): + """ + Read installed-files.txt and return lines in a similar + CSV-parsable format as RECORD: each file must be placed + relative to the site-packages directory and must also be + quoted (since file names can contain literal commas). + + This file is written when the package is installed by pip, + but it might not be written for other installation methods. + Assume the file is accurate if it exists. + """ + text = self.read_text('installed-files.txt') + # Prepend the .egg-info/ subdir to the lines in this file. + # But this subdir is only available from PathDistribution's + # self._path. + subdir = getattr(self, '_path', None) + if not text or not subdir: + return + + paths = ( + py311.relative_fix((subdir / name).resolve()) + .relative_to(self.locate_file('').resolve(), walk_up=True) + .as_posix() + for name in text.splitlines() + ) + return map('"{}"'.format, paths) + + def _read_files_egginfo_sources(self): + """ + Read SOURCES.txt and return lines in a similar CSV-parsable + format as RECORD: each file name must be quoted (since it + might contain literal commas). + + Note that SOURCES.txt is not a reliable source for what + files are installed by a package. 
This file is generated + for a source archive, and the files that are present + there (e.g. setup.py) may not correctly reflect the files + that are present after the package has been installed. + """ + text = self.read_text('SOURCES.txt') + return text and map('"{}"'.format, text.splitlines()) + + @property + def requires(self) -> Optional[List[str]]: + """Generated requirements specified for this Distribution""" + reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() + return reqs and list(reqs) + + def _read_dist_info_reqs(self): + return self.metadata.get_all('Requires-Dist') + + def _read_egg_info_reqs(self): + source = self.read_text('requires.txt') + return pass_none(self._deps_from_requires_text)(source) + + @classmethod + def _deps_from_requires_text(cls, source): + return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source)) + + @staticmethod + def _convert_egg_info_reqs_to_simple_reqs(sections): + """ + Historically, setuptools would solicit and store 'extra' + requirements, including those with environment markers, + in separate sections. More modern tools expect each + dependency to be defined separately, with any relevant + extras and environment markers attached directly to that + requirement. This method converts the former to the + latter. See _test_deps_from_requires_text for an example. + """ + + def make_condition(name): + return name and f'extra == "{name}"' + + def quoted_marker(section): + section = section or '' + extra, sep, markers = section.partition(':') + if extra and markers: + markers = f'({markers})' + conditions = list(filter(None, [markers, make_condition(extra)])) + return '; ' + ' and '.join(conditions) if conditions else '' + + def url_req_space(req): + """ + PEP 508 requires a space between the url_spec and the quoted_marker. + Ref python/importlib_metadata#357. + """ + # '@' is uniquely indicative of a url_req. + return ' ' * ('@' in req) + + for section in sections: + space = url_req_space(section.value) + yield section.value + space + quoted_marker(section.name) + + @property + def origin(self): + return self._load_json('direct_url.json') + + def _load_json(self, filename): + return pass_none(json.loads)( + self.read_text(filename), + object_hook=lambda data: types.SimpleNamespace(**data), + ) + + +class DistributionFinder(MetaPathFinder): + """ + A MetaPathFinder capable of discovering installed distributions. + + Custom providers should implement this interface in order to + supply metadata. + """ + + class Context: + """ + Keyword arguments presented by the caller to + ``distributions()`` or ``Distribution.discover()`` + to narrow the scope of a search for distributions + in all DistributionFinders. + + Each DistributionFinder may expect any parameters + and should attempt to honor the canonical + parameters defined below when appropriate. + + This mechanism gives a custom provider a means to + solicit additional details from the caller beyond + "name" and "path" when searching distributions. + For example, imagine a provider that exposes suites + of packages in either a "public" or "private" ``realm``. + A caller may wish to query only for distributions in + a particular realm and could call + ``distributions(realm="private")`` to signal to the + custom provider to only include distributions from that + realm. + """ + + name = None + """ + Specific name for which a distribution finder should match. + A name of ``None`` matches all distributions. 
+ """ + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + @property + def path(self) -> List[str]: + """ + The sequence of directory path that a distribution finder + should search. + + Typically refers to Python installed package paths such as + "site-packages" directories and defaults to ``sys.path``. + """ + return vars(self).get('path', sys.path) + + @abc.abstractmethod + def find_distributions(self, context=Context()) -> Iterable[Distribution]: + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching the ``context``, + a DistributionFinder.Context instance. + """ + + +class FastPath: + """ + Micro-optimized class for searching a root for children. + + Root is a path on the file system that may contain metadata + directories either as natural directories or within a zip file. + + >>> FastPath('').children() + ['...'] + + FastPath objects are cached and recycled for any given root. + + >>> FastPath('foobar') is FastPath('foobar') + True + """ + + @functools.lru_cache() # type: ignore + def __new__(cls, root): + return super().__new__(cls) + + def __init__(self, root): + self.root = root + + def joinpath(self, child): + return pathlib.Path(self.root, child) + + def children(self): + with suppress(Exception): + return os.listdir(self.root or '.') + with suppress(Exception): + return self.zip_children() + return [] + + def zip_children(self): + zip_path = zipp.Path(self.root) + names = zip_path.root.namelist() + self.joinpath = zip_path.joinpath + + return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names) + + def search(self, name): + return self.lookup(self.mtime).search(name) + + @property + def mtime(self): + with suppress(OSError): + return os.stat(self.root).st_mtime + self.lookup.cache_clear() + + @method_cache + def lookup(self, mtime): + return Lookup(self) + + +class Lookup: + """ + A micro-optimized class for searching a (fast) path for metadata. + """ + + def __init__(self, path: FastPath): + """ + Calculate all of the children representing metadata. + + From the children in the path, calculate early all of the + children that appear to represent metadata (infos) or legacy + metadata (eggs). + """ + + base = os.path.basename(path.root).lower() + base_is_egg = base.endswith(".egg") + self.infos = FreezableDefaultDict(list) + self.eggs = FreezableDefaultDict(list) + + for child in path.children(): + low = child.lower() + if low.endswith((".dist-info", ".egg-info")): + # rpartition is faster than splitext and suitable for this purpose. + name = low.rpartition(".")[0].partition("-")[0] + normalized = Prepared.normalize(name) + self.infos[normalized].append(path.joinpath(child)) + elif base_is_egg and low == "egg-info": + name = base.rpartition(".")[0].partition("-")[0] + legacy_normalized = Prepared.legacy_normalize(name) + self.eggs[legacy_normalized].append(path.joinpath(child)) + + self.infos.freeze() + self.eggs.freeze() + + def search(self, prepared: Prepared): + """ + Yield all infos and eggs matching the Prepared query. + """ + infos = ( + self.infos[prepared.normalized] + if prepared + else itertools.chain.from_iterable(self.infos.values()) + ) + eggs = ( + self.eggs[prepared.legacy_normalized] + if prepared + else itertools.chain.from_iterable(self.eggs.values()) + ) + return itertools.chain(infos, eggs) + + +class Prepared: + """ + A prepared search query for metadata on a possibly-named package. + + Pre-calculates the normalization to prevent repeated operations. 
+ + >>> none = Prepared(None) + >>> none.normalized + >>> none.legacy_normalized + >>> bool(none) + False + >>> sample = Prepared('Sample__Pkg-name.foo') + >>> sample.normalized + 'sample_pkg_name_foo' + >>> sample.legacy_normalized + 'sample__pkg_name.foo' + >>> bool(sample) + True + """ + + normalized = None + legacy_normalized = None + + def __init__(self, name: Optional[str]): + self.name = name + if name is None: + return + self.normalized = self.normalize(name) + self.legacy_normalized = self.legacy_normalize(name) + + @staticmethod + def normalize(name): + """ + PEP 503 normalization plus dashes as underscores. + """ + return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_') + + @staticmethod + def legacy_normalize(name): + """ + Normalize the package name as found in the convention in + older packaging tools versions and specs. + """ + return name.lower().replace('-', '_') + + def __bool__(self): + return bool(self.name) + + +@install +class MetadataPathFinder(NullFinder, DistributionFinder): + """A degenerate finder for distribution packages on the file system. + + This finder supplies only a find_distributions() method for versions + of Python that do not have a PathFinder find_distributions(). + """ + + @classmethod + def find_distributions( + cls, context=DistributionFinder.Context() + ) -> Iterable[PathDistribution]: + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + found = cls._search_paths(context.name, context.path) + return map(PathDistribution, found) + + @classmethod + def _search_paths(cls, name, paths): + """Find metadata directories in paths heuristically.""" + prepared = Prepared(name) + return itertools.chain.from_iterable( + path.search(prepared) for path in map(FastPath, paths) + ) + + @classmethod + def invalidate_caches(cls) -> None: + FastPath.__new__.cache_clear() + + +class PathDistribution(Distribution): + def __init__(self, path: SimplePath) -> None: + """Construct a distribution. + + :param path: SimplePath indicating the metadata directory. + """ + self._path = path + + def read_text(self, filename: str | os.PathLike[str]) -> Optional[str]: + with suppress( + FileNotFoundError, + IsADirectoryError, + KeyError, + NotADirectoryError, + PermissionError, + ): + return self._path.joinpath(filename).read_text(encoding='utf-8') + + return None + + read_text.__doc__ = Distribution.read_text.__doc__ + + def locate_file(self, path: str | os.PathLike[str]) -> SimplePath: + return self._path.parent / path + + @property + def _normalized_name(self): + """ + Performance optimization: where possible, resolve the + normalized name from the file system path. 
+ """ + stem = os.path.basename(str(self._path)) + return ( + pass_none(Prepared.normalize)(self._name_from_stem(stem)) + or super()._normalized_name + ) + + @staticmethod + def _name_from_stem(stem): + """ + >>> PathDistribution._name_from_stem('foo-3.0.egg-info') + 'foo' + >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info') + 'CherryPy' + >>> PathDistribution._name_from_stem('face.egg-info') + 'face' + >>> PathDistribution._name_from_stem('foo.bar') + """ + filename, ext = os.path.splitext(stem) + if ext not in ('.dist-info', '.egg-info'): + return + name, sep, rest = filename.partition('-') + return name + + +def distribution(distribution_name: str) -> Distribution: + """Get the ``Distribution`` instance for the named package. + + :param distribution_name: The name of the distribution package as a string. + :return: A ``Distribution`` instance (or subclass thereof). + """ + return Distribution.from_name(distribution_name) + + +def distributions(**kwargs) -> Iterable[Distribution]: + """Get all ``Distribution`` instances in the current environment. + + :return: An iterable of ``Distribution`` instances. + """ + return Distribution.discover(**kwargs) + + +def metadata(distribution_name: str) -> _meta.PackageMetadata: + """Get the metadata for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: A PackageMetadata containing the parsed metadata. + """ + return Distribution.from_name(distribution_name).metadata + + +def version(distribution_name: str) -> str: + """Get the version string for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: The version string for the package as defined in the package's + "Version" metadata key. + """ + return distribution(distribution_name).version + + +_unique = functools.partial( + unique_everseen, + key=py39.normalized_name, +) +""" +Wrapper for ``distributions`` to return unique distributions by name. +""" + + +def entry_points(**params) -> EntryPoints: + """Return EntryPoint objects for all installed packages. + + Pass selection parameters (group or name) to filter the + result to entry points matching those properties (see + EntryPoints.select()). + + :return: EntryPoints for all installed packages. + """ + eps = itertools.chain.from_iterable( + dist.entry_points for dist in _unique(distributions()) + ) + return EntryPoints(eps).select(**params) + + +def files(distribution_name: str) -> Optional[List[PackagePath]]: + """Return a list of files for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: List of files composing the distribution. + """ + return distribution(distribution_name).files + + +def requires(distribution_name: str) -> Optional[List[str]]: + """ + Return a list of requirements for the named package. + + :return: An iterable of requirements, suitable for + packaging.requirement.Requirement. + """ + return distribution(distribution_name).requires + + +def packages_distributions() -> Mapping[str, List[str]]: + """ + Return a mapping of top-level packages to their + distributions. 
+ + >>> import collections.abc + >>> pkgs = packages_distributions() + >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values()) + True + """ + pkg_to_dist = collections.defaultdict(list) + for dist in distributions(): + for pkg in _top_level_declared(dist) or _top_level_inferred(dist): + pkg_to_dist[pkg].append(dist.metadata['Name']) + return dict(pkg_to_dist) + + +def _top_level_declared(dist): + return (dist.read_text('top_level.txt') or '').split() + + +def _topmost(name: PackagePath) -> Optional[str]: + """ + Return the top-most parent as long as there is a parent. + """ + top, *rest = name.parts + return top if rest else None + + +def _get_toplevel_name(name: PackagePath) -> str: + """ + Infer a possibly importable module name from a name presumed on + sys.path. + + >>> _get_toplevel_name(PackagePath('foo.py')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo.pyc')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo/__init__.py')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo.pth')) + 'foo.pth' + >>> _get_toplevel_name(PackagePath('foo.dist-info')) + 'foo.dist-info' + """ + return _topmost(name) or ( + # python/typeshed#10328 + inspect.getmodulename(name) # type: ignore + or str(name) + ) + + +def _top_level_inferred(dist): + opt_names = set(map(_get_toplevel_name, always_iterable(dist.files))) + + def importable_name(name): + return '.' not in name + + return filter(importable_name, opt_names) diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bab993948e3709971deb4bbdccc002956db515c Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_adapters.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_adapters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecc5d80cfd406e3aab4da23e496196512360a145 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_adapters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_collections.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_collections.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6757dd62e777d5ec4352b7bb885181e5050ec322 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_collections.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_compat.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_compat.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d716e586c491de3e0e12e350cdda67276916f73b Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_compat.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_functools.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_functools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd1fed4aa96b7f26ed649f521237c4ecd78d08f1 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_functools.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_itertools.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_itertools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ddfe859dcda0d7bce4da281af523c2ba287ded2 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_itertools.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_meta.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_meta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20d385ceafeb61e20e78adca6f58a410193045fc Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_meta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_text.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_text.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbd32ae3cf1c49626cbb9f0c18887a68c96c8b59 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/_text.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/__pycache__/diagnose.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/diagnose.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5772ff407ae945010cf512766283a155fc02ed2 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/__pycache__/diagnose.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/_adapters.py b/MLPY/Lib/site-packages/importlib_metadata/_adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..6223263ed53f22fc25c09de06789718d2cd3b6ea --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_adapters.py @@ -0,0 +1,83 @@ +import re +import textwrap +import email.message + +from ._text import FoldedCase + + +class Message(email.message.Message): + multiple_use_keys = set( + map( + FoldedCase, + [ + 'Classifier', + 'Obsoletes-Dist', + 'Platform', + 'Project-URL', + 'Provides-Dist', + 'Provides-Extra', + 'Requires-Dist', + 'Requires-External', + 'Supported-Platform', + 'Dynamic', + ], + ) + ) + """ + Keys that may be indicated multiple times per PEP 566. + """ + + def __new__(cls, orig: email.message.Message): + res = super().__new__(cls) + vars(res).update(vars(orig)) + return res + + def __init__(self, *args, **kwargs): + self._headers = self._repair_headers() + + # suppress spurious error from mypy + def __iter__(self): + return super().__iter__() + + def __getitem__(self, item): + """ + Override parent behavior to typical dict behavior. + + ``email.message.Message`` will emit None values for missing + keys. Typical mappings, including this ``Message``, will raise + a key error for missing keys. + + Ref python/importlib_metadata#371. + """ + res = super().__getitem__(item) + if res is None: + raise KeyError(item) + return res + + def _repair_headers(self): + def redent(value): + "Correct for RFC822 indentation" + if not value or '\n' not in value: + return value + return textwrap.dedent(' ' * 8 + value) + + headers = [(key, redent(value)) for key, value in vars(self)['_headers']] + if self._payload: + headers.append(('Description', self.get_payload())) + return headers + + @property + def json(self): + """ + Convert PackageMetadata to a JSON-compatible format + per PEP 0566. 
+ """ + + def transform(key): + value = self.get_all(key) if key in self.multiple_use_keys else self[key] + if key == 'Keywords': + value = re.split(r'\s+', value) + tk = key.lower().replace('-', '_') + return tk, value + + return dict(map(transform, map(FoldedCase, self))) diff --git a/MLPY/Lib/site-packages/importlib_metadata/_collections.py b/MLPY/Lib/site-packages/importlib_metadata/_collections.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0954e1a30546d781bf25781ec716ef92a77e32 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_collections.py @@ -0,0 +1,30 @@ +import collections + + +# from jaraco.collections 3.3 +class FreezableDefaultDict(collections.defaultdict): + """ + Often it is desirable to prevent the mutation of + a default dict after its initial construction, such + as to prevent mutation during iteration. + + >>> dd = FreezableDefaultDict(list) + >>> dd[0].append('1') + >>> dd.freeze() + >>> dd[1] + [] + >>> len(dd) + 1 + """ + + def __missing__(self, key): + return getattr(self, '_frozen', super().__missing__)(key) + + def freeze(self): + self._frozen = lambda key: self.default_factory() + + +class Pair(collections.namedtuple('Pair', 'name value')): + @classmethod + def parse(cls, text): + return cls(*map(str.strip, text.split("=", 1))) diff --git a/MLPY/Lib/site-packages/importlib_metadata/_compat.py b/MLPY/Lib/site-packages/importlib_metadata/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..df312b1cbbf18a337278df0e618fb9e8c862e5f6 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_compat.py @@ -0,0 +1,57 @@ +import sys +import platform + + +__all__ = ['install', 'NullFinder'] + + +def install(cls): + """ + Class decorator for installation on sys.meta_path. + + Adds the backport DistributionFinder to sys.meta_path and + attempts to disable the finder functionality of the stdlib + DistributionFinder. + """ + sys.meta_path.append(cls()) + disable_stdlib_finder() + return cls + + +def disable_stdlib_finder(): + """ + Give the backport primacy for discovering path-based distributions + by monkey-patching the stdlib O_O. + + See #91 for more background for rationale on this sketchy + behavior. + """ + + def matches(finder): + return getattr( + finder, '__module__', None + ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions') + + for finder in filter(matches, sys.meta_path): # pragma: nocover + del finder.find_distributions + + +class NullFinder: + """ + A "Finder" (aka "MetaPathFinder") that never finds any modules, + but may find distributions. + """ + + @staticmethod + def find_spec(*args, **kwargs): + return None + + +def pypy_partial(val): + """ + Adjust for variable stacklevel on partial under PyPy. + + Workaround for #327. + """ + is_pypy = platform.python_implementation() == 'PyPy' + return val + is_pypy diff --git a/MLPY/Lib/site-packages/importlib_metadata/_functools.py b/MLPY/Lib/site-packages/importlib_metadata/_functools.py new file mode 100644 index 0000000000000000000000000000000000000000..71f66bd03cb713a2190853bdf7170c4ea80d2425 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_functools.py @@ -0,0 +1,104 @@ +import types +import functools + + +# from jaraco.functools 3.3 +def method_cache(method, cache_wrapper=None): + """ + Wrap lru_cache to support storing the cache data in the object instances. 
+ + Abstracts the common paradigm where the method explicitly saves an + underscore-prefixed protected property on first call and returns that + subsequently. + + >>> class MyClass: + ... calls = 0 + ... + ... @method_cache + ... def method(self, value): + ... self.calls += 1 + ... return value + + >>> a = MyClass() + >>> a.method(3) + 3 + >>> for x in range(75): + ... res = a.method(x) + >>> a.calls + 75 + + Note that the apparent behavior will be exactly like that of lru_cache + except that the cache is stored on each instance, so values in one + instance will not flush values from another, and when an instance is + deleted, so are the cached values for that instance. + + >>> b = MyClass() + >>> for x in range(35): + ... res = b.method(x) + >>> b.calls + 35 + >>> a.method(0) + 0 + >>> a.calls + 75 + + Note that if method had been decorated with ``functools.lru_cache()``, + a.calls would have been 76 (due to the cached value of 0 having been + flushed by the 'b' instance). + + Clear the cache with ``.cache_clear()`` + + >>> a.method.cache_clear() + + Same for a method that hasn't yet been called. + + >>> c = MyClass() + >>> c.method.cache_clear() + + Another cache wrapper may be supplied: + + >>> cache = functools.lru_cache(maxsize=2) + >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) + >>> a = MyClass() + >>> a.method2() + 3 + + Caution - do not subsequently wrap the method with another decorator, such + as ``@property``, which changes the semantics of the function. + + See also + http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ + for another implementation and additional justification. + """ + cache_wrapper = cache_wrapper or functools.lru_cache() + + def wrapper(self, *args, **kwargs): + # it's the first call, replace the method with a cached, bound method + bound_method = types.MethodType(method, self) + cached_method = cache_wrapper(bound_method) + setattr(self, method.__name__, cached_method) + return cached_method(*args, **kwargs) + + # Support cache clear even before cache has been created. + wrapper.cache_clear = lambda: None + + return wrapper + + +# From jaraco.functools 3.3 +def pass_none(func): + """ + Wrap func so it's not called if its first param is None + + >>> print_text = pass_none(print) + >>> print_text('text') + text + >>> print_text(None) + """ + + @functools.wraps(func) + def wrapper(param, *args, **kwargs): + if param is not None: + return func(param, *args, **kwargs) + + return wrapper diff --git a/MLPY/Lib/site-packages/importlib_metadata/_itertools.py b/MLPY/Lib/site-packages/importlib_metadata/_itertools.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ca9b9140e3f085b36609bb8dfdaea79c78e144 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_itertools.py @@ -0,0 +1,73 @@ +from itertools import filterfalse + + +def unique_everseen(iterable, key=None): + "List unique elements, preserving order. Remember all elements ever seen." 
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element + + +# copied from more_itertools 8.8 +def always_iterable(obj, base_type=(str, bytes)): + """If *obj* is iterable, return an iterator over its items:: + + >>> obj = (1, 2, 3) + >>> list(always_iterable(obj)) + [1, 2, 3] + + If *obj* is not iterable, return a one-item iterable containing *obj*:: + + >>> obj = 1 + >>> list(always_iterable(obj)) + [1] + + If *obj* is ``None``, return an empty iterable: + + >>> obj = None + >>> list(always_iterable(None)) + [] + + By default, binary and text strings are not considered iterable:: + + >>> obj = 'foo' + >>> list(always_iterable(obj)) + ['foo'] + + If *base_type* is set, objects for which ``isinstance(obj, base_type)`` + returns ``True`` won't be considered iterable. + + >>> obj = {'a': 1} + >>> list(always_iterable(obj)) # Iterate over the dict's keys + ['a'] + >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit + [{'a': 1}] + + Set *base_type* to ``None`` to avoid any special handling and treat objects + Python considers iterable as iterable: + + >>> obj = 'foo' + >>> list(always_iterable(obj, base_type=None)) + ['f', 'o', 'o'] + """ + if obj is None: + return iter(()) + + if (base_type is not None) and isinstance(obj, base_type): + return iter((obj,)) + + try: + return iter(obj) + except TypeError: + return iter((obj,)) diff --git a/MLPY/Lib/site-packages/importlib_metadata/_meta.py b/MLPY/Lib/site-packages/importlib_metadata/_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..1927d0f624d82f2fa12f81c80cce91279f039e84 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_meta.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import os +from typing import Protocol +from typing import Any, Dict, Iterator, List, Optional, TypeVar, Union, overload + + +_T = TypeVar("_T") + + +class PackageMetadata(Protocol): + def __len__(self) -> int: ... # pragma: no cover + + def __contains__(self, item: str) -> bool: ... # pragma: no cover + + def __getitem__(self, key: str) -> str: ... # pragma: no cover + + def __iter__(self) -> Iterator[str]: ... # pragma: no cover + + @overload + def get( + self, name: str, failobj: None = None + ) -> Optional[str]: ... # pragma: no cover + + @overload + def get(self, name: str, failobj: _T) -> Union[str, _T]: ... # pragma: no cover + + # overload per python/importlib_metadata#435 + @overload + def get_all( + self, name: str, failobj: None = None + ) -> Optional[List[Any]]: ... # pragma: no cover + + @overload + def get_all(self, name: str, failobj: _T) -> Union[List[Any], _T]: + """ + Return all values associated with a possibly multi-valued key. + """ + + @property + def json(self) -> Dict[str, Union[str, List[str]]]: + """ + A JSON-compatible form of the metadata. + """ + + +class SimplePath(Protocol): + """ + A minimal subset of pathlib.Path required by Distribution. + """ + + def joinpath( + self, other: Union[str, os.PathLike[str]] + ) -> SimplePath: ... # pragma: no cover + + def __truediv__( + self, other: Union[str, os.PathLike[str]] + ) -> SimplePath: ... # pragma: no cover + + @property + def parent(self) -> SimplePath: ... # pragma: no cover + + def read_text(self, encoding=None) -> str: ... 
# pragma: no cover + + def read_bytes(self) -> bytes: ... # pragma: no cover + + def exists(self) -> bool: ... # pragma: no cover diff --git a/MLPY/Lib/site-packages/importlib_metadata/_text.py b/MLPY/Lib/site-packages/importlib_metadata/_text.py new file mode 100644 index 0000000000000000000000000000000000000000..c88cfbb2349c6401336bc5ba6623f51afd1eb59d --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/_text.py @@ -0,0 +1,99 @@ +import re + +from ._functools import method_cache + + +# from jaraco.text 3.5 +class FoldedCase(str): + """ + A case insensitive string class; behaves just like str + except compares equal when the only variation is case. + + >>> s = FoldedCase('hello world') + + >>> s == 'Hello World' + True + + >>> 'Hello World' == s + True + + >>> s != 'Hello World' + False + + >>> s.index('O') + 4 + + >>> s.split('O') + ['hell', ' w', 'rld'] + + >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) + ['alpha', 'Beta', 'GAMMA'] + + Sequence membership is straightforward. + + >>> "Hello World" in [s] + True + >>> s in ["Hello World"] + True + + You may test for set inclusion, but candidate and elements + must both be folded. + + >>> FoldedCase("Hello World") in {s} + True + >>> s in {FoldedCase("Hello World")} + True + + String inclusion works as long as the FoldedCase object + is on the right. + + >>> "hello" in FoldedCase("Hello World") + True + + But not if the FoldedCase object is on the left: + + >>> FoldedCase('hello') in 'Hello World' + False + + In that case, use in_: + + >>> FoldedCase('hello').in_('Hello World') + True + + >>> FoldedCase('hello') > FoldedCase('Hello') + False + """ + + def __lt__(self, other): + return self.lower() < other.lower() + + def __gt__(self, other): + return self.lower() > other.lower() + + def __eq__(self, other): + return self.lower() == other.lower() + + def __ne__(self, other): + return self.lower() != other.lower() + + def __hash__(self): + return hash(self.lower()) + + def __contains__(self, other): + return super().lower().__contains__(other.lower()) + + def in_(self, other): + "Does self appear in other?" + return self in FoldedCase(other) + + # cache lower since it's likely to be called frequently. 
+ @method_cache + def lower(self): + return super().lower() + + def index(self, sub): + return self.lower().index(sub.lower()) + + def split(self, splitter=' ', maxsplit=0): + pattern = re.compile(re.escape(splitter), re.I) + return pattern.split(self, maxsplit) diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/__init__.py b/MLPY/Lib/site-packages/importlib_metadata/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18650e65b7f59368babf205796aae101fa4edc6d Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py311.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py311.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98eed476460c8374a8f76c4af969e40653a5dc09 Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py311.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py39.cpython-39.pyc b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py39.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a11669f0f8f2f6fd03e06f57efb8433228e2922f Binary files /dev/null and b/MLPY/Lib/site-packages/importlib_metadata/compat/__pycache__/py39.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/py311.py b/MLPY/Lib/site-packages/importlib_metadata/compat/py311.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5327436f9b1d9eae371e321c491a270634b3cf --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/compat/py311.py @@ -0,0 +1,22 @@ +import os +import pathlib +import sys +import types + + +def wrap(path): # pragma: no cover + """ + Workaround for https://github.com/python/cpython/issues/84538 + to add backward compatibility for walk_up=True. + An example affected package is dask-labextension, which uses + jupyter-packaging to install JupyterLab javascript files outside + of site-packages. + """ + + def relative_to(root, *, walk_up=False): + return pathlib.Path(os.path.relpath(path, root)) + + return types.SimpleNamespace(relative_to=relative_to) + + +relative_fix = wrap if sys.version_info < (3, 12) else lambda x: x diff --git a/MLPY/Lib/site-packages/importlib_metadata/compat/py39.py b/MLPY/Lib/site-packages/importlib_metadata/compat/py39.py new file mode 100644 index 0000000000000000000000000000000000000000..1f15bd97e6aa028d3e86734dd08c0eb5c06d79bc --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/compat/py39.py @@ -0,0 +1,36 @@ +""" +Compatibility layer with Python 3.8/3.9 +""" + +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: # pragma: no cover + # Prevent circular imports on runtime. + from .. import Distribution, EntryPoint +else: + Distribution = EntryPoint = Any + + +def normalized_name(dist: Distribution) -> Optional[str]: + """ + Honor name normalization for distributions that don't provide ``_normalized_name``. + """ + try: + return dist._normalized_name + except AttributeError: + from .. 
import Prepared # -> delay to prevent circular imports. + + return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) + + +def ep_matches(ep: EntryPoint, **params) -> bool: + """ + Workaround for ``EntryPoint`` objects without the ``matches`` method. + """ + try: + return ep.matches(**params) + except AttributeError: + from .. import EntryPoint # -> delay to prevent circular imports. + + # Reconstruct the EntryPoint object to make sure it is compatible. + return EntryPoint(ep.name, ep.value, ep.group).matches(**params) diff --git a/MLPY/Lib/site-packages/importlib_metadata/diagnose.py b/MLPY/Lib/site-packages/importlib_metadata/diagnose.py new file mode 100644 index 0000000000000000000000000000000000000000..e405471ac4d94371b1ee9b1622227ff76b337180 --- /dev/null +++ b/MLPY/Lib/site-packages/importlib_metadata/diagnose.py @@ -0,0 +1,21 @@ +import sys + +from . import Distribution + + +def inspect(path): + print("Inspecting", path) + dists = list(Distribution.discover(path=[path])) + if not dists: + return + print("Found", len(dists), "packages:", end=' ') + print(', '.join(dist.name for dist in dists)) + + +def run(): + for path in sys.path: + inspect(path) + + +if __name__ == '__main__': + run() diff --git a/MLPY/Lib/site-packages/importlib_metadata/py.typed b/MLPY/Lib/site-packages/importlib_metadata/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/LICENSE.txt b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..1b36c4d0d378a9ed81a217672719c7692637bf87 --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/LICENSE.txt @@ -0,0 +1,551 @@ +Intel End User License Agreement for Developer Tools (Version October 2020) + +IMPORTANT NOTICE - PLEASE READ AND AGREE BEFORE DOWNLOADING, INSTALLING, COPYING +OR USING + +This Agreement is between you, or the company or other legal entity that you +represent and warrant you have the legal authority to bind, (each, "You" or +"Your") and Intel Corporation and its subsidiaries (collectively, "Intel") +regarding Your use of the Materials. By downloading, installing, copying or +otherwise using the Materials, You agree to be bound by the terms of this +Agreement. If You do not agree to the terms of this Agreement, or do not have +legal authority or required age to agree to them, do not download, install, copy +or otherwise use the Materials. + +1. LICENSE DEFINITIONS. + +A. "Cloud Provider" means a third party cloud service provider offering a + cloud-based platform, infrastructure, application or storage services, such + as Microsoft Azure or Amazon Web Services, which You may utilize solely + subject to the restrictions set forth in Section 3.3 B. + +B. "Computer" means a computer, workstation or server(s); as well as a container + or virtual machine located on Your or Your Cloud Provider's server. + +C. "Derivative Work" means a derivative work, as defined in 17 U.S.C. 101, of + the Source Code. + +D. 
"Executable Code" means computer programming code in binary form suitable for + machine execution by a processor without the intervening steps of + interpretation or compilation. + +E. "Instance" means a single running copy of the Materials on a Computer. + +F. "Licensed Patent Claims" mean the claims of Intel's patents that are + necessarily and directly infringed by the reproduction and distribution of + the Materials that is authorized in Section 3 below, when the Materials are + in their unmodified form as delivered by Intel to You and not modified or + combined with anything else. Licensed Patent Claims are only those claims + that Intel can license without paying, or getting the consent of, a third + party. + +G. "Materials" mean the software, documentation, the software product serial + number, and other collateral, including any updates, that are made available + to You by Intel under this Agreement. Materials include any Redistributables, + Executable Code, Source Code, Sample Source Code, and Pre-Release Materials, + but do not include Third Party Programs. + +H. "Microsoft Platforms" mean any current and future Microsoft operating system + products, Microsoft run-time technologies (such as the .NET Framework), and + Microsoft application platforms (such as Microsoft Office or Microsoft + Dynamics) that Microsoft offers. + +I. "Pre-Release Materials" mean the Materials, or portions of the Materials, + that are identified (in the product release notes, on Intel's download + website for the Materials or elsewhere) or labeled as pre-release, prototype, + alpha or beta code and, as such, are deemed to be pre-release code, which may + not be fully functional or tested and may contain bugs or errors, which Intel + may substantially modify in its development of a production version, and for + which Intel makes no assurances that it will ever develop or make generally + available a production version. Pre-Release Materials are subject to the + terms of Section 4.2. + +J. "Priority Support" means assistance through Intel's Online Service Center for + paid User Types for the Materials. + +K. "Reciprocal Open Source Software" means any software that is subject to a + license which requires that (a) it must be distributed in source code form; + (b) it must be licensed under the same open source license terms; and (c) its + derivative works must be licensed under the same open source license terms. + Examples of this type of license are the GNU General Public License or the + Mozilla Public License. + +L. "Redistributables" mean the files (if any) listed in the "redist.txt", + "redist-rt.txt" or similarly-named text files that may be included in the + Materials. Redistributables include Sample Source Code. + +M. "Sample Source Code" means those portions of the Materials that are Source + Code and are identified as sample code. Sample Source Code may not have been + tested nor validated by Intel and is provided purely as a programming + example. + +N. "Source Code" means the software portion of the Materials provided in human + readable format. + +O. "Term" means either a perpetual or a time limited term for the Materials + that You obtain as specified on Intel's download website, in Intel's + applicable documentation or as controlled by the serial number for the + Materials. + +P. "Third Party Programs" mean the files (if any) listed in the + "third-party-programs.txt" or other similarly-named text file that may be + included in the Materials for the applicable software. + +Q. 
"User Type" has the meaning specified in Section 2. + +R. "Your Product" means one or more applications, products or projects developed + by or for You using the Materials. + +2. USER TYPES. This Agreement covers both free and paid User Types. Free users + do not receive access to Priority Support. All paid users receive access to + Priority Support upon the payment of fees specified in Section 5. If you are + a paid user, then the type of license You receive will be specified in + writing by Intel directly or by an authorized Intel distributor. You + understand and agree that the following User Types described below are + subject to Your continued compliance with the license grants in Section 3: + +2.1 Free User. You are authorized to run as many Instances as needed for an + unlimited number of users for You, Your company or other legal entity that + you represent. + +2.2 Single Named-User. Subject to payment of appropriate fees, You are + authorized to run as many Instances as needed for a single user. + +2.3 Concurrent User. Subject to payment of appropriate fees, You are authorized + to run as many Instances as needed on a designated network(s) for use by no + more than the authorized number of concurrent users. + +2.4 Site User. Subject to payment of appropriate fees, You are authorized to run + as many Instances as needed for use by any number of concurrent users + located at the specified site or sites specified in the + "site_license_materials.txt" file you receive from Intel. + +3. LICENSE GRANTS. + +3.1 License to the Materials. + + Subject to the terms and conditions of this Agreement, Intel grants You for + the appropriate Term a non-exclusive, worldwide, non-assignable (except as + expressly permitted hereunder), non-sublicensable, limited right and license + for Your applicable User Type: + + A. 
under its copyrights, to: + + (1) reproduce internally a reasonable number of copies of the Materials for + Your personal or business use; + + (2) use the Materials internally solely for Your personal or business use to + develop Your Product, in accordance with the documentation or text files + included as part of the Materials; + + (3) modify or create Derivative Works of the Redistributables, or any portions, + that are provided to You in Source Code; + + (4) distribute (directly and through Your distributors, resellers, and other + channel partners, if applicable), the Redistributables, including any + modifications to or Derivative Works of the Redistributables made pursuant + to Section 3.1.A(3), or any portions, subject to the following conditions: + + (a) Any distribution of the Redistributables must only be as part of Your + Product which must add significant primary functionality different than + that of the Redistributables themselves; + + (b) You will redistribute the Redistributables originally provided to You by + Intel only in Executable Code subject to a license agreement that + prohibits disassembly and reverse engineering of the Redistributables; + + (c) This distribution right includes a limited right to sublicense only the + Intel copyrights in the Redistributables and only to the extent necessary + to perform, display, and distribute the Redistributables (including Your + modifications and Derivative Works) solely as incorporated in Your + Product; and + + (d) You (i) will be solely responsible to Your customers for any update, + support obligation or other liability which may arise from Your + distribution of Your Product, (ii) will not make any statement that Your + Product is "certified" or that its performance is guaranteed by Intel or + its suppliers, (iii) will not use Intel's or its suppliers' names or + trademarks to market Your Product without written permission from Intel, + (iv) will comply with any additional restrictions which are included in + the text files with the Redistributables and in Section 4 below, (v) will + indemnify, hold harmless, and defend Intel and its suppliers from and + against any claims or lawsuits, including attorney's fees, that arise or + result from Your modifications, Derivative Works or Your distribution of + Your Product; + + and + + B. under Intel's Licensed Patent Claims, to: + (1) make copies of the Materials only as specified in Section 3.1.A(1); + (2) use the Materials only as specified in Section 3.1.A(2); and + (3) offer to distribute, and distribute, but not sell, the Redistributables + only as part of Your Product under Intel's copyright license granted in + Section 3.1(A), but only under the terms of that copyright license and not + as a sale; + And, provided further, that the license under the Licensed Patent Claims does + not and will not apply to, and Intel expressly does not grant You a patent + license in this Agreement to, any modifications to, or Derivative Works of, the + Materials or Redistributables, whether made by You, Your contractor(s), Your + customer(s) (which, for all purposes under this Agreement, will mean either a + customer, reseller, distributor or other channel partner) or any third party, + even if the modifications or Derivative Works are permitted under 3.1.A(3). + +3.2 Third Party Programs and Other Intel Programs Licenses. 
Third Party + Programs, even if included with the distribution of the Materials, may be + governed by separate license terms, including without limitation, third + party license terms, open source software notices and terms, and/or other + Intel software license terms. These separate license terms solely govern + Your use of the Third Party Programs. + +3.3 Third Party Use. + +A. If you are an entity, Your contractors may use the Materials as specified in + Section 3, provided: (i) their use of the Materials is solely on behalf of + and in support of Your business, (ii) they agree to the terms and conditions + of this Agreement, and (iii) You are solely responsible for their use of the + Materials. + +B. You may utilize a Cloud Provider to host the Materials for You, provided: (i) + the Cloud Provider may only host the Materials for Your exclusive use and may + not use the Materials for any other purpose whatsoever, including the + restriction set forth in Section 4.1(xii); (ii) the Cloud Provider's use of + the Materials must be solely on behalf of and in support of Your Product, and + (iii) You will indemnify, hold harmless, and defend Intel and its suppliers + from and against any claims or lawsuits, including attorney's fees, that + arise or result from Your Cloud Provider's use, misuse or disclosure of the + Materials. + +4. LICENSE CONDITIONS. + +4.1 Restrictions. Except as expressly provided in this Agreement, You may NOT: + (i) use, copy, distribute, or publicly display the Materials; (ii) share, + publish, rent or lease the Materials to any third party; (iii) assign this + Agreement or transfer the Materials; (iv) modify, adapt, or translate the + Materials in whole or in part; (v) reverse engineer, decompile, or + disassemble the Materials, or otherwise attempt to derive the source code + for the software; (vi) work around any technical limitations in the + Materials or attempt to modify or tamper with the normal function of any + license manager that may regulate usage of the Materials; (vii) distribute, + sublicense or transfer any Source Code, of the Materials or Derivative Works + to any third party; (viii) allow Redistributables to run on a platform other + than a Microsoft Platform if according to the accompanying user + documentation the Materials are meant to execute only on a Microsoft + Platform; (ix) remove, minimize, block or modify any notices of Intel or its + suppliers in the Materials; (x) include the Redistributables in malicious, + deceptive, or unlawful programs or products or use the Materials in any way + that is against the law; (xi) modify, create a Derivative Work, link, or + distribute the Materials so that any part of it becomes Reciprocal Open + Source Software; (xii) use the Materials directly or indirectly for SaaS + services or service bureau purposes (i.e., a service that allows use of or + access to the Materials by a third party as a service, such as the + salesforce.com service business model). + +4.2 Pre-Release Materials. If You receive Pre-Release Materials, You may + reproduce a reasonable number of copies, and use the Pre-Release Materials + for evaluation, and testing purposes only. You may not (i) modify or + incorporate the Pre-Release Materials into Your Product; (ii) continue to + use the Pre-Release Materials once a commercial version is released; or + (iii) disclose to any third party any benchmarks, performance results, or + other information relating to the Pre-Release Materials. 
Intel may waive + these restrictions in writing at its sole discretion; however, if You decide + to use the Pre-Release Materials in Your Product (even with Intel's waiver), + You acknowledge and agree that You are fully responsible for any and all + issues that result. + +4.3 Safety, Critical, and Lifesaving Applications. The Materials may provide + information relevant to safety-critical applications to allow compliance + with functional safety standards or requirements ("Safety-Critical + Applications"). You understand and acknowledge that safety is Your + responsibility. To the extent You use the Materials to create, or as part + of, products used in Safety-Critical Applications it is Your + responsibility to design, manage and assure system-level safeguards to + anticipate, monitor and control system failures, and You agree that You are + solely responsible for all applicable regulatory standards and + safety-related requirements concerning Your use of the Materials in Safety + Critical Applications. Should You use the Materials for Safety-Critical + Applications or in any type of a system or application in which the failure + of the Materials could create a situation where personal injury or death may + occur (e.g., medical systems, life sustaining or lifesaving systems) + ("Lifesaving Applications"), You agree to indemnify, defend, and hold Intel + and its representatives harmless against all claims, costs, damages, and + expenses, including reasonable attorney fees arising in any way out of Your + use of the Materials in Safety-Critical Applications or Lifesaving + Applications and claims of product liability, personal injury or death + associated with those applications; even if such claims allege that Intel + was negligent or strictly liable regarding the design or manufacture of the + Materials or its failure to warn regarding the Materials. + +4.4 Media Format Codecs and Digital Rights Management. You acknowledge and agree + that Your use of the Materials or distribution of the Redistributables with + Your Product as permitted by this Agreement may require You to procure + license(s) from third parties that may hold intellectual property rights + applicable to any media decoding, encoding or transcoding technology (e.g., + the use of an audio or video codec) and/or digital rights management + capabilities of the Materials, if any. Should any such additional licenses + be required, You are solely responsible for obtaining any such licenses and + agree to obtain any such licenses at Your own expense. + +4.5 Materials Transfer. 
You may only permanently transfer the Materials, and all + of Your rights and obligations under this Agreement, to another party + ("Recipient") solely in conjunction with a change of ownership, merger, + acquisition, sale or transfer of all or substantially all of Your business + or assets, either voluntarily, by operation of law or otherwise subject to + the following: You must notify Intel of the transfer by sending a letter to + Intel: (i) identifying the Recipient and Your legal entities, (ii) + identifying the Materials (i.e., the specific Intel software and version) + and the associated serial numbers to be transferred, (iii) certifying that + You retain no copies of the Materials or portions, (iv) certifying that the + Recipient has agreed in writing to be bound by all of the terms and + conditions of this Agreement, (v) for paid User Types listed in Section 2, + certifying that the Recipient has been notified that in order to receive + support from Intel for the Materials they must notify Intel in writing of + the transfer and provide Intel with the information specified in subsection + (ii) above along with the name and email address of the individual assigned + to use the Materials, and (vi) providing Your email address so that Intel + may confirm receipt of Your letter. The above information can be emailed to + your Intel representative or by letter to: Intel Corporation, 2111 NE 25th + Avenue, Hillsboro, OR 97124, Attn: CPDP Contracts Management, JF2-28. The + Materials will be permanently transferred to the Recipient once Intel + confirms receipt of Your request. + +5. FEES; TAXES. + +5.1 Fees. Upon Your receipt of Intel's or its reseller's invoice, You will pay + Intel or its reseller the license and support fees, if any, for the + Materials in US dollars according to Your User Type + +5.2 Taxes. All payments will be made free and clear without deduction for any + and all present and future taxes imposed by any taxing authority. In the + event that You are prohibited by law from making such payments unless You + deduct or withhold taxes therefrom and remit such taxes to the local taxing + jurisdiction, then You will duly withhold and remit such taxes to the + appropriate taxing authority and will pay to Intel or its reseller its + proportionate share of the remaining net amount after the taxes have been + withheld. You will promptly furnish Intel or its reseller with a copy of an + official tax receipt or other appropriate evidence of any taxes imposed on + payments made under this Agreement, including taxes on any additional + amounts paid. In cases other than taxes referred to above, including but not + limited to sales and use taxes, stamp taxes, value added taxes, property + taxes and other taxes or duties imposed by any taxing authority on or with + respect to this Agreement, the costs of such taxes or duties will be borne + by You. In the event that such taxes or duties are legally imposed initially + on Intel or its reseller, or Intel or its reseller is later assessed by any + taxing authority, then Intel or its reseller will be promptly reimbursed by + You for such taxes or duties. + +6. DATA COLLECTION AND PRIVACY. + +6.1 Data Collection. Certain Materials may generate and collect anonymous data + and/or provisioning data about the Materials and/or the development + environment and transmit the data to Intel as a one-time event during + installation. 
Optional data may also be collected by the Materials, however, + You will be provided notice of the request to collect optional data and no + optional data will be collected without Your consent. All data collection by + Intel is performed pursuant to relevant privacy laws, including notice and + consent requirements. + +6.2 Intel's Privacy Notice. Intel is committed to respecting Your privacy. To + learn more about Intel's privacy practices, please visit + http://www.intel.com/privacy. + +7. OWNERSHIP. Title to the Materials and all copies remain with Intel or its + suppliers. The Materials are protected by intellectual property rights, + including without limitation, United States copyright laws and international + treaty provisions. You will not remove any copyright or other proprietary + notices from the Materials. You agree to prevent any unauthorized copying of + the Materials. Except as expressly provided herein, no license or right is + granted to You directly or by implication, inducement, estoppel or otherwise; + specifically Intel does not grant any express or implied right to You under + Intel patents, copyrights, trademarks, or trade secrets. + +8. NO WARRANTY AND NO SUPPORT. + +8.1 No Warranty. Disclaimer. Intel disclaims all warranties of any kind and the + terms and remedies provided in this Agreement are instead of any other + warranty or condition, express, implied or statutory, including those + regarding merchantability, fitness for any particular purpose, + non-infringement or any warranty arising out of any course of dealing, usage + of trade, proposal, specification or sample. Intel does not assume (and does + not authorize any person to assume on its behalf) any other liability. + +8.2 No Support; Priority Support for Paid User Types. Intel may make changes to + the Materials, or to items referenced therein, at any time without notice, + but is not obligated to support, update or provide training for the + Materials under the terms of this Agreement. Intel offers Priority Support + for paid User Types. + +9. LIMITATION OF LIABILITY. + +9.1 Intel will not be liable for any of the following losses or damages + (whether such losses or damages were foreseen, foreseeable, known or + otherwise): (i) loss of revenue; (ii) loss of actual or anticipated profits; + (iii) loss of the use of money; (iv) loss of anticipated savings; (v) loss + of business; (vi) loss of opportunity; (vii) loss of goodwill; (viii) loss + of use of the Materials; (ix) loss of reputation; (x) loss of, damage to, or + corruption of data; or (xi) any indirect, incidental special or + consequential loss of damage however caused (including loss or damage of the + type specified in this Section 9). + +9.2 Intel's total cumulative liability to You, including for direct damages for + claims relating to this Agreement (whether for breach of contract, + negligence, or for any other reason), will not exceed the sum paid to Intel + by You in the twelve (12) month period preceding the date such claim arose + for the Materials that are the subject of and directly affected by such + claim. + +9.3 You acknowledge that the limitations of liability provided in this Section 9 + are an essential part of this Agreement. You agree that the limitations of + liability provided in this Agreement with respect to Intel will be conveyed + to and made binding upon any customer of Yours that acquires the + Redistributables, alone or in combination with other items from You. + +10. USER SUBMISSIONS. 
This Agreement does not obligate You to provide Intel with + materials, information, comments, suggestions or other communications + regarding the Materials. However, You agree that any material, information, + comments, suggestions or other communications You transmit or post to an + Intel website (including but not limited to, submissions to the Priority + Support and/or other customer support websites or online portals) or provide + to Intel under this Agreement are not controlled by the International + Traffic in Arms Regulations (ITAR) or the Export Administration Regulation + (EAR), and if related to the features, functions, performance or use of the + Materials are deemed non-confidential and non-proprietary + ("Communications"). Intel will have no obligations with respect to the + Communications. You hereby grant to Intel a non-exclusive, perpetual, + irrevocable, royalty-free, copyright license to copy, modify, create + Derivative Works, publicly display, disclose, distribute, license and + sublicense through multiple tiers of distribution and licensees, incorporate + and otherwise use the Communications and all data, images, sounds, text, and + other things embodied therein, including Derivative Works thereto, for any + and all commercial or non-commercial purposes. You are prohibited from + posting or transmitting to or from an Intel website or providing to Intel + any unlawful, threatening, libelous, defamatory, obscene, pornographic, or + other material that would violate any law. If You wish to provide Intel with + information that You intend to be treated as confidential information, Intel + requires that such confidential information be provided pursuant to a + non-disclosure agreement ("NDA"); please contact Your Intel representative + to ensure the proper NDA is in place. + + Nothing in this Agreement will be construed as preventing Intel from + reviewing Your Communications and errors or defects in Intel products + discovered while reviewing Your Communications. Furthermore, nothing in this + Agreement will be construed as preventing Intel from implementing + independently-developed enhancements to Intel's own error diagnosis + methodology to detect errors or defects in Intel products discovered while + reviewing Your Communications or to implement bug fixes or enhancements in + Intel products. The foregoing may include the right to include Your + Communications in regression test suites. + +11. NON-DISCLOSURE. Information provided by Intel to You may include information + marked as confidential. You must treat such information as confidential + under the terms of the applicable NDA between Intel and You. If You have not + entered into an NDA with Intel, You must not disclose, distribute or make + use of any information marked as confidential, except as expressly + authorized in writing by Intel. Intel retains all rights in and to its + confidential information specifications, designs, engineering details, + discoveries, inventions, patents, copyrights, trademarks, trade secrets and + other proprietary rights relating to the Materials. Any breach by You of + the confidentiality obligations provided for in this Section 11 will cause + irreparable injury to Intel for which money damages may be inadequate to + compensate Intel for losses arising from such a breach. Intel may obtain + equitable relief, including injunctive relief, if You breach or threaten to + breach Your confidentiality obligations. + +12. TERM AND TERMINATION. 
This Agreement becomes effective on the date You + accept this Agreement and will continue until terminated as provided for in + this Agreement. If You are using the Materials under a paid User Type with a + limited Term, this Agreement terminates without notice on the last day of + the Term. If you are using the Materials under a free User Type, the Term is + perpetual. The Term for any Pre-Release Materials terminates upon release of + a commercial version. Intel may terminate this Agreement if You are in + breach of any of its terms and conditions and such breach is not cured + within thirty (30) days of written notice from Intel. Upon termination, You + will promptly destroy the Materials and all copies. In the event of + termination of this Agreement, the license grant to any Redistributables + distributed by You in accordance with the terms and conditions of this + Agreement, prior to the effective date of such termination, will survive any + such termination of this Agreement. Sections 1, 3.1.A(4)(d)(v), 3.2, 3.3 + B(iii), 4.3, 5.2, 6, 7, 8, 9, 10, 11, 12 (with respect to these survival + provisions in the last sentence), 13, and 14 will survive expiration or + termination of this Agreement. + +13. U.S. GOVERNMENT RESTRICTED RIGHTS. The technical data and computer software + covered by this license is a "Commercial Item," as such term is defined by + the FAR 2.101 (48 C.F.R. 2.101) and is "commercial computer software" and + "commercial computer software documentation" as specified under FAR 12.212 + (48 C.F.R. 12.212) or DFARS 227.7202 (48 C.F.R. 227.7202), as applicable. + This commercial computer software and related documentation is provided to + end users for use by and on behalf of the U.S. Government, with only those + rights as are granted to all other end users pursuant to the terms and + conditions of this Agreement. + +14. GENERAL PROVISIONS. + +14.1 ENTIRE AGREEMENT. This Agreement contains the complete and exclusive + agreement and understanding between the parties concerning the subject + matter of this Agreement, and supersedes all prior and contemporaneous + proposals, agreements, understanding, negotiations, representations, + warranties, conditions, and communications, oral or written, between the + parties relating to the same subject matter. This Agreement, including + without limitation its termination, has no effect on any signed NDA between + the parties, which remain in full force and effect as separate agreements + to their terms. Each party acknowledges and agrees that in entering into + this Agreement it has not relied on, and will not be entitled to rely on, + any oral or written representations, warranties, conditions, understanding, + or communications between the parties that are not expressly set forth in + this Agreement. The express provisions of this Agreement control over any + course of performance, course of dealing, or usage of the trade + inconsistent with any of the provisions of this Agreement. The provisions + of this Agreement will prevail notwithstanding any different, conflicting, + or additional provisions that may appear on any purchase order, + acknowledgement, invoice, or other writing issued by either party in + connection with this Agreement. 
No modification or amendment to this + Agreement will be effective unless in writing and signed by authorized + representatives of each party, and must specifically identify this + Agreement by its title and version (e.g., "Intel oneAPI End User License + Agreement (Version October 2020)"); except that Intel may make changes to + the Agreement as it distributes new versions of the Materials. When changes + are made, Intel will make a new version of the Agreement available on its + website. If You received a copy of this Agreement translated into another + language, the English language version of this Agreement will prevail in + the event of any conflict between versions. + +14.2 EXPORT. You acknowledge that the Materials and all related technical + information are subject to export controls and you agree to comply with all + laws and regulations of the United States and other applicable governments + governing export, re-export, import, transfer, distribution, and use of the + Materials. In particular, but without limitation, the Materials may not be + exported or re-exported (a) into any U.S. embargoed countries or (b) to any + person or entity listed on a denial order published by the U.S. government + or any other applicable governments. By using the Materials, You represent + and warrant that You are not located in any such country or on any such + list. You also agree that You will not use the Materials for, or sell or + transfer them to a third party who is known or suspected to be involved in, + any purposes prohibited by the U.S. government or other applicable + governments, including, without limitation, the development, design, + manufacture, or production of nuclear, missile, chemical or biological + weapons. + +14.3 GOVERNING LAW, JURISDICTION, AND VENUE. All disputes arising out of or + related to this Agreement, whether based on contract, tort, or any other + legal or equitable theory, will in all respects be governed by, and + construed and interpreted under, the laws of the United States of America + and the State of Delaware, without reference to conflict of laws + principles. The parties agree that the United Nations Convention on + Contracts for the International Sale of Goods (1980) is specifically + excluded from and will not apply to this Agreement. All disputes arising + out of or related to this Agreement, whether based on contract, tort, or + any other legal or equitable theory, will be subject to the exclusive + jurisdiction of the courts of the State of Delaware or of the Federal + courts sitting in that State. Each party submits to the personal + jurisdiction of those courts and waives all objections to that + jurisdiction and venue for those disputes. + +14.4 SEVERABILITY. The parties intend that if a court holds that any provision + or part of this Agreement is invalid or unenforceable under applicable law, + the court will modify the provision to the minimum extent necessary to make + it valid and enforceable, or if it cannot be made valid and enforceable, + the parties intend that the court will sever and delete the provision or + part from this Agreement. Any change to or deletion of a provision or part + of this Agreement under this Section will not affect the validity or + enforceability of the remainder of this Agreement, which will continue in + full force and effect. 
diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/METADATA b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..6523cbde32b3c69c667741442e703c839893ae00 --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/METADATA @@ -0,0 +1,23 @@ +Metadata-Version: 2.1 +Name: intel-openmp +Version: 2021.4.0 +Summary: Intel OpenMP* Runtime Library +Home-page: https://software.intel.com/content/www/us/en/develop/tools/compilers/c-compilers.html +Author: Intel Corporation +Author-email: scripting@intel.com +License: Intel End User License Agreement for Developer Tools +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Operating System :: Microsoft :: Windows +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: System Administrators +Classifier: Intended Audience :: Other Audience +Classifier: Intended Audience :: Science/Research +Classifier: Topic :: Software Development :: Libraries +Classifier: License :: Other/Proprietary License +Description-Content-Type: text/markdown + +Intel OpenMP* Runtime Library x86_64 dynamic libraries for Windows*. Intel OpenMP* Runtime Library provides OpenMP API specification support in Intel® C Compiler, Intel® C++ Compiler and Intel® Fortran Compiler. It helps to improve performance by creating multithreaded software using shared memory and running on multi-core processor systems. + + diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/RECORD b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d7b77e17e88d5a849ec26d6c2aea60212b60fa3e --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/RECORD @@ -0,0 +1,14 @@ +../../Library/bin/libiomp5md.dll,sha256=fb4y0HpDmWr7wQnT7w1Jw6GrHhSxBpAyP1Px8963bbs,1942464 +../../Library/bin/libiomp5md.pdb,sha256=PxBCo9RzZlcs42QqJjxeiN7WTsmIixwJmDlDq4JmuVQ,4329472 +../../Library/bin/libiompstubs5md.dll,sha256=tP_TBupONZFtW3qlxmp09G6rbMDnXvFoYhIOJ8qni3g,39872 +../../Library/bin/libomp-fallback-cstring.spv,sha256=5ZWrqt2Sbb-3JiZ-yBuDP81B-TvzAC0YUyGb4omPoNs,348 +../../Library/bin/omptarget.dll,sha256=r5JmJN2yrHJqo8XK5szUFBwTmOjBzvLqA0AITMkBKBA,671680 +../../Library/bin/omptarget.rtl.level0.dll,sha256=kOdnTUmh31L3w2fhXkizKoC6wK7G2gKiMe_BicvZT9o,1460160 +../../Library/bin/omptarget.rtl.opencl.dll,sha256=VEl1YgJ3yVhhcZV5R_YQT-pOY_ukFEl7Lb1negoTS6I,1522616 +../../Library/lib/libomp-fallback-cstring.obj,sha256=wRQM_QB2US_pB82rdLzDbMMR9EnbFnoPnP19Q4s7Ovw,9359 +intel_openmp-2021.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +intel_openmp-2021.4.0.dist-info/LICENSE.txt,sha256=rC4Sj1Honhsc9UnOXr_ywlXx5UL1HbyhVwkyqJgP8xs,34478 +intel_openmp-2021.4.0.dist-info/METADATA,sha256=yRKSXtapM5Hb0uR3pETZtaCMrle9Xip3nkGqTECo5wE,1201 +intel_openmp-2021.4.0.dist-info/RECORD,, +intel_openmp-2021.4.0.dist-info/WHEEL,sha256=8LoR7XFRBQAclYSv48VVSgW4Rfczh8dfa-4Q4WIotnk,128 +intel_openmp-2021.4.0.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/WHEEL b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..278b0906fc77288b5b3f2b8af89a5a4a43f672dc --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/WHEEL @@ -0,0 
+1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py2-none-win_amd64 +Tag: py3-none-win_amd64 + diff --git a/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/MLPY/Lib/site-packages/intel_openmp-2021.4.0.dist-info/top_level.txt @@ -0,0 +1 @@ + diff --git a/MLPY/Lib/site-packages/isapi/PyISAPI_loader.dll b/MLPY/Lib/site-packages/isapi/PyISAPI_loader.dll new file mode 100644 index 0000000000000000000000000000000000000000..584cdbb85dbc4852e7d249668927464d09abc211 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/PyISAPI_loader.dll differ diff --git a/MLPY/Lib/site-packages/isapi/README.txt b/MLPY/Lib/site-packages/isapi/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc528624fef2c5eaca545dbd4e19aff986ba6b29 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/README.txt @@ -0,0 +1,7 @@ +A Python ISAPI extension. Contributed by Phillip Frantz, and is +Copyright 2002-2003 by Blackdog Software Pty Ltd. + +See the 'samples' directory, and particularly samples\README.txt + +You can find documentation in the PyWin32.chm file that comes with pywin32 - +you can open this from Pythonwin->Help, or from the start menu. \ No newline at end of file diff --git a/MLPY/Lib/site-packages/isapi/__init__.py b/MLPY/Lib/site-packages/isapi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71823616aef86dae3e2e6a8c5f131fda00cd2c0f --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/__init__.py @@ -0,0 +1,39 @@ +# The Python ISAPI package. + + +# Exceptions thrown by the DLL framework. +class ISAPIError(Exception): + def __init__(self, errno, strerror=None, funcname=None): + # named attributes match IOError etc. + self.errno = errno + self.strerror = strerror + self.funcname = funcname + Exception.__init__(self, errno, strerror, funcname) + + def __str__(self): + if self.strerror is None: + try: + import win32api + + self.strerror = win32api.FormatMessage(self.errno).strip() + except: + self.strerror = "no error message is available" + # str() looks like a win32api error. + return str((self.errno, self.strerror, self.funcname)) + + +class FilterError(ISAPIError): + pass + + +class ExtensionError(ISAPIError): + pass + + +# A little development aid - a filter or extension callback function can +# raise one of these exceptions, and the handler module will be reloaded. +# This means you can change your code without restarting IIS. +# After a reload, your filter/extension will have the GetFilterVersion/ +# GetExtensionVersion function called, but with None as the first arg. 
+class InternalReloadException(Exception): + pass diff --git a/MLPY/Lib/site-packages/isapi/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8427727cfc57cb7e7cee8629921e1cadf7167ff5 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/__pycache__/install.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/__pycache__/install.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56cc4ed2cd2f71cb94f5ab1a48060ee3af77267c Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/__pycache__/install.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/__pycache__/isapicon.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/__pycache__/isapicon.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982792b81170889afdbb0b779e4accd11e346316 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/__pycache__/isapicon.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/__pycache__/simple.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/__pycache__/simple.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06a5aa3c62fd2b18f62def5cd4eb94c01bf94b89 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/__pycache__/simple.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/__pycache__/threaded_extension.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/__pycache__/threaded_extension.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f24f455ee1df107e602c933a6c4a89178a6aa08 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/__pycache__/threaded_extension.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/doc/isapi.html b/MLPY/Lib/site-packages/isapi/doc/isapi.html new file mode 100644 index 0000000000000000000000000000000000000000..03001a1beb14f70bba35eba161f6224797c82c0d --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/doc/isapi.html @@ -0,0 +1,92 @@ + + + +Introduction to Python ISAPI support + +

Introduction to Python ISAPI support

+ +

See also

+ +

Note: if you are viewing this documentation directly from disk, +most links in this document will fail - you can also find this document in the +CHM file that comes with pywin32, where the links will work. + +

Introduction

+This documents Python support for hosting ISAPI extensions and filters inside +Microsoft Internet Information Server (IIS). It assumes a basic understanding +of the ISAPI filter and extension mechanism. +

+In summary, to implement a filter or extension, you provide a Python module +which defines a Filter and/or Extension class. Once your class has been +loaded, IIS/ISAPI will, via an extension DLL, call methods on your class. +

+A filter or extension class need only provide 3 methods - for filters they +are called GetFilterVersion, HttpFilterProc and +TerminateFilter. For extensions they +are named GetExtensionVersion, HttpExtensionProc and +TerminateExtension. If you are familiar with writing ISAPI +extensions in C/C++, these names and their purpose will be familiar. +

+Most of the work is done in the HttpFilterProc and +HttpExtensionProc methods. These both take a single +parameter - an HTTP_FILTER_CONTEXT and +EXTENSION_CONTROL_BLOCK +object respectively. +

+In addition to these components, there is an 'isapi' package, containing +support facilities (base-classes, exceptions, etc) which can be leveraged +by the extension. + +

Base classes

+There are a number of base classes provided to make writing extensions a little +simpler. Of particular note is isapi.threaded_extension.ThreadPoolExtension. +This implements a thread-pool and informs IIS that the request is progressing +in the background. Your sub-class need only provide a Dispatch +method, which is called on one of the worker threads rather than the thread +that the request came in on. +
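For illustration only (this snippet is not one of the files added in this diff), a minimal extension built on ThreadPoolExtension, in the spirit of the pywin32 samples, might look roughly like the following; the class name and the response body are placeholders:

```python
# Illustrative sketch only - not a file added by this diff.
# It follows the structure of the pywin32 samples; the class name and the
# response body are placeholders.
from isapi import threaded_extension


class Extension(threaded_extension.ThreadPoolExtension):
    "Minimal example extension."

    def Dispatch(self, ecb):
        # Called on a worker thread with the EXTENSION_CONTROL_BLOCK (ecb).
        ecb.SendResponseHeaders("200 OK", "Content-Type: text/html\r\n\r\n", False)
        ecb.WriteClient(b"<html><body>Hello from Python ISAPI</body></html>")
        ecb.DoneWithSession()
```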

+There is a base-class for a filter in isapi.simple, but there is no +equivalent threaded filter - filters work under a different model, where +background processing is not possible. +
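A corresponding filter sketch using the isapi.simple base class, again purely illustrative and modelled on the pywin32 redirector_with_filter sample (the notification handling shown is just an example of what a filter might do):

```python
# Illustrative sketch only - not a file added by this diff.
from isapi import isapicon
from isapi.simple import SimpleFilter


class Filter(SimpleFilter):
    "Minimal example filter."
    # Which notifications we want to receive, built from isapicon constants.
    filter_flags = (isapicon.SF_NOTIFY_PREPROC_HEADERS
                    | isapicon.SF_NOTIFY_ORDER_DEFAULT)

    def HttpFilterProc(self, fc):
        if fc.NotificationType == isapicon.SF_NOTIFY_PREPROC_HEADERS:
            # fc.GetData() returns the pre-processed headers object.
            url = fc.GetData().GetHeader("url")
            # ... inspect or rewrite 'url' here ...
        return isapicon.SF_STATUS_REQ_NEXT_NOTIFICATION
```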

Samples

+Please see the isapi/samples directory for some sample filters +and extensions. + +

Implementation

+A Python ISAPI filter or extension consists of 2 main components: +
    +
  • A DLL used by ISAPI to interface with Python.
  • +
  • A Python script used by that DLL to implement the filter or extension +functionality
  • +
+ +

Extension DLL

+The DLL is usually managed automatically by the isapi.install module. As the +Python script for the extension is installed, a generic DLL provided with +the isapi package is installed next to the script, and IIS is configured to +use this DLL. +

+The name of the DLL always has the same base name as the Python script, but +with a leading underscore (_), and an extension of .dll. For example, the +sample "redirector.py" will, when installed, have "_redirector.dll" created +in the same directory. +

+The Python script may provide 2 entry points - methods named __FilterFactory__ +and __ExtensionFactory__, both taking no arguments and returning a filter or +extension object. + +
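Putting these pieces together, a script of the kind described above typically exposes the factory entry point and, when run directly, drives isapi.install.HandleCommandLine (the parameter classes used here are the ones defined in install.py later in this diff). The following is an illustrative sketch, not a file from this diff, and it assumes an Extension class like the one sketched earlier; "MySample" is a placeholder virtual directory name:

```python
# Illustrative sketch only - not a file added by this diff.
# Assumes an Extension class like the one sketched earlier is defined in
# this same script; "MySample" is a placeholder virtual directory name.
from isapi.install import (HandleCommandLine, ISAPIParameters,
                           ScriptMapParams, VirtualDirParameters)


def __ExtensionFactory__():
    return Extension()  # the class defined earlier in this script


if __name__ == "__main__":
    params = ISAPIParameters()
    params.VirtualDirs = [
        VirtualDirParameters(
            Name="MySample",
            Description="Example Python ISAPI extension",
            ScriptMaps=[ScriptMapParams(Extension="*", Flags=0)],
            ScriptMapUpdate="replace",
        )
    ]
    HandleCommandLine(params)
```

Running the script with "install" (the default argument) creates the virtual directory and registers the loader DLL; "remove" undoes the configuration.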

Using py2exe and the isapi package

+You can instruct py2exe to create a 'frozen' Python ISAPI filter/extension. +In this case, py2exe will create a package with everything you need in one +directory, and the Python source file embedded in the .zip file. +

+In general, you will want to build a seperate installation executable along +with the ISAPI extension. This executable will be built from the same script. +See the ISAPI sample in the py2exe distribution. diff --git a/MLPY/Lib/site-packages/isapi/install.py b/MLPY/Lib/site-packages/isapi/install.py new file mode 100644 index 0000000000000000000000000000000000000000..154f82aff3bfabb28def9d01380b54be2273e7e8 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/install.py @@ -0,0 +1,815 @@ +"""Installation utilities for Python ISAPI filters and extensions.""" + +# this code adapted from "Tomcat JK2 ISAPI redirector", part of Apache +# Created July 2004, Mark Hammond. +import imp +import os +import shutil +import stat +import sys +import traceback + +import pythoncom +import win32api +import winerror +from win32com.client import Dispatch, GetObject +from win32com.client.gencache import EnsureDispatch, EnsureModule + +_APP_INPROC = 0 +_APP_OUTPROC = 1 +_APP_POOLED = 2 +_IIS_OBJECT = "IIS://LocalHost/W3SVC" +_IIS_SERVER = "IIsWebServer" +_IIS_WEBDIR = "IIsWebDirectory" +_IIS_WEBVIRTUALDIR = "IIsWebVirtualDir" +_IIS_FILTERS = "IIsFilters" +_IIS_FILTER = "IIsFilter" + +_DEFAULT_SERVER_NAME = "Default Web Site" +_DEFAULT_HEADERS = "X-Powered-By: Python" +_DEFAULT_PROTECTION = _APP_POOLED + +# Default is for 'execute' only access - ie, only the extension +# can be used. This can be overridden via your install script. +_DEFAULT_ACCESS_EXECUTE = True +_DEFAULT_ACCESS_READ = False +_DEFAULT_ACCESS_WRITE = False +_DEFAULT_ACCESS_SCRIPT = False +_DEFAULT_CONTENT_INDEXED = False +_DEFAULT_ENABLE_DIR_BROWSING = False +_DEFAULT_ENABLE_DEFAULT_DOC = False + +_extensions = [ext for ext, _, _ in imp.get_suffixes()] +is_debug_build = "_d.pyd" in _extensions + +this_dir = os.path.abspath(os.path.dirname(__file__)) + + +class FilterParameters: + Name = None + Description = None + Path = None + Server = None + # Params that control if/how AddExtensionFile is called. + AddExtensionFile = True + AddExtensionFile_Enabled = True + AddExtensionFile_GroupID = None # defaults to Name + AddExtensionFile_CanDelete = True + AddExtensionFile_Description = None # defaults to Description. + + def __init__(self, **kw): + self.__dict__.update(kw) + + +class VirtualDirParameters: + Name = None # Must be provided. + Description = None # defaults to Name + AppProtection = _DEFAULT_PROTECTION + Headers = _DEFAULT_HEADERS + Path = None # defaults to WWW root. + Type = _IIS_WEBVIRTUALDIR + AccessExecute = _DEFAULT_ACCESS_EXECUTE + AccessRead = _DEFAULT_ACCESS_READ + AccessWrite = _DEFAULT_ACCESS_WRITE + AccessScript = _DEFAULT_ACCESS_SCRIPT + ContentIndexed = _DEFAULT_CONTENT_INDEXED + EnableDirBrowsing = _DEFAULT_ENABLE_DIR_BROWSING + EnableDefaultDoc = _DEFAULT_ENABLE_DEFAULT_DOC + DefaultDoc = None # Only set in IIS if not None + ScriptMaps = [] + ScriptMapUpdate = "end" # can be 'start', 'end', 'replace' + Server = None + + def __init__(self, **kw): + self.__dict__.update(kw) + + def is_root(self): + "This virtual directory is a root directory if parent and name are blank" + parent, name = self.split_path() + return not parent and not name + + def split_path(self): + return split_path(self.Name) + + +class ScriptMapParams: + Extension = None + Module = None + Flags = 5 + Verbs = "" + # Params that control if/how AddExtensionFile is called. 
+ AddExtensionFile = True + AddExtensionFile_Enabled = True + AddExtensionFile_GroupID = None # defaults to Name + AddExtensionFile_CanDelete = True + AddExtensionFile_Description = None # defaults to Description. + + def __init__(self, **kw): + self.__dict__.update(kw) + + def __str__(self): + "Format this parameter suitable for IIS" + items = [self.Extension, self.Module, self.Flags] + # IIS gets upset if there is a trailing verb comma, but no verbs + if self.Verbs: + items.append(self.Verbs) + items = [str(item) for item in items] + return ",".join(items) + + +class ISAPIParameters: + ServerName = _DEFAULT_SERVER_NAME + # Description = None + Filters = [] + VirtualDirs = [] + + def __init__(self, **kw): + self.__dict__.update(kw) + + +verbose = 1 # The level - 0 is quiet. + + +def log(level, what): + if verbose >= level: + print(what) + + +# Convert an ADSI COM exception to the Win32 error code embedded in it. +def _GetWin32ErrorCode(com_exc): + hr = com_exc.hresult + # If we have more details in the 'excepinfo' struct, use it. + if com_exc.excepinfo: + hr = com_exc.excepinfo[-1] + if winerror.HRESULT_FACILITY(hr) != winerror.FACILITY_WIN32: + raise + return winerror.SCODE_CODE(hr) + + +class InstallationError(Exception): + pass + + +class ItemNotFound(InstallationError): + pass + + +class ConfigurationError(InstallationError): + pass + + +def FindPath(options, server, name): + if name.lower().startswith("iis://"): + return name + else: + if name and name[0] != "/": + name = "/" + name + return FindWebServer(options, server) + "/ROOT" + name + + +def LocateWebServerPath(description): + """ + Find an IIS web server whose name or comment matches the provided + description (case-insensitive). + + >>> LocateWebServerPath('Default Web Site') # doctest: +SKIP + + or + + >>> LocateWebServerPath('1') #doctest: +SKIP + """ + assert len(description) >= 1, "Server name or comment is required" + iis = GetObject(_IIS_OBJECT) + description = description.lower().strip() + for site in iis: + # Name is generally a number, but no need to assume that. + site_attributes = [ + getattr(site, attr, "").lower().strip() + for attr in ("Name", "ServerComment") + ] + if description in site_attributes: + return site.AdsPath + msg = "No web sites match the description '%s'" % description + raise ItemNotFound(msg) + + +def GetWebServer(description=None): + """ + Load the web server instance (COM object) for a given instance + or description. + If None is specified, the default website is retrieved (indicated + by the identifier 1. + """ + description = description or "1" + path = LocateWebServerPath(description) + server = LoadWebServer(path) + return server + + +def LoadWebServer(path): + try: + server = GetObject(path) + except pythoncom.com_error as details: + msg = details.strerror + if exc.excepinfo and exc.excepinfo[2]: + msg = exc.excepinfo[2] + msg = "WebServer %s: %s" % (path, msg) + raise ItemNotFound(msg) + return server + + +def FindWebServer(options, server_desc): + """ + Legacy function to allow options to define a .server property + to override the other parameter. Use GetWebServer instead. + """ + # options takes precedence + server_desc = options.server or server_desc + # make sure server_desc is unicode (could be mbcs if passed in + # sys.argv). 
+ if server_desc and not isinstance(server_desc, str): + server_desc = server_desc.decode("mbcs") + + # get the server (if server_desc is None, the default site is acquired) + server = GetWebServer(server_desc) + return server.adsPath + + +def split_path(path): + """ + Get the parent path and basename. + + >>> split_path('/') + ['', ''] + + >>> split_path('') + ['', ''] + + >>> split_path('foo') + ['', 'foo'] + + >>> split_path('/foo') + ['', 'foo'] + + >>> split_path('/foo/bar') + ['/foo', 'bar'] + + >>> split_path('foo/bar') + ['/foo', 'bar'] + """ + + if not path.startswith("/"): + path = "/" + path + return path.rsplit("/", 1) + + +def _CreateDirectory(iis_dir, name, params): + # We used to go to lengths to keep an existing virtual directory + # in place. However, in some cases the existing directories got + # into a bad state, and an update failed to get them working. + # So we nuke it first. If this is a problem, we could consider adding + # a --keep-existing option. + try: + # Also seen the Class change to a generic IISObject - so nuke + # *any* existing object, regardless of Class + assert name.strip("/"), "mustn't delete the root!" + iis_dir.Delete("", name) + log(2, "Deleted old directory '%s'" % (name,)) + except pythoncom.com_error: + pass + + newDir = iis_dir.Create(params.Type, name) + log(2, "Creating new directory '%s' in %s..." % (name, iis_dir.Name)) + + friendly = params.Description or params.Name + newDir.AppFriendlyName = friendly + + # Note that the new directory won't be visible in the IIS UI + # unless the directory exists on the filesystem. + try: + path = params.Path or iis_dir.Path + newDir.Path = path + except AttributeError: + # If params.Type is IIS_WEBDIRECTORY, an exception is thrown + pass + newDir.AppCreate2(params.AppProtection) + # XXX - note that these Headers only work in IIS6 and earlier. IIS7 + # only supports them on the w3svc node - not even on individial sites, + # let alone individual extensions in the site! + if params.Headers: + newDir.HttpCustomHeaders = params.Headers + + log(2, "Setting directory options...") + newDir.AccessExecute = params.AccessExecute + newDir.AccessRead = params.AccessRead + newDir.AccessWrite = params.AccessWrite + newDir.AccessScript = params.AccessScript + newDir.ContentIndexed = params.ContentIndexed + newDir.EnableDirBrowsing = params.EnableDirBrowsing + newDir.EnableDefaultDoc = params.EnableDefaultDoc + if params.DefaultDoc is not None: + newDir.DefaultDoc = params.DefaultDoc + newDir.SetInfo() + return newDir + + +def CreateDirectory(params, options): + _CallHook(params, "PreInstall", options) + if not params.Name: + raise ConfigurationError("No Name param") + parent, name = params.split_path() + target_dir = GetObject(FindPath(options, params.Server, parent)) + + if not params.is_root(): + target_dir = _CreateDirectory(target_dir, name, params) + + AssignScriptMaps(params.ScriptMaps, target_dir, params.ScriptMapUpdate) + + _CallHook(params, "PostInstall", options, target_dir) + log(1, "Configured Virtual Directory: %s" % (params.Name,)) + return target_dir + + +def AssignScriptMaps(script_maps, target, update="replace"): + """Updates IIS with the supplied script map information. 
+ + script_maps is a list of ScriptMapParameter objects + + target is an IIS Virtual Directory to assign the script maps to + + update is a string indicating how to update the maps, one of ('start', + 'end', or 'replace') + """ + # determine which function to use to assign script maps + script_map_func = "_AssignScriptMaps" + update.capitalize() + try: + script_map_func = eval(script_map_func) + except NameError: + msg = "Unknown ScriptMapUpdate option '%s'" % update + raise ConfigurationError(msg) + # use the str method to format the script maps for IIS + script_maps = [str(s) for s in script_maps] + # call the correct function + script_map_func(target, script_maps) + target.SetInfo() + + +def get_unique_items(sequence, reference): + "Return items in sequence that can't be found in reference." + return tuple([item for item in sequence if item not in reference]) + + +def _AssignScriptMapsReplace(target, script_maps): + target.ScriptMaps = script_maps + + +def _AssignScriptMapsEnd(target, script_maps): + unique_new_maps = get_unique_items(script_maps, target.ScriptMaps) + target.ScriptMaps = target.ScriptMaps + unique_new_maps + + +def _AssignScriptMapsStart(target, script_maps): + unique_new_maps = get_unique_items(script_maps, target.ScriptMaps) + target.ScriptMaps = unique_new_maps + target.ScriptMaps + + +def CreateISAPIFilter(filterParams, options): + server = FindWebServer(options, filterParams.Server) + _CallHook(filterParams, "PreInstall", options) + try: + filters = GetObject(server + "/Filters") + except pythoncom.com_error as exc: + # Brand new sites don't have the '/Filters' collection - create it. + # Any errors other than 'not found' we shouldn't ignore. + if ( + winerror.HRESULT_FACILITY(exc.hresult) != winerror.FACILITY_WIN32 + or winerror.HRESULT_CODE(exc.hresult) != winerror.ERROR_PATH_NOT_FOUND + ): + raise + server_ob = GetObject(server) + filters = server_ob.Create(_IIS_FILTERS, "Filters") + filters.FilterLoadOrder = "" + filters.SetInfo() + + # As for VirtualDir, delete an existing one. + assert filterParams.Name.strip("/"), "mustn't delete the root!" + try: + filters.Delete(_IIS_FILTER, filterParams.Name) + log(2, "Deleted old filter '%s'" % (filterParams.Name,)) + except pythoncom.com_error: + pass + newFilter = filters.Create(_IIS_FILTER, filterParams.Name) + log(2, "Created new ISAPI filter...") + assert os.path.isfile(filterParams.Path) + newFilter.FilterPath = filterParams.Path + newFilter.FilterDescription = filterParams.Description + newFilter.SetInfo() + load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b] + if filterParams.Name not in load_order: + load_order.append(filterParams.Name) + filters.FilterLoadOrder = ",".join(load_order) + filters.SetInfo() + _CallHook(filterParams, "PostInstall", options, newFilter) + log(1, "Configured Filter: %s" % (filterParams.Name,)) + return newFilter + + +def DeleteISAPIFilter(filterParams, options): + _CallHook(filterParams, "PreRemove", options) + server = FindWebServer(options, filterParams.Server) + ob_path = server + "/Filters" + try: + filters = GetObject(ob_path) + except pythoncom.com_error as details: + # failure to open the filters just means a totally clean IIS install + # (IIS5 at least has no 'Filters' key when freshly installed). + log(2, "ISAPI filter path '%s' did not exist." % (ob_path,)) + return + try: + assert filterParams.Name.strip("/"), "mustn't delete the root!" 
+ filters.Delete(_IIS_FILTER, filterParams.Name) + log(2, "Deleted ISAPI filter '%s'" % (filterParams.Name,)) + except pythoncom.com_error as details: + rc = _GetWin32ErrorCode(details) + if rc != winerror.ERROR_PATH_NOT_FOUND: + raise + log(2, "ISAPI filter '%s' did not exist." % (filterParams.Name,)) + # Remove from the load order + load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b] + if filterParams.Name in load_order: + load_order.remove(filterParams.Name) + filters.FilterLoadOrder = ",".join(load_order) + filters.SetInfo() + _CallHook(filterParams, "PostRemove", options) + log(1, "Deleted Filter: %s" % (filterParams.Name,)) + + +def _AddExtensionFile(module, def_groupid, def_desc, params, options): + group_id = params.AddExtensionFile_GroupID or def_groupid + desc = params.AddExtensionFile_Description or def_desc + try: + ob = GetObject(_IIS_OBJECT) + ob.AddExtensionFile( + module, + params.AddExtensionFile_Enabled, + group_id, + params.AddExtensionFile_CanDelete, + desc, + ) + log(2, "Added extension file '%s' (%s)" % (module, desc)) + except (pythoncom.com_error, AttributeError) as details: + # IIS5 always fails. Probably should upgrade this to + # complain more loudly if IIS6 fails. + log(2, "Failed to add extension file '%s': %s" % (module, details)) + + +def AddExtensionFiles(params, options): + """Register the modules used by the filters/extensions as a trusted + 'extension module' - required by the default IIS6 security settings.""" + # Add each module only once. + added = {} + for vd in params.VirtualDirs: + for smp in vd.ScriptMaps: + if smp.Module not in added and smp.AddExtensionFile: + _AddExtensionFile(smp.Module, vd.Name, vd.Description, smp, options) + added[smp.Module] = True + + for fd in params.Filters: + if fd.Path not in added and fd.AddExtensionFile: + _AddExtensionFile(fd.Path, fd.Name, fd.Description, fd, options) + added[fd.Path] = True + + +def _DeleteExtensionFileRecord(module, options): + try: + ob = GetObject(_IIS_OBJECT) + ob.DeleteExtensionFileRecord(module) + log(2, "Deleted extension file record for '%s'" % module) + except (pythoncom.com_error, AttributeError) as details: + log(2, "Failed to remove extension file '%s': %s" % (module, details)) + + +def DeleteExtensionFileRecords(params, options): + deleted = {} # only remove each .dll once. + for vd in params.VirtualDirs: + for smp in vd.ScriptMaps: + if smp.Module not in deleted and smp.AddExtensionFile: + _DeleteExtensionFileRecord(smp.Module, options) + deleted[smp.Module] = True + + for filter_def in params.Filters: + if filter_def.Path not in deleted and filter_def.AddExtensionFile: + _DeleteExtensionFileRecord(filter_def.Path, options) + deleted[filter_def.Path] = True + + +def CheckLoaderModule(dll_name): + suffix = "" + if is_debug_build: + suffix = "_d" + template = os.path.join(this_dir, "PyISAPI_loader" + suffix + ".dll") + if not os.path.isfile(template): + raise ConfigurationError("Template loader '%s' does not exist" % (template,)) + # We can't do a simple "is newer" check, as the DLL is specific to the + # Python version. So we check the date-time and size are identical, + # and skip the copy in that case. 
+ src_stat = os.stat(template) + try: + dest_stat = os.stat(dll_name) + except os.error: + same = 0 + else: + same = ( + src_stat[stat.ST_SIZE] == dest_stat[stat.ST_SIZE] + and src_stat[stat.ST_MTIME] == dest_stat[stat.ST_MTIME] + ) + if not same: + log(2, "Updating %s->%s" % (template, dll_name)) + shutil.copyfile(template, dll_name) + shutil.copystat(template, dll_name) + else: + log(2, "%s is up to date." % (dll_name,)) + + +def _CallHook(ob, hook_name, options, *extra_args): + func = getattr(ob, hook_name, None) + if func is not None: + args = (ob, options) + extra_args + func(*args) + + +def Install(params, options): + _CallHook(params, "PreInstall", options) + for vd in params.VirtualDirs: + CreateDirectory(vd, options) + + for filter_def in params.Filters: + CreateISAPIFilter(filter_def, options) + + AddExtensionFiles(params, options) + + _CallHook(params, "PostInstall", options) + + +def RemoveDirectory(params, options): + if params.is_root(): + return + try: + directory = GetObject(FindPath(options, params.Server, params.Name)) + except pythoncom.com_error as details: + rc = _GetWin32ErrorCode(details) + if rc != winerror.ERROR_PATH_NOT_FOUND: + raise + log(2, "VirtualDirectory '%s' did not exist" % params.Name) + directory = None + if directory is not None: + # Be robust should IIS get upset about unloading. + try: + directory.AppUnLoad() + except: + exc_val = sys.exc_info()[1] + log(2, "AppUnLoad() for %s failed: %s" % (params.Name, exc_val)) + # Continue trying to delete it. + try: + parent = GetObject(directory.Parent) + parent.Delete(directory.Class, directory.Name) + log(1, "Deleted Virtual Directory: %s" % (params.Name,)) + except: + exc_val = sys.exc_info()[1] + log(1, "Failed to remove directory %s: %s" % (params.Name, exc_val)) + + +def RemoveScriptMaps(vd_params, options): + "Remove script maps from the already installed virtual directory" + parent, name = vd_params.split_path() + target_dir = GetObject(FindPath(options, vd_params.Server, parent)) + installed_maps = list(target_dir.ScriptMaps) + for _map in map(str, vd_params.ScriptMaps): + if _map in installed_maps: + installed_maps.remove(_map) + target_dir.ScriptMaps = installed_maps + target_dir.SetInfo() + + +def Uninstall(params, options): + _CallHook(params, "PreRemove", options) + + DeleteExtensionFileRecords(params, options) + + for vd in params.VirtualDirs: + _CallHook(vd, "PreRemove", options) + + RemoveDirectory(vd, options) + if vd.is_root(): + # if this is installed to the root virtual directory, we can't delete it + # so remove the script maps. + RemoveScriptMaps(vd, options) + + _CallHook(vd, "PostRemove", options) + + for filter_def in params.Filters: + DeleteISAPIFilter(filter_def, options) + _CallHook(params, "PostRemove", options) + + +# Patch up any missing module names in the params, replacing them with +# the DLL name that hosts this extension/filter. +def _PatchParamsModule(params, dll_name, file_must_exist=True): + if file_must_exist: + if not os.path.isfile(dll_name): + raise ConfigurationError("%s does not exist" % (dll_name,)) + + # Patch up all references to the DLL. + for f in params.Filters: + if f.Path is None: + f.Path = dll_name + for d in params.VirtualDirs: + for sm in d.ScriptMaps: + if sm.Module is None: + sm.Module = dll_name + + +def GetLoaderModuleName(mod_name, check_module=None): + # find the name of the DLL hosting us. + # By default, this is "_{module_base_name}.dll" + if hasattr(sys, "frozen"): + # What to do? 
The .dll knows its name, but this is likely to be + # executed via a .exe, which does not know. + base, ext = os.path.splitext(mod_name) + path, base = os.path.split(base) + # handle the common case of 'foo.exe'/'foow.exe' + if base.endswith("w"): + base = base[:-1] + # For py2exe, we have '_foo.dll' as the standard pyisapi loader - but + # 'foo.dll' is what we use (it just delegates). + # So no leading '_' on the installed name. + dll_name = os.path.abspath(os.path.join(path, base + ".dll")) + else: + base, ext = os.path.splitext(mod_name) + path, base = os.path.split(base) + dll_name = os.path.abspath(os.path.join(path, "_" + base + ".dll")) + # Check we actually have it. + if check_module is None: + check_module = not hasattr(sys, "frozen") + if check_module: + CheckLoaderModule(dll_name) + return dll_name + + +# Note the 'log' params to these 'builtin' args - old versions of pywin32 +# didn't log at all in this function (by intent; anyone calling this was +# responsible). So existing code that calls this function with the old +# signature (ie, without a 'log' param) still gets the same behaviour as +# before... + + +def InstallModule(conf_module_name, params, options, log=lambda *args: None): + "Install the extension" + if not hasattr(sys, "frozen"): + conf_module_name = os.path.abspath(conf_module_name) + if not os.path.isfile(conf_module_name): + raise ConfigurationError("%s does not exist" % (conf_module_name,)) + + loader_dll = GetLoaderModuleName(conf_module_name) + _PatchParamsModule(params, loader_dll) + Install(params, options) + log(1, "Installation complete.") + + +def UninstallModule(conf_module_name, params, options, log=lambda *args: None): + "Remove the extension" + loader_dll = GetLoaderModuleName(conf_module_name, False) + _PatchParamsModule(params, loader_dll, False) + Uninstall(params, options) + log(1, "Uninstallation complete.") + + +standard_arguments = { + "install": InstallModule, + "remove": UninstallModule, +} + + +def build_usage(handler_map): + docstrings = [handler.__doc__ for handler in handler_map.values()] + all_args = dict(zip(iter(handler_map.keys()), docstrings)) + arg_names = "|".join(iter(all_args.keys())) + usage_string = "%prog [options] [" + arg_names + "]\n" + usage_string += "commands:\n" + for arg, desc in all_args.items(): + usage_string += " %-10s: %s" % (arg, desc) + "\n" + return usage_string[:-1] + + +def MergeStandardOptions(options, params): + """ + Take an options object generated by the command line and merge + the values into the IISParameters object. + """ + pass + + +# We support 2 ways of extending our command-line/install support. +# * Many of the installation items allow you to specify "PreInstall", +# "PostInstall", "PreRemove" and "PostRemove" hooks +# All hooks are called with the 'params' object being operated on, and +# the 'optparser' options for this session (ie, the command-line options) +# PostInstall for VirtualDirectories and Filters both have an additional +# param - the ADSI object just created. +# * You can pass your own option parser for us to use, and/or define a map +# with your own custom arg handlers. It is a map of 'arg'->function. +# The function is called with (options, log_fn, arg). The function's +# docstring is used in the usage output. +def HandleCommandLine( + params, + argv=None, + conf_module_name=None, + default_arg="install", + opt_parser=None, + custom_arg_handlers={}, +): + """Perform installation or removal of an ISAPI filter or extension. 
+ + This module handles standard command-line options and configuration + information, and installs, removes or updates the configuration of an + ISAPI filter or extension. + + You must pass your configuration information in params - all other + arguments are optional, and allow you to configure the installation + process. + """ + global verbose + from optparse import OptionParser + + argv = argv or sys.argv + if not conf_module_name: + conf_module_name = sys.argv[0] + # convert to a long name so that if we were somehow registered with + # the "short" version but unregistered with the "long" version we + # still work (that will depend on exactly how the installer was + # started) + try: + conf_module_name = win32api.GetLongPathName(conf_module_name) + except win32api.error as exc: + log( + 2, + "Couldn't determine the long name for %r: %s" % (conf_module_name, exc), + ) + + if opt_parser is None: + # Build our own parser. + parser = OptionParser(usage="") + else: + # The caller is providing their own filter, presumably with their + # own options all setup. + parser = opt_parser + + # build a usage string if we don't have one. + if not parser.get_usage(): + all_handlers = standard_arguments.copy() + all_handlers.update(custom_arg_handlers) + parser.set_usage(build_usage(all_handlers)) + + # allow the user to use uninstall as a synonym for remove if it wasn't + # defined by the custom arg handlers. + all_handlers.setdefault("uninstall", all_handlers["remove"]) + + parser.add_option( + "-q", + "--quiet", + action="store_false", + dest="verbose", + default=True, + help="don't print status messages to stdout", + ) + parser.add_option( + "-v", + "--verbosity", + action="count", + dest="verbose", + default=1, + help="increase the verbosity of status messages", + ) + parser.add_option( + "", + "--server", + action="store", + help="Specifies the IIS server to install/uninstall on." + " Default is '%s/1'" % (_IIS_OBJECT,), + ) + + (options, args) = parser.parse_args(argv[1:]) + MergeStandardOptions(options, params) + verbose = options.verbose + if not args: + args = [default_arg] + try: + for arg in args: + handler = all_handlers[arg] + handler(conf_module_name, params, options, log) + except (ItemNotFound, InstallationError) as details: + if options.verbose > 1: + traceback.print_exc() + print("%s: %s" % (details.__class__.__name__, details)) + except KeyError: + parser.error("Invalid arg '%s'" % arg) diff --git a/MLPY/Lib/site-packages/isapi/isapicon.py b/MLPY/Lib/site-packages/isapi/isapicon.py new file mode 100644 index 0000000000000000000000000000000000000000..20de1a44c29bd912876505d4cfba72cd44563059 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/isapicon.py @@ -0,0 +1,120 @@ +"""Constants needed by ISAPI filters and extensions.""" +# ====================================================================== +# Copyright 2002-2003 by Blackdog Software Pty Ltd. +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Blackdog Software not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. 
+# +# BLACKDOG SOFTWARE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT SHALL BLACKDOG SOFTWARE BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + +# HTTP reply codes + +HTTP_CONTINUE = 100 +HTTP_SWITCHING_PROTOCOLS = 101 +HTTP_PROCESSING = 102 +HTTP_OK = 200 +HTTP_CREATED = 201 +HTTP_ACCEPTED = 202 +HTTP_NON_AUTHORITATIVE = 203 +HTTP_NO_CONTENT = 204 +HTTP_RESET_CONTENT = 205 +HTTP_PARTIAL_CONTENT = 206 +HTTP_MULTI_STATUS = 207 +HTTP_MULTIPLE_CHOICES = 300 +HTTP_MOVED_PERMANENTLY = 301 +HTTP_MOVED_TEMPORARILY = 302 +HTTP_SEE_OTHER = 303 +HTTP_NOT_MODIFIED = 304 +HTTP_USE_PROXY = 305 +HTTP_TEMPORARY_REDIRECT = 307 +HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 +HTTP_PAYMENT_REQUIRED = 402 +HTTP_FORBIDDEN = 403 +HTTP_NOT_FOUND = 404 +HTTP_METHOD_NOT_ALLOWED = 405 +HTTP_NOT_ACCEPTABLE = 406 +HTTP_PROXY_AUTHENTICATION_REQUIRED = 407 +HTTP_REQUEST_TIME_OUT = 408 +HTTP_CONFLICT = 409 +HTTP_GONE = 410 +HTTP_LENGTH_REQUIRED = 411 +HTTP_PRECONDITION_FAILED = 412 +HTTP_REQUEST_ENTITY_TOO_LARGE = 413 +HTTP_REQUEST_URI_TOO_LARGE = 414 +HTTP_UNSUPPORTED_MEDIA_TYPE = 415 +HTTP_RANGE_NOT_SATISFIABLE = 416 +HTTP_EXPECTATION_FAILED = 417 +HTTP_UNPROCESSABLE_ENTITY = 422 +HTTP_INTERNAL_SERVER_ERROR = 500 +HTTP_NOT_IMPLEMENTED = 501 +HTTP_BAD_GATEWAY = 502 +HTTP_SERVICE_UNAVAILABLE = 503 +HTTP_GATEWAY_TIME_OUT = 504 +HTTP_VERSION_NOT_SUPPORTED = 505 +HTTP_VARIANT_ALSO_VARIES = 506 + +HSE_STATUS_SUCCESS = 1 +HSE_STATUS_SUCCESS_AND_KEEP_CONN = 2 +HSE_STATUS_PENDING = 3 +HSE_STATUS_ERROR = 4 + +SF_NOTIFY_SECURE_PORT = 0x00000001 +SF_NOTIFY_NONSECURE_PORT = 0x00000002 +SF_NOTIFY_READ_RAW_DATA = 0x00008000 +SF_NOTIFY_PREPROC_HEADERS = 0x00004000 +SF_NOTIFY_AUTHENTICATION = 0x00002000 +SF_NOTIFY_URL_MAP = 0x00001000 +SF_NOTIFY_ACCESS_DENIED = 0x00000800 +SF_NOTIFY_SEND_RESPONSE = 0x00000040 +SF_NOTIFY_SEND_RAW_DATA = 0x00000400 +SF_NOTIFY_LOG = 0x00000200 +SF_NOTIFY_END_OF_REQUEST = 0x00000080 +SF_NOTIFY_END_OF_NET_SESSION = 0x00000100 + +SF_NOTIFY_ORDER_HIGH = 0x00080000 +SF_NOTIFY_ORDER_MEDIUM = 0x00040000 +SF_NOTIFY_ORDER_LOW = 0x00020000 +SF_NOTIFY_ORDER_DEFAULT = SF_NOTIFY_ORDER_LOW + +SF_NOTIFY_ORDER_MASK = ( + SF_NOTIFY_ORDER_HIGH | SF_NOTIFY_ORDER_MEDIUM | SF_NOTIFY_ORDER_LOW +) + +SF_STATUS_REQ_FINISHED = 134217728 # 0x8000000 +SF_STATUS_REQ_FINISHED_KEEP_CONN = 134217728 + 1 +SF_STATUS_REQ_NEXT_NOTIFICATION = 134217728 + 2 +SF_STATUS_REQ_HANDLED_NOTIFICATION = 134217728 + 3 +SF_STATUS_REQ_ERROR = 134217728 + 4 +SF_STATUS_REQ_READ_NEXT = 134217728 + 5 + +HSE_IO_SYNC = 0x00000001 # for WriteClient +HSE_IO_ASYNC = 0x00000002 # for WriteClient/TF/EU +HSE_IO_DISCONNECT_AFTER_SEND = 0x00000004 # for TF +HSE_IO_SEND_HEADERS = 0x00000008 # for TF +HSE_IO_NODELAY = 0x00001000 # turn off nagling +# These two are only used by VectorSend +HSE_IO_FINAL_SEND = 0x00000010 +HSE_IO_CACHE_RESPONSE = 0x00000020 + +HSE_EXEC_URL_NO_HEADERS = 0x02 +HSE_EXEC_URL_IGNORE_CURRENT_INTERCEPTOR = 0x04 +HSE_EXEC_URL_IGNORE_VALIDATION_AND_RANGE = 0x10 +HSE_EXEC_URL_DISABLE_CUSTOM_ERROR = 0x20 +HSE_EXEC_URL_SSI_CMD = 0x40 +HSE_EXEC_URL_HTTP_CACHE_ELIGIBLE = 0x80 diff --git a/MLPY/Lib/site-packages/isapi/samples/README.txt 
b/MLPY/Lib/site-packages/isapi/samples/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..cff875873aa92ba1a00ff43e27873b7b1afec1d4 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/README.txt @@ -0,0 +1,20 @@ +In this directory you will find examples of ISAPI filters and extensions. + +The filter loading mechanism works like this: +* IIS loads the special Python "loader" DLL. This DLL will generally have a + leading underscore as part of its name. +* This loader DLL looks for a Python module, by removing the first letter of + the DLL base name. + +This means that an ISAPI extension module consists of 2 key files - the loader +DLL (eg, "_MyIISModule.dll", and a Python module (which for this example +would be "MyIISModule.py") + +When you install an ISAPI extension, the installation code checks to see if +there is a loader DLL for your implementation file - if one does not exist, +or the standard loader is different, it is copied and renamed accordingly. + +We use this mechanism to provide the maximum separation between different +Python extensions installed on the same server - otherwise filter order and +other tricky IIS semantics would need to be replicated. Also, each filter +gets its own thread-pool, etc. diff --git a/MLPY/Lib/site-packages/isapi/samples/__pycache__/advanced.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/samples/__pycache__/advanced.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71106d0f5dc37f189f7fbb644d2abb467c7f5217 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/samples/__pycache__/advanced.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c18cfa39458130e63a95cfe00ec5232145459d5b Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_asynch.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_asynch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..017488682fdbce392cc4e815299f24d583f711ca Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_asynch.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_with_filter.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_with_filter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a1c2f751ace921de04f85a7d7a66f2a46af83cc Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/samples/__pycache__/redirector_with_filter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/samples/__pycache__/test.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/samples/__pycache__/test.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..242fe619ab3bb4b80f75d82ddfd2907e9d14c975 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/samples/__pycache__/test.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/samples/advanced.py b/MLPY/Lib/site-packages/isapi/samples/advanced.py new file mode 100644 index 0000000000000000000000000000000000000000..c10d0c80c9a4fcee6e0de94cb03cc8b4f48c2824 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/advanced.py @@ -0,0 +1,218 @@ +# This extension 
demonstrates some advanced features of the Python ISAPI +# framework. +# We demonstrate: +# * Reloading your Python module without shutting down IIS (eg, when your +# .py implementation file changes.) +# * Custom command-line handling - both additional options and commands. +# * Using a query string - any part of the URL after a '?' is assumed to +# be "variable names" separated by '&' - we will print the values of +# these server variables. +# * If the tail portion of the URL is "ReportUnhealthy", IIS will be +# notified we are unhealthy via a HSE_REQ_REPORT_UNHEALTHY request. +# Whether this is acted upon depends on if the IIS health-checking +# tools are installed, but you should always see the reason written +# to the Windows event log - see the IIS documentation for more. + +import os +import stat +import sys + +from isapi import isapicon +from isapi.simple import SimpleExtension + +if hasattr(sys, "isapidllhandle"): + import win32traceutil + +# Notes on reloading +# If your HttpFilterProc or HttpExtensionProc functions raises +# 'isapi.InternalReloadException', the framework will not treat it +# as an error but instead will terminate your extension, reload your +# extension module, re-initialize the instance, and re-issue the request. +# The Initialize functions are called with None as their param. The +# return code from the terminate function is ignored. +# +# This is all the framework does to help you. It is up to your code +# when you raise this exception. This sample uses a Win32 "find +# notification". Whenever windows tells us one of the files in the +# directory has changed, we check if the time of our source-file has +# changed, and set a flag. Next imcoming request, we check the flag and +# raise the special exception if set. +# +# The end result is that the module is automatically reloaded whenever +# the source-file changes - you need take no further action to see your +# changes reflected in the running server. + +# The framework only reloads your module - if you have libraries you +# depend on and also want reloaded, you must arrange for this yourself. +# One way of doing this would be to special case the import of these +# modules. Eg: +# -- +# try: +# my_module = reload(my_module) # module already imported - reload it +# except NameError: +# import my_module # first time around - import it. +# -- +# When your module is imported for the first time, the NameError will +# be raised, and the module imported. When the ISAPI framework reloads +# your module, the existing module will avoid the NameError, and allow +# you to reload that module. + +import threading + +import win32con +import win32event +import win32file +import winerror + +from isapi import InternalReloadException + +try: + reload_counter += 1 +except NameError: + reload_counter = 0 + + +# A watcher thread that checks for __file__ changing. +# When it detects it, it simply sets "change_detected" to true. +class ReloadWatcherThread(threading.Thread): + def __init__(self): + self.change_detected = False + self.filename = __file__ + if self.filename.endswith("c") or self.filename.endswith("o"): + self.filename = self.filename[:-1] + self.handle = win32file.FindFirstChangeNotification( + os.path.dirname(self.filename), + False, # watch tree? 
+ win32con.FILE_NOTIFY_CHANGE_LAST_WRITE, + ) + threading.Thread.__init__(self) + + def run(self): + last_time = os.stat(self.filename)[stat.ST_MTIME] + while 1: + try: + rc = win32event.WaitForSingleObject(self.handle, win32event.INFINITE) + win32file.FindNextChangeNotification(self.handle) + except win32event.error as details: + # handle closed - thread should terminate. + if details.winerror != winerror.ERROR_INVALID_HANDLE: + raise + break + this_time = os.stat(self.filename)[stat.ST_MTIME] + if this_time != last_time: + print("Detected file change - flagging for reload.") + self.change_detected = True + last_time = this_time + + def stop(self): + win32file.FindCloseChangeNotification(self.handle) + + +# The ISAPI extension - handles requests in our virtual dir, and sends the +# response to the client. +class Extension(SimpleExtension): + "Python advanced sample Extension" + + def __init__(self): + self.reload_watcher = ReloadWatcherThread() + self.reload_watcher.start() + + def HttpExtensionProc(self, ecb): + # NOTE: If you use a ThreadPoolExtension, you must still perform + # this check in HttpExtensionProc - raising the exception from + # The "Dispatch" method will just cause the exception to be + # rendered to the browser. + if self.reload_watcher.change_detected: + print("Doing reload") + raise InternalReloadException + + url = ecb.GetServerVariable("UNICODE_URL") + if url.endswith("ReportUnhealthy"): + ecb.ReportUnhealthy("I'm a little sick") + + ecb.SendResponseHeaders("200 OK", "Content-Type: text/html\r\n\r\n", 0) + print("", file=ecb) + + qs = ecb.GetServerVariable("QUERY_STRING") + if qs: + queries = qs.split("&") + print("
<PRE>", file=ecb)
+            for q in queries:
+                val = ecb.GetServerVariable(q, "<no such variable>")
+                print("%s=%r" % (q, val), file=ecb)
+            print("</PRE><P/>
", file=ecb) + + print("This module has been imported", file=ecb) + print("%d times" % (reload_counter,), file=ecb) + print("", file=ecb) + ecb.close() + return isapicon.HSE_STATUS_SUCCESS + + def TerminateExtension(self, status): + self.reload_watcher.stop() + + +# The entry points for the ISAPI extension. +def __ExtensionFactory__(): + return Extension() + + +# Our special command line customization. +# Pre-install hook for our virtual directory. +def PreInstallDirectory(params, options): + # If the user used our special '--description' option, + # then we override our default. + if options.description: + params.Description = options.description + + +# Post install hook for our entire script +def PostInstall(params, options): + print() + print("The sample has been installed.") + print("Point your browser to /AdvancedPythonSample") + print("If you modify the source file and reload the page,") + print("you should see the reload counter increment") + + +# Handler for our custom 'status' argument. +def status_handler(options, log, arg): + "Query the status of something" + print("Everything seems to be fine!") + + +custom_arg_handlers = {"status": status_handler} + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters(PostInstall=PostInstall) + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. + sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name="AdvancedPythonSample", + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + # specify the pre-install hook. + PreInstall=PreInstallDirectory, + ) + params.VirtualDirs = [vd] + # Setup our custom option parser. + from optparse import OptionParser + + parser = OptionParser("") # blank usage, so isapi sets it. + parser.add_option( + "", + "--description", + action="store", + help="custom description to use for the virtual directory", + ) + + HandleCommandLine( + params, opt_parser=parser, custom_arg_handlers=custom_arg_handlers + ) diff --git a/MLPY/Lib/site-packages/isapi/samples/redirector.py b/MLPY/Lib/site-packages/isapi/samples/redirector.py new file mode 100644 index 0000000000000000000000000000000000000000..40698bb2c05f3412da569ea3948eb78b0e090449 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/redirector.py @@ -0,0 +1,125 @@ +# This is a sample ISAPI extension written in Python. +# +# Please see README.txt in this directory, and specifically the +# information about the "loader" DLL - installing this sample will create +# "_redirector.dll" in the current directory. The readme explains this. + +# Executing this script (or any server config script) will install the extension +# into your web server. As the server executes, the PyISAPI framework will load +# this module and create your Extension and Filter objects. + +# This is the simplest possible redirector (or proxy) we can write. The +# extension installs with a mask of '*' in the root of the site. +# As an added bonus though, we optionally show how, on IIS6 and later, we +# can use HSE_ERQ_EXEC_URL to ignore certain requests - in IIS5 and earlier +# we can only do this with an ISAPI filter - see redirector_with_filter for +# an example. If this sample is run on IIS5 or earlier it simply ignores +# any excludes. 
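As the header comments above note, running one of these configuration scripts is what installs the extension. Condensed from the __main__ blocks of the samples in this directory, a minimal install script looks roughly like the sketch below; the Name and Description values are placeholders rather than anything shipped with the samples.

    # Condensed sketch of an isapi.install configuration script.
    # (The real samples below also define an Extension class and an
    # __ExtensionFactory__ function for the framework to call.)
    from isapi.install import (
        HandleCommandLine,
        ISAPIParameters,
        ScriptMapParams,
        VirtualDirParameters,
    )

    params = ISAPIParameters()
    sm = [ScriptMapParams(Extension="*", Flags=0)]
    params.VirtualDirs = [
        VirtualDirParameters(
            Name="MySampleDir",  # placeholder virtual directory name
            Description="Placeholder description",
            ScriptMaps=sm,
            ScriptMapUpdate="replace",
        )
    ]

    if __name__ == "__main__":
        # "install" is the default argument; "remove" (or its synonym
        # "uninstall") reverses the installation.
        HandleCommandLine(params)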
+ +import sys + +from isapi import isapicon, threaded_extension + +try: + from urllib.request import urlopen +except ImportError: + # py3k spelling... + from urllib.request import urlopen + +import win32api + +# sys.isapidllhandle will exist when we are loaded by the IIS framework. +# In this case we redirect our output to the win32traceutil collector. +if hasattr(sys, "isapidllhandle"): + import win32traceutil + +# The site we are proxying. +proxy = "http://www.python.org" + +# Urls we exclude (ie, allow IIS to handle itself) - all are lowered, +# and these entries exist by default on Vista... +excludes = ["/iisstart.htm", "/welcome.png"] + + +# An "io completion" function, called when ecb.ExecURL completes... +def io_callback(ecb, url, cbIO, errcode): + # Get the status of our ExecURL + httpstatus, substatus, win32 = ecb.GetExecURLStatus() + print( + "ExecURL of %r finished with http status %d.%d, win32 status %d (%s)" + % (url, httpstatus, substatus, win32, win32api.FormatMessage(win32).strip()) + ) + # nothing more to do! + ecb.DoneWithSession() + + +# The ISAPI extension - handles all requests in the site. +class Extension(threaded_extension.ThreadPoolExtension): + "Python sample Extension" + + def Dispatch(self, ecb): + # Note that our ThreadPoolExtension base class will catch exceptions + # in our Dispatch method, and write the traceback to the client. + # That is perfect for this sample, so we don't catch our own. + # print 'IIS dispatching "%s"' % (ecb.GetServerVariable("URL"),) + url = ecb.GetServerVariable("URL").decode("ascii") + for exclude in excludes: + if url.lower().startswith(exclude): + print("excluding %s" % url) + if ecb.Version < 0x60000: + print("(but this is IIS5 or earlier - can't do 'excludes')") + else: + ecb.IOCompletion(io_callback, url) + ecb.ExecURL( + None, + None, + None, + None, + None, + isapicon.HSE_EXEC_URL_IGNORE_CURRENT_INTERCEPTOR, + ) + return isapicon.HSE_STATUS_PENDING + + new_url = proxy + url + print("Opening %s" % new_url) + fp = urlopen(new_url) + headers = fp.info() + # subtle py3k breakage: in py3k, str(headers) has normalized \r\n + # back to \n and also stuck an extra \n term. py2k leaves the + # \r\n from the server in tact and finishes with a single term. + if sys.version_info < (3, 0): + header_text = str(headers) + "\r\n" + else: + # take *all* trailing \n off, replace remaining with + # \r\n, then add the 2 trailing \r\n. + header_text = str(headers).rstrip("\n").replace("\n", "\r\n") + "\r\n\r\n" + ecb.SendResponseHeaders("200 OK", header_text, False) + ecb.WriteClient(fp.read()) + ecb.DoneWithSession() + print("Returned data from '%s'" % (new_url,)) + return isapicon.HSE_STATUS_SUCCESS + + +# The entry points for the ISAPI extension. +def __ExtensionFactory__(): + return Extension() + + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters() + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. 
+ sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name="/", + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + ) + params.VirtualDirs = [vd] + HandleCommandLine(params) diff --git a/MLPY/Lib/site-packages/isapi/samples/redirector_asynch.py b/MLPY/Lib/site-packages/isapi/samples/redirector_asynch.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4b5e4f77c72d98ff37508f5a254e8e4769cd36 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/redirector_asynch.py @@ -0,0 +1,85 @@ +# This is a sample ISAPI extension written in Python. + +# This is like the other 'redirector' samples, but uses asnch IO when writing +# back to the client (it does *not* use asynch io talking to the remote +# server!) + +import sys +import urllib.error +import urllib.parse +import urllib.request + +from isapi import isapicon, threaded_extension + +# sys.isapidllhandle will exist when we are loaded by the IIS framework. +# In this case we redirect our output to the win32traceutil collector. +if hasattr(sys, "isapidllhandle"): + import win32traceutil + +# The site we are proxying. +proxy = "http://www.python.org" + +# We synchronously read chunks of this size then asynchronously write them. +CHUNK_SIZE = 8192 + + +# The callback made when IIS completes the asynch write. +def io_callback(ecb, fp, cbIO, errcode): + print("IO callback", ecb, fp, cbIO, errcode) + chunk = fp.read(CHUNK_SIZE) + if chunk: + ecb.WriteClient(chunk, isapicon.HSE_IO_ASYNC) + # and wait for the next callback to say this chunk is done. + else: + # eof - say we are complete. + fp.close() + ecb.DoneWithSession() + + +# The ISAPI extension - handles all requests in the site. +class Extension(threaded_extension.ThreadPoolExtension): + "Python sample proxy server - asynch version." + + def Dispatch(self, ecb): + print('IIS dispatching "%s"' % (ecb.GetServerVariable("URL"),)) + url = ecb.GetServerVariable("URL") + + new_url = proxy + url + print("Opening %s" % new_url) + fp = urllib.request.urlopen(new_url) + headers = fp.info() + ecb.SendResponseHeaders("200 OK", str(headers) + "\r\n", False) + # now send the first chunk asynchronously + ecb.ReqIOCompletion(io_callback, fp) + chunk = fp.read(CHUNK_SIZE) + if chunk: + ecb.WriteClient(chunk, isapicon.HSE_IO_ASYNC) + return isapicon.HSE_STATUS_PENDING + # no data - just close things now. + ecb.DoneWithSession() + return isapicon.HSE_STATUS_SUCCESS + + +# The entry points for the ISAPI extension. +def __ExtensionFactory__(): + return Extension() + + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters() + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. 
+ sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name="/", + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + ) + params.VirtualDirs = [vd] + HandleCommandLine(params) diff --git a/MLPY/Lib/site-packages/isapi/samples/redirector_with_filter.py b/MLPY/Lib/site-packages/isapi/samples/redirector_with_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..a63b1db13a08bfb838c708a685ff9d690b5c1b44 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/redirector_with_filter.py @@ -0,0 +1,161 @@ +# This is a sample configuration file for an ISAPI filter and extension +# written in Python. +# +# Please see README.txt in this directory, and specifically the +# information about the "loader" DLL - installing this sample will create +# "_redirector_with_filter.dll" in the current directory. The readme explains +# this. + +# Executing this script (or any server config script) will install the extension +# into your web server. As the server executes, the PyISAPI framework will load +# this module and create your Extension and Filter objects. + +# This sample provides sample redirector: +# It is implemented by a filter and an extension, so that some requests can +# be ignored. Compare with 'redirector_simple' which avoids the filter, but +# is unable to selectively ignore certain requests. +# The process is sample uses is: +# * The filter is installed globally, as all filters are. +# * A Virtual Directory named "python" is setup. This dir has our ISAPI +# extension as the only application, mapped to file-extension '*'. Thus, our +# extension handles *all* requests in this directory. +# The basic process is that the filter does URL rewriting, redirecting every +# URL to our Virtual Directory. Our extension then handles this request, +# forwarding the data from the proxied site. +# For example: +# * URL of "index.html" comes in. +# * Filter rewrites this to "/python/index.html" +# * Our extension sees the full "/python/index.html", removes the leading +# portion, and opens and forwards the remote URL. + + +# This sample is very small - it avoid most error handling, etc. It is for +# demonstration purposes only. + +import sys +import urllib.error +import urllib.parse +import urllib.request + +from isapi import isapicon, threaded_extension +from isapi.simple import SimpleFilter + +# sys.isapidllhandle will exist when we are loaded by the IIS framework. +# In this case we redirect our output to the win32traceutil collector. +if hasattr(sys, "isapidllhandle"): + import win32traceutil + +# The site we are proxying. +proxy = "http://www.python.org" +# The name of the virtual directory we install in, and redirect from. +virtualdir = "/python" + +# The key feature of this redirector over the simple redirector is that it +# can choose to ignore certain responses by having the filter not rewrite them +# to our virtual dir. For this sample, we just exclude the IIS help directory. + + +# The ISAPI extension - handles requests in our virtual dir, and sends the +# response to the client. +class Extension(threaded_extension.ThreadPoolExtension): + "Python sample Extension" + + def Dispatch(self, ecb): + # Note that our ThreadPoolExtension base class will catch exceptions + # in our Dispatch method, and write the traceback to the client. + # That is perfect for this sample, so we don't catch our own. 
+ # print 'IIS dispatching "%s"' % (ecb.GetServerVariable("URL"),) + url = ecb.GetServerVariable("URL") + if url.startswith(virtualdir): + new_url = proxy + url[len(virtualdir) :] + print("Opening", new_url) + fp = urllib.request.urlopen(new_url) + headers = fp.info() + ecb.SendResponseHeaders("200 OK", str(headers) + "\r\n", False) + ecb.WriteClient(fp.read()) + ecb.DoneWithSession() + print("Returned data from '%s'!" % (new_url,)) + else: + # this should never happen - we should only see requests that + # start with our virtual directory name. + print("Not proxying '%s'" % (url,)) + + +# The ISAPI filter. +class Filter(SimpleFilter): + "Sample Python Redirector" + filter_flags = isapicon.SF_NOTIFY_PREPROC_HEADERS | isapicon.SF_NOTIFY_ORDER_DEFAULT + + def HttpFilterProc(self, fc): + # print "Filter Dispatch" + nt = fc.NotificationType + if nt != isapicon.SF_NOTIFY_PREPROC_HEADERS: + return isapicon.SF_STATUS_REQ_NEXT_NOTIFICATION + + pp = fc.GetData() + url = pp.GetHeader("url") + # print "URL is '%s'" % (url,) + prefix = virtualdir + if not url.startswith(prefix): + new_url = prefix + url + print("New proxied URL is '%s'" % (new_url,)) + pp.SetHeader("url", new_url) + # For the sake of demonstration, show how the FilterContext + # attribute is used. It always starts out life as None, and + # any assignments made are automatically decref'd by the + # framework during a SF_NOTIFY_END_OF_NET_SESSION notification. + if fc.FilterContext is None: + fc.FilterContext = 0 + fc.FilterContext += 1 + print("This is request number %d on this connection" % fc.FilterContext) + return isapicon.SF_STATUS_REQ_HANDLED_NOTIFICATION + else: + print("Filter ignoring URL '%s'" % (url,)) + + # Some older code that handled SF_NOTIFY_URL_MAP. + # ~ print "Have URL_MAP notify" + # ~ urlmap = fc.GetData() + # ~ print "URI is", urlmap.URL + # ~ print "Path is", urlmap.PhysicalPath + # ~ if urlmap.URL.startswith("/UC/"): + # ~ # Find the /UC/ in the physical path, and nuke it (except + # ~ # as the path is physical, it is \) + # ~ p = urlmap.PhysicalPath + # ~ pos = p.index("\\UC\\") + # ~ p = p[:pos] + p[pos+3:] + # ~ p = r"E:\src\pyisapi\webroot\PyTest\formTest.htm" + # ~ print "New path is", p + # ~ urlmap.PhysicalPath = p + + +# The entry points for the ISAPI extension. +def __FilterFactory__(): + return Filter() + + +def __ExtensionFactory__(): + return Extension() + + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters() + # Setup all filters - these are global to the site. + params.Filters = [ + FilterParameters(Name="PythonRedirector", Description=Filter.__doc__), + ] + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. 
+ sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name=virtualdir[1:], + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + ) + params.VirtualDirs = [vd] + HandleCommandLine(params) diff --git a/MLPY/Lib/site-packages/isapi/samples/test.py b/MLPY/Lib/site-packages/isapi/samples/test.py new file mode 100644 index 0000000000000000000000000000000000000000..5e4d899bb677eb7d84e30bc3a048cc6e5a314a5e --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/samples/test.py @@ -0,0 +1,195 @@ +# This extension is used mainly for testing purposes - it is not +# designed to be a simple sample, but instead is a hotch-potch of things +# that attempts to exercise the framework. + +import os +import stat +import sys + +from isapi import isapicon +from isapi.simple import SimpleExtension + +if hasattr(sys, "isapidllhandle"): + import win32traceutil + +# We use the same reload support as 'advanced.py' demonstrates. +import threading + +import win32con +import win32event +import win32file +import winerror + +from isapi import InternalReloadException + + +# A watcher thread that checks for __file__ changing. +# When it detects it, it simply sets "change_detected" to true. +class ReloadWatcherThread(threading.Thread): + def __init__(self): + self.change_detected = False + self.filename = __file__ + if self.filename.endswith("c") or self.filename.endswith("o"): + self.filename = self.filename[:-1] + self.handle = win32file.FindFirstChangeNotification( + os.path.dirname(self.filename), + False, # watch tree? + win32con.FILE_NOTIFY_CHANGE_LAST_WRITE, + ) + threading.Thread.__init__(self) + + def run(self): + last_time = os.stat(self.filename)[stat.ST_MTIME] + while 1: + try: + rc = win32event.WaitForSingleObject(self.handle, win32event.INFINITE) + win32file.FindNextChangeNotification(self.handle) + except win32event.error as details: + # handle closed - thread should terminate. + if details.winerror != winerror.ERROR_INVALID_HANDLE: + raise + break + this_time = os.stat(self.filename)[stat.ST_MTIME] + if this_time != last_time: + print("Detected file change - flagging for reload.") + self.change_detected = True + last_time = this_time + + def stop(self): + win32file.FindCloseChangeNotification(self.handle) + + +def TransmitFileCallback(ecb, hFile, cbIO, errCode): + print("Transmit complete!") + ecb.close() + + +# The ISAPI extension - handles requests in our virtual dir, and sends the +# response to the client. +class Extension(SimpleExtension): + "Python test Extension" + + def __init__(self): + self.reload_watcher = ReloadWatcherThread() + self.reload_watcher.start() + + def HttpExtensionProc(self, ecb): + # NOTE: If you use a ThreadPoolExtension, you must still perform + # this check in HttpExtensionProc - raising the exception from + # The "Dispatch" method will just cause the exception to be + # rendered to the browser. 
+ if self.reload_watcher.change_detected: + print("Doing reload") + raise InternalReloadException + + if ecb.GetServerVariable("UNICODE_URL").endswith("test.py"): + file_flags = ( + win32con.FILE_FLAG_SEQUENTIAL_SCAN | win32con.FILE_FLAG_OVERLAPPED + ) + hfile = win32file.CreateFile( + __file__, + win32con.GENERIC_READ, + 0, + None, + win32con.OPEN_EXISTING, + file_flags, + None, + ) + flags = ( + isapicon.HSE_IO_ASYNC + | isapicon.HSE_IO_DISCONNECT_AFTER_SEND + | isapicon.HSE_IO_SEND_HEADERS + ) + # We pass hFile to the callback simply as a way of keeping it alive + # for the duration of the transmission + try: + ecb.TransmitFile( + TransmitFileCallback, + hfile, + int(hfile), + "200 OK", + 0, + 0, + None, + None, + flags, + ) + except: + # Errors keep this source file open! + hfile.Close() + raise + else: + # default response + ecb.SendResponseHeaders("200 OK", "Content-Type: text/html\r\n\r\n", 0) + print("", file=ecb) + print("The root of this site is at", ecb.MapURLToPath("/"), file=ecb) + print("", file=ecb) + ecb.close() + return isapicon.HSE_STATUS_SUCCESS + + def TerminateExtension(self, status): + self.reload_watcher.stop() + + +# The entry points for the ISAPI extension. +def __ExtensionFactory__(): + return Extension() + + +# Our special command line customization. +# Pre-install hook for our virtual directory. +def PreInstallDirectory(params, options): + # If the user used our special '--description' option, + # then we override our default. + if options.description: + params.Description = options.description + + +# Post install hook for our entire script +def PostInstall(params, options): + print() + print("The sample has been installed.") + print("Point your browser to /PyISAPITest") + + +# Handler for our custom 'status' argument. +def status_handler(options, log, arg): + "Query the status of something" + print("Everything seems to be fine!") + + +custom_arg_handlers = {"status": status_handler} + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters(PostInstall=PostInstall) + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. + sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name="PyISAPITest", + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + # specify the pre-install hook. + PreInstall=PreInstallDirectory, + ) + params.VirtualDirs = [vd] + # Setup our custom option parser. + from optparse import OptionParser + + parser = OptionParser("") # blank usage, so isapi sets it. + parser.add_option( + "", + "--description", + action="store", + help="custom description to use for the virtual directory", + ) + + HandleCommandLine( + params, opt_parser=parser, custom_arg_handlers=custom_arg_handlers + ) diff --git a/MLPY/Lib/site-packages/isapi/simple.py b/MLPY/Lib/site-packages/isapi/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..b453bbae015e29ac021e58200211f4e4ef5eb93f --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/simple.py @@ -0,0 +1,70 @@ +"""Simple base-classes for extensions and filters. + +None of the filter and extension functions are considered 'optional' by the +framework. 
These base-classes provide simple implementations for the +Initialize and Terminate functions, allowing you to omit them, + +It is not necessary to use these base-classes - but if you don't, you +must ensure each of the required methods are implemented. +""" + + +class SimpleExtension: + "Base class for a simple ISAPI extension" + + def __init__(self): + pass + + def GetExtensionVersion(self, vi): + """Called by the ISAPI framework to get the extension version + + The default implementation uses the classes docstring to + set the extension description.""" + # nod to our reload capability - vi is None when we are reloaded. + if vi is not None: + vi.ExtensionDesc = self.__doc__ + + def HttpExtensionProc(self, control_block): + """Called by the ISAPI framework for each extension request. + + sub-classes must provide an implementation for this method. + """ + raise NotImplementedError("sub-classes should override HttpExtensionProc") + + def TerminateExtension(self, status): + """Called by the ISAPI framework as the extension terminates.""" + pass + + +class SimpleFilter: + "Base class for a a simple ISAPI filter" + filter_flags = None + + def __init__(self): + pass + + def GetFilterVersion(self, fv): + """Called by the ISAPI framework to get the extension version + + The default implementation uses the classes docstring to + set the extension description, and uses the classes + filter_flags attribute to set the ISAPI filter flags - you + must specify filter_flags in your class. + """ + if self.filter_flags is None: + raise RuntimeError("You must specify the filter flags") + # nod to our reload capability - fv is None when we are reloaded. + if fv is not None: + fv.Flags = self.filter_flags + fv.FilterDesc = self.__doc__ + + def HttpFilterProc(self, fc): + """Called by the ISAPI framework for each filter request. + + sub-classes must provide an implementation for this method. + """ + raise NotImplementedError("sub-classes should override HttpExtensionProc") + + def TerminateFilter(self, status): + """Called by the ISAPI framework as the filter terminates.""" + pass diff --git a/MLPY/Lib/site-packages/isapi/test/README.txt b/MLPY/Lib/site-packages/isapi/test/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..18643dd75754377d57e7f4443ad8108397deb999 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/test/README.txt @@ -0,0 +1,3 @@ +This is a directory for tests of the PyISAPI framework. + +For demos, please see the pyisapi 'samples' directory. \ No newline at end of file diff --git a/MLPY/Lib/site-packages/isapi/test/__pycache__/extension_simple.cpython-39.pyc b/MLPY/Lib/site-packages/isapi/test/__pycache__/extension_simple.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb51b5050bfb51ebad163585d3833a7e80e3b0e4 Binary files /dev/null and b/MLPY/Lib/site-packages/isapi/test/__pycache__/extension_simple.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/isapi/test/extension_simple.py b/MLPY/Lib/site-packages/isapi/test/extension_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..64bd71fd22328f6fe6ec01ed44ec610854496fa4 --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/test/extension_simple.py @@ -0,0 +1,119 @@ +# This is an ISAPI extension purely for testing purposes. It is NOT +# a 'demo' (even though it may be useful!) +# +# Install this extension, then point your browser to: +# "http://localhost/pyisapi_test/test1" +# This will execute the method 'test1' below. 
See below for the list of +# test methods that are acceptable. + +import urllib.error +import urllib.parse +import urllib.request + +# If we have no console (eg, am running from inside IIS), redirect output +# somewhere useful - in this case, the standard win32 trace collector. +import win32api +import winerror + +from isapi import ExtensionError, isapicon, threaded_extension +from isapi.simple import SimpleFilter + +try: + win32api.GetConsoleTitle() +except win32api.error: + # No console - redirect + import win32traceutil + + +# The ISAPI extension - handles requests in our virtual dir, and sends the +# response to the client. +class Extension(threaded_extension.ThreadPoolExtension): + "Python ISAPI Tester" + + def Dispatch(self, ecb): + print('Tester dispatching "%s"' % (ecb.GetServerVariable("URL"),)) + url = ecb.GetServerVariable("URL") + test_name = url.split("/")[-1] + meth = getattr(self, test_name, None) + if meth is None: + raise AttributeError("No test named '%s'" % (test_name,)) + result = meth(ecb) + if result is None: + # This means the test finalized everything + return + ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n", False) + print("Finished running test ", test_name, "", file=ecb) + print("
<pre>", file=ecb)
+        print(result, file=ecb)
+        print("</pre>
", file=ecb) + print("", file=ecb) + ecb.DoneWithSession() + + def test1(self, ecb): + try: + ecb.GetServerVariable("foo bar") + raise RuntimeError("should have failed!") + except ExtensionError as err: + assert err.errno == winerror.ERROR_INVALID_INDEX, err + return "worked!" + + def test_long_vars(self, ecb): + qs = ecb.GetServerVariable("QUERY_STRING") + # Our implementation has a default buffer size of 8k - so we test + # the code that handles an overflow by ensuring there are more + # than 8k worth of chars in the URL. + expected_query = "x" * 8500 + if len(qs) == 0: + # Just the URL with no query part - redirect to myself, but with + # a huge query portion. + me = ecb.GetServerVariable("URL") + headers = "Location: " + me + "?" + expected_query + "\r\n\r\n" + ecb.SendResponseHeaders("301 Moved", headers) + ecb.DoneWithSession() + return None + if qs == expected_query: + return "Total length of variable is %d - test worked!" % (len(qs),) + else: + return "Unexpected query portion! Got %d chars, expected %d" % ( + len(qs), + len(expected_query), + ) + + def test_unicode_vars(self, ecb): + # We need to check that we are running IIS6! This seems the only + # effective way from an extension. + ver = float(ecb.GetServerVariable("SERVER_SOFTWARE").split("/")[1]) + if ver < 6.0: + return "This is IIS version %g - unicode only works in IIS6 and later" % ver + + us = ecb.GetServerVariable("UNICODE_SERVER_NAME") + if not isinstance(us, str): + raise RuntimeError("unexpected type!") + if us != str(ecb.GetServerVariable("SERVER_NAME")): + raise RuntimeError("Unicode and non-unicode values were not the same") + return "worked!" + + +# The entry points for the ISAPI extension. +def __ExtensionFactory__(): + return Extension() + + +if __name__ == "__main__": + # If run from the command-line, install ourselves. + from isapi.install import * + + params = ISAPIParameters() + # Setup the virtual directories - this is a list of directories our + # extension uses - in this case only 1. + # Each extension has a "script map" - this is the mapping of ISAPI + # extensions. + sm = [ScriptMapParams(Extension="*", Flags=0)] + vd = VirtualDirParameters( + Name="pyisapi_test", + Description=Extension.__doc__, + ScriptMaps=sm, + ScriptMapUpdate="replace", + ) + params.VirtualDirs = [vd] + HandleCommandLine(params) diff --git a/MLPY/Lib/site-packages/isapi/threaded_extension.py b/MLPY/Lib/site-packages/isapi/threaded_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..b31c8c9e70d2d51e4f8fc538db251b16544b88ea --- /dev/null +++ b/MLPY/Lib/site-packages/isapi/threaded_extension.py @@ -0,0 +1,189 @@ +"""An ISAPI extension base class implemented using a thread-pool.""" +# $Id$ + +import sys +import threading +import time +import traceback + +from pywintypes import OVERLAPPED +from win32event import INFINITE +from win32file import ( + CloseHandle, + CreateIoCompletionPort, + GetQueuedCompletionStatus, + PostQueuedCompletionStatus, +) +from win32security import SetThreadToken + +import isapi.simple +from isapi import ExtensionError, isapicon + +ISAPI_REQUEST = 1 +ISAPI_SHUTDOWN = 2 + + +class WorkerThread(threading.Thread): + def __init__(self, extension, io_req_port): + self.running = False + self.io_req_port = io_req_port + self.extension = extension + threading.Thread.__init__(self) + # We wait 15 seconds for a thread to terminate, but if it fails to, + # we don't want the process to hang at exit waiting for it... 
+ self.setDaemon(True) + + def run(self): + self.running = True + while self.running: + errCode, bytes, key, overlapped = GetQueuedCompletionStatus( + self.io_req_port, INFINITE + ) + if key == ISAPI_SHUTDOWN and overlapped is None: + break + + # Let the parent extension handle the command. + dispatcher = self.extension.dispatch_map.get(key) + if dispatcher is None: + raise RuntimeError("Bad request '%s'" % (key,)) + + dispatcher(errCode, bytes, key, overlapped) + + def call_handler(self, cblock): + self.extension.Dispatch(cblock) + + +# A generic thread-pool based extension, using IO Completion Ports. +# Sub-classes can override one method to implement a simple extension, or +# may leverage the CompletionPort to queue their own requests, and implement a +# fully asynch extension. +class ThreadPoolExtension(isapi.simple.SimpleExtension): + "Base class for an ISAPI extension based around a thread-pool" + max_workers = 20 + worker_shutdown_wait = 15000 # 15 seconds for workers to quit... + + def __init__(self): + self.workers = [] + # extensible dispatch map, for sub-classes that need to post their + # own requests to the completion port. + # Each of these functions is called with the result of + # GetQueuedCompletionStatus for our port. + self.dispatch_map = { + ISAPI_REQUEST: self.DispatchConnection, + } + + def GetExtensionVersion(self, vi): + isapi.simple.SimpleExtension.GetExtensionVersion(self, vi) + # As per Q192800, the CompletionPort should be created with the number + # of processors, even if the number of worker threads is much larger. + # Passing 0 means the system picks the number. + self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0) + # start up the workers + self.workers = [] + for i in range(self.max_workers): + worker = WorkerThread(self, self.io_req_port) + worker.start() + self.workers.append(worker) + + def HttpExtensionProc(self, control_block): + overlapped = OVERLAPPED() + overlapped.object = control_block + PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped) + return isapicon.HSE_STATUS_PENDING + + def TerminateExtension(self, status): + for worker in self.workers: + worker.running = False + for worker in self.workers: + PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None) + # wait for them to terminate - pity we aren't using 'native' threads + # as then we could do a smart wait - but now we need to poll.... + end_time = time.time() + self.worker_shutdown_wait / 1000 + alive = self.workers + while alive: + if time.time() > end_time: + # xxx - might be nice to log something here. + break + time.sleep(0.2) + alive = [w for w in alive if w.is_alive()] + self.dispatch_map = {} # break circles + CloseHandle(self.io_req_port) + + # This is the one operation the base class supports - a simple + # Connection request. We setup the thread-token, and dispatch to the + # sub-class's 'Dispatch' method. + def DispatchConnection(self, errCode, bytes, key, overlapped): + control_block = overlapped.object + # setup the correct user for this request + hRequestToken = control_block.GetImpersonationToken() + SetThreadToken(None, hRequestToken) + try: + try: + self.Dispatch(control_block) + except: + self.HandleDispatchError(control_block) + finally: + # reset the security context + SetThreadToken(None, None) + + def Dispatch(self, ecb): + """Overridden by the sub-class to handle connection requests. + + This class creates a thread-pool using a Windows completion port, + and dispatches requests via this port. 
Sub-classes can generally + implement each connection request using blocking reads and writes, and + the thread-pool will still provide decent response to the end user. + + The sub-class can set a max_workers attribute (default is 20). Note + that this generally does *not* mean 20 threads will all be concurrently + running, via the magic of Windows completion ports. + + There is no default implementation - sub-classes must implement this. + """ + raise NotImplementedError("sub-classes should override Dispatch") + + def HandleDispatchError(self, ecb): + """Handles errors in the Dispatch method. + + When a Dispatch method call fails, this method is called to handle + the exception. The default implementation formats the traceback + in the browser. + """ + ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR + # control_block.LogData = "we failed!" + exc_typ, exc_val, exc_tb = sys.exc_info() + limit = None + try: + try: + import cgi + + ecb.SendResponseHeaders( + "200 OK", "Content-type: text/html\r\n\r\n", False + ) + print(file=ecb) + print("

<H3>Traceback (most recent call last):</H3>", file=ecb)
+                list = traceback.format_tb(
+                    exc_tb, limit
+                ) + traceback.format_exception_only(exc_typ, exc_val)
+                print(
+                    "<PRE>%s<b>%s</b></PRE>
" + % ( + cgi.escape("".join(list[:-1])), + cgi.escape(list[-1]), + ), + file=ecb, + ) + except ExtensionError: + # The client disconnected without reading the error body - + # its probably not a real browser at the other end, ignore it. + pass + except: + print("FAILED to render the error message!") + traceback.print_exc() + print("ORIGINAL extension error:") + traceback.print_exception(exc_typ, exc_val, exc_tb) + finally: + # holding tracebacks in a local of a frame that may itself be + # part of a traceback used to be evil and cause leaks! + exc_tb = None + ecb.DoneWithSession() diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/INSTALLER b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/LICENSE.txt b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/METADATA b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..265cc32e1355ea1aa0f6c1794e84c8f2f636606b --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/METADATA @@ -0,0 +1,76 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 3.1.4 +Summary: A very fast and expressive template engine. 
+Maintainer-email: Pallets +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Typing :: Typed +Requires-Dist: MarkupSafe>=2.0 +Requires-Dist: Babel>=2.7 ; extra == "i18n" +Project-URL: Changes, https://jinja.palletsprojects.com/changes/ +Project-URL: Chat, https://discord.gg/pallets +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Source, https://github.com/pallets/jinja/ +Provides-Extra: i18n + +# Jinja + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. + +Jinja's philosophy is that while application logic belongs in Python if +possible, it shouldn't make the template designer's job difficult by +restricting functionality too much. + + +## In A Nutshell + +.. code-block:: jinja + + {% extends "base.html" %} + {% block title %}Members{% endblock %} + {% block content %} + <ul> + {% for user in users %} + <li><a href="{{ user.url }}">{{ user.username }}</a></li> + {% endfor %} + </ul> + {% endblock %} + + +## Donate + +The Pallets organization develops and supports Jinja and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, [please +donate today][].
+ +[please donate today]: https://palletsprojects.com/donate + diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/RECORD b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..10874b816350dd6a71d8ebe9c112407d3428fd3b --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/RECORD @@ -0,0 +1,57 @@ +jinja2-3.1.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jinja2-3.1.4.dist-info/LICENSE.txt,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +jinja2-3.1.4.dist-info/METADATA,sha256=R_brzpPQVBvpGcsm-WbrtgotO7suQ1D0F-qkhTzeEfY,2640 +jinja2-3.1.4.dist-info/RECORD,, +jinja2-3.1.4.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +jinja2-3.1.4.dist-info/entry_points.txt,sha256=OL85gYU1eD8cuPlikifFngXpeBjaxl6rIJ8KkC_3r-I,58 +jinja2/__init__.py,sha256=wIl45IM20KGw-kfr7jJhaBxxX5g4-kihlBYjxopX7Pw,1928 +jinja2/__pycache__/__init__.cpython-39.pyc,, +jinja2/__pycache__/_identifier.cpython-39.pyc,, +jinja2/__pycache__/async_utils.cpython-39.pyc,, +jinja2/__pycache__/bccache.cpython-39.pyc,, +jinja2/__pycache__/compiler.cpython-39.pyc,, +jinja2/__pycache__/constants.cpython-39.pyc,, +jinja2/__pycache__/debug.cpython-39.pyc,, +jinja2/__pycache__/defaults.cpython-39.pyc,, +jinja2/__pycache__/environment.cpython-39.pyc,, +jinja2/__pycache__/exceptions.cpython-39.pyc,, +jinja2/__pycache__/ext.cpython-39.pyc,, +jinja2/__pycache__/filters.cpython-39.pyc,, +jinja2/__pycache__/idtracking.cpython-39.pyc,, +jinja2/__pycache__/lexer.cpython-39.pyc,, +jinja2/__pycache__/loaders.cpython-39.pyc,, +jinja2/__pycache__/meta.cpython-39.pyc,, +jinja2/__pycache__/nativetypes.cpython-39.pyc,, +jinja2/__pycache__/nodes.cpython-39.pyc,, +jinja2/__pycache__/optimizer.cpython-39.pyc,, +jinja2/__pycache__/parser.cpython-39.pyc,, +jinja2/__pycache__/runtime.cpython-39.pyc,, +jinja2/__pycache__/sandbox.cpython-39.pyc,, +jinja2/__pycache__/tests.cpython-39.pyc,, +jinja2/__pycache__/utils.cpython-39.pyc,, +jinja2/__pycache__/visitor.cpython-39.pyc,, +jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958 +jinja2/async_utils.py,sha256=JXKWCAXmTx0iZB4-hAsF50vgjxw_RJTjiLOlGGTBso0,2477 +jinja2/bccache.py,sha256=gh0qs9rulnXo0PhX5jTJy2UHzI8wFnQ63o_vw7nhzRg,14061 +jinja2/compiler.py,sha256=dpV-n6_iQUP4uSwlXwGUavJmwjvXdyxKzJ-AonFjPBk,72271 +jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=xhFkmxO0CESA76Ki5tz4XWq9yzGu-t0p93JCCVBVNps,61538 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=igsBH7c6C0byHaOtMbE-ugpt4GjLGgR-ywskyXtKgq8,31877 +jinja2/filters.py,sha256=bKeqjFjjz88TkHVLSyyMIEB75CzAN6b3Airgx0phJDg,54611 +jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704 +jinja2/lexer.py,sha256=xnWWXhPndHFsoqzpc5VTjheDE9JuKk9MUo9DZkrM8Os,29754 +jinja2/loaders.py,sha256=ru0GIWHo5KiHJi7_MoI_LvGDoBBvP6rd0hiC1ReaTwk,23167 +jinja2/meta.py,sha256=OTDPkaFvU2Hgvx-6akz7154F8BIWaRmvJcBFvwopHww,4397 +jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210 +jinja2/nodes.py,sha256=m1Duzcr6qhZI8JQ6VyJgUNinjAf5bQzijSmDnMsvUx8,34579 +jinja2/optimizer.py,sha256=rJnCRlQ7pZsEEmMhsQDgC_pKyDHxP5TPS6zVPGsgcu8,1651 +jinja2/parser.py,sha256=DV1iF1FR2Rsaj_5zl8rmx7j6Bj4S8iLHoYsvJ0bfEis,39890 
+jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=POXT3tKNKJRENx2CymwUsOOXH2JwGPjW702njB5__cQ,33435 +jinja2/sandbox.py,sha256=TJjBNS9qRJ2ZgBMWdAgRBpyDLOHea2kT-2mk4PrjYx0,14616 +jinja2/tests.py,sha256=VLsBhVFnWg-PxSBz1MhRnNWgP1ovXk3neO1FLQMeC9Q,5926 +jinja2/utils.py,sha256=nV7IpWLvRCMyHW1irBAK8CIPAnOFfkb2ukggDBjbBEY,23952 +jinja2/visitor.py,sha256=EcnL1PIwf_4RVCOMxsRNuR8AXHbS1qfAdMOE2ngKJz4,3557 diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/WHEEL b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/entry_points.txt b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..abc3eae3b3bc573957cf7401711948799b3465c0 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2-3.1.4.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[babel.extractors] +jinja2=jinja2.ext:babel_extract[i18n] + diff --git a/MLPY/Lib/site-packages/jinja2/__init__.py b/MLPY/Lib/site-packages/jinja2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f0b5b286ce29334eadb4f93947971818e69b43d --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/__init__.py @@ -0,0 +1,38 @@ +"""Jinja is a template engine written in pure Python. It provides a +non-XML syntax that supports inline expressions and an optional +sandboxed environment. +""" + +from .bccache import BytecodeCache as BytecodeCache +from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache +from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache +from .environment import Environment as Environment +from .environment import Template as Template +from .exceptions import TemplateAssertionError as TemplateAssertionError +from .exceptions import TemplateError as TemplateError +from .exceptions import TemplateNotFound as TemplateNotFound +from .exceptions import TemplateRuntimeError as TemplateRuntimeError +from .exceptions import TemplatesNotFound as TemplatesNotFound +from .exceptions import TemplateSyntaxError as TemplateSyntaxError +from .exceptions import UndefinedError as UndefinedError +from .loaders import BaseLoader as BaseLoader +from .loaders import ChoiceLoader as ChoiceLoader +from .loaders import DictLoader as DictLoader +from .loaders import FileSystemLoader as FileSystemLoader +from .loaders import FunctionLoader as FunctionLoader +from .loaders import ModuleLoader as ModuleLoader +from .loaders import PackageLoader as PackageLoader +from .loaders import PrefixLoader as PrefixLoader +from .runtime import ChainableUndefined as ChainableUndefined +from .runtime import DebugUndefined as DebugUndefined +from .runtime import make_logging_undefined as make_logging_undefined +from .runtime import StrictUndefined as StrictUndefined +from .runtime import Undefined as Undefined +from .utils import clear_caches as clear_caches +from .utils import is_undefined as is_undefined +from .utils import pass_context as pass_context +from .utils import pass_environment as pass_environment +from .utils import pass_eval_context as pass_eval_context +from .utils import select_autoescape as select_autoescape + +__version__ = "3.1.4" diff --git 
a/MLPY/Lib/site-packages/jinja2/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72f24dd0f1aeb578a9e72e29280c0326e9166a4b Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/_identifier.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/_identifier.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03b89b262e067031b31090433a699c76febfbc11 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/_identifier.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/async_utils.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/async_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7acbf135ab4e1a7eb642bd1cd529481532f93dae Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/async_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/bccache.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/bccache.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e22f14c4069545114fb0548c2a6dc0d93029cae Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/bccache.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/compiler.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/compiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0b91d9002e186f3a21260773dda2a5d11aedd7b Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/compiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/constants.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/constants.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44dee7e58bfd865ecc9d25af441182c1234cef0c Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/constants.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/debug.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/debug.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a171d7883b0c5ef1272cbba35080987cd8a7185 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/debug.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/defaults.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/defaults.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89b44ebcdbfbabfd10f051713a335142d7a83bcb Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/defaults.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/environment.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/environment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a8bc41b1311ac5c6eecec7adbeb6e58aa35c997 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/environment.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d19f2511a569fbaf4f45606ca34ddc77309e359 Binary files /dev/null and 
b/MLPY/Lib/site-packages/jinja2/__pycache__/exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/ext.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/ext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9a190bb84394906b2993069b16665557e4972c0 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/ext.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/filters.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c08a8fb3f1078a30c4123fb9da1579f55816e4e Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/idtracking.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/idtracking.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cfbc3e762775fa16ef8b025489a3c4bc89c2eab Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/idtracking.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/lexer.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/lexer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8213b947ba9fcaa494b29705086f0f5f80678055 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/lexer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/loaders.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/loaders.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..321dfd37bc793f86f3c278b6eea45b1cf2dd67f6 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/loaders.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/meta.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/meta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d66501afcbe1eea32965bd16fe3a8f4806d8818b Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/meta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/nativetypes.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/nativetypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..294fe4cb6a9a9bb6deaf719f8d622e9bd6d0ae7d Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/nativetypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/nodes.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/nodes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71c88ae3088f78b8045dc1207695c66e782a3971 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/nodes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/optimizer.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/optimizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f841aa97b6d7c9b748b420bd4746ca457fd2d39 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/optimizer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/parser.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/parser.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..659fa28de779a4cb5756cde4505ab7c36d0f71ed Binary files /dev/null and 
b/MLPY/Lib/site-packages/jinja2/__pycache__/parser.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/runtime.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/runtime.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c55ef8fd70f6cdf19e62d759e74e9ced5586990c Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/runtime.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/sandbox.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/sandbox.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6efc8ebb9d113bea0324e18ad87f4fd288f641e9 Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/sandbox.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/tests.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/tests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea30f73ecf68fe2f49f2dbd77c182828ecff28c Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/tests.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83aa326a0944f064ad4b4ecebe5aa37309bc7eca Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/__pycache__/visitor.cpython-39.pyc b/MLPY/Lib/site-packages/jinja2/__pycache__/visitor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4c211704e6a8792f866a90fe3ce27a79dfbbf9a Binary files /dev/null and b/MLPY/Lib/site-packages/jinja2/__pycache__/visitor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/jinja2/_identifier.py b/MLPY/Lib/site-packages/jinja2/_identifier.py new file mode 100644 index 0000000000000000000000000000000000000000..928c1503c7d414a8a86bbf5a82c68d42cb089bd2 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/_identifier.py @@ -0,0 +1,6 @@ +import re + +# generated by scripts/generate_identifier_pattern.py +pattern = re.compile( + r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 +) diff --git a/MLPY/Lib/site-packages/jinja2/async_utils.py b/MLPY/Lib/site-packages/jinja2/async_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e65219e497b0f101fa552752d7b56dc364357e4c --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/async_utils.py @@ -0,0 +1,84 @@ +import inspect +import typing as t +from functools import WRAPPER_ASSIGNMENTS +from functools import wraps + +from .utils import _PassArg +from .utils import pass_eval_context + +V = t.TypeVar("V") + + +def async_variant(normal_func): # type: ignore + def decorator(async_func): # type: ignore + pass_arg = 
_PassArg.from_obj(normal_func) + need_eval_context = pass_arg is None + + if pass_arg is _PassArg.environment: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].is_async) + + else: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].environment.is_async) + + # Take the doc and annotations from the sync function, but the + # name from the async function. Pallets-Sphinx-Themes + # build_function_directive expects __wrapped__ to point to the + # sync function. + async_func_attrs = ("__module__", "__name__", "__qualname__") + normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs)) + + @wraps(normal_func, assigned=normal_func_attrs) + @wraps(async_func, assigned=async_func_attrs, updated=()) + def wrapper(*args, **kwargs): # type: ignore + b = is_async(args) + + if need_eval_context: + args = args[1:] + + if b: + return async_func(*args, **kwargs) + + return normal_func(*args, **kwargs) + + if need_eval_context: + wrapper = pass_eval_context(wrapper) + + wrapper.jinja_async_variant = True # type: ignore[attr-defined] + return wrapper + + return decorator + + +_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)} + + +async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V": + # Avoid a costly call to isawaitable + if type(value) in _common_primitives: + return t.cast("V", value) + + if inspect.isawaitable(value): + return await t.cast("t.Awaitable[V]", value) + + return t.cast("V", value) + + +async def auto_aiter( + iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> "t.AsyncIterator[V]": + if hasattr(iterable, "__aiter__"): + async for item in t.cast("t.AsyncIterable[V]", iterable): + yield item + else: + for item in iterable: + yield item + + +async def auto_to_list( + value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> t.List["V"]: + return [x async for x in auto_aiter(value)] diff --git a/MLPY/Lib/site-packages/jinja2/bccache.py b/MLPY/Lib/site-packages/jinja2/bccache.py new file mode 100644 index 0000000000000000000000000000000000000000..ada8b099ff251ea9c6da4c42e1383f37e359f06a --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/bccache.py @@ -0,0 +1,408 @@ +"""The optional bytecode cache system. This is useful if you have very +complex template situations and the compilation of all those templates +slows down your application too much. + +Situations where this is useful are often forking web applications that +are initialized on the first request. +""" + +import errno +import fnmatch +import marshal +import os +import pickle +import stat +import sys +import tempfile +import typing as t +from hashlib import sha1 +from io import BytesIO +from types import CodeType + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .environment import Environment + + class _MemcachedClient(te.Protocol): + def get(self, key: str) -> bytes: ... + + def set( + self, key: str, value: bytes, timeout: t.Optional[int] = None + ) -> None: ... + + +bc_version = 5 +# Magic bytes to identify Jinja bytecode cache files. Contains the +# Python major and minor version to avoid loading incompatible bytecode +# if a project upgrades its Python version. +bc_magic = ( + b"j2" + + pickle.dumps(bc_version, 2) + + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) +) + + +class Bucket: + """Buckets are used to store the bytecode for one template. It's created + and initialized by the bytecode cache and passed to the loading functions. 
+ + The buckets get an internal checksum from the cache assigned and use this + to automatically reject outdated cache material. Individual bytecode + cache subclasses don't have to care about cache invalidation. + """ + + def __init__(self, environment: "Environment", key: str, checksum: str) -> None: + self.environment = environment + self.key = key + self.checksum = checksum + self.reset() + + def reset(self) -> None: + """Resets the bucket (unloads the bytecode).""" + self.code: t.Optional[CodeType] = None + + def load_bytecode(self, f: t.BinaryIO) -> None: + """Loads bytecode from a file or file like object.""" + # make sure the magic header is correct + magic = f.read(len(bc_magic)) + if magic != bc_magic: + self.reset() + return + # the source code of the file changed, we need to reload + checksum = pickle.load(f) + if self.checksum != checksum: + self.reset() + return + # if marshal_load fails then we need to reload + try: + self.code = marshal.load(f) + except (EOFError, ValueError, TypeError): + self.reset() + return + + def write_bytecode(self, f: t.IO[bytes]) -> None: + """Dump the bytecode into the file or file like object passed.""" + if self.code is None: + raise TypeError("can't write empty bucket") + f.write(bc_magic) + pickle.dump(self.checksum, f, 2) + marshal.dump(self.code, f) + + def bytecode_from_string(self, string: bytes) -> None: + """Load bytecode from bytes.""" + self.load_bytecode(BytesIO(string)) + + def bytecode_to_string(self) -> bytes: + """Return the bytecode as bytes.""" + out = BytesIO() + self.write_bytecode(out) + return out.getvalue() + + +class BytecodeCache: + """To implement your own bytecode cache you have to subclass this class + and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of + these methods are passed a :class:`~jinja2.bccache.Bucket`. + + A very basic bytecode cache that saves the bytecode on the file system:: + + from os import path + + class MyCache(BytecodeCache): + + def __init__(self, directory): + self.directory = directory + + def load_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + if path.exists(filename): + with open(filename, 'rb') as f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + with open(filename, 'wb') as f: + bucket.write_bytecode(f) + + A more advanced version of a filesystem based bytecode cache is part of + Jinja. + """ + + def load_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to load bytecode into a + bucket. If they are not able to find code in the cache for the + bucket, it must not do anything. + """ + raise NotImplementedError() + + def dump_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to write the bytecode + from a bucket back to the cache. If it unable to do so it must not + fail silently but raise an exception. + """ + raise NotImplementedError() + + def clear(self) -> None: + """Clears the cache. This method is not used by Jinja but should be + implemented to allow applications to clear the bytecode cache used + by a particular environment. 
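# --- Editorial sketch (not part of the vendored source) ----------------------
# The BytecodeCache docstring above shows how to implement a cache; attaching
# one to an Environment is a single constructor argument. FileSystemBytecodeCache
# and DictLoader are classes bundled with this package; the template content is
# illustrative.
from jinja2 import DictLoader, Environment, FileSystemBytecodeCache

env = Environment(
    loader=DictLoader({"hello.txt": "Hello {{ name }}!"}),
    bytecode_cache=FileSystemBytecodeCache(),  # default: per-user temp directory
)
# The first render compiles and stores bytecode; later processes reuse it.
print(env.get_template("hello.txt").render(name="World"))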
+ """ + + def get_cache_key( + self, name: str, filename: t.Optional[t.Union[str]] = None + ) -> str: + """Returns the unique hash key for this template name.""" + hash = sha1(name.encode("utf-8")) + + if filename is not None: + hash.update(f"|{filename}".encode()) + + return hash.hexdigest() + + def get_source_checksum(self, source: str) -> str: + """Returns a checksum for the source.""" + return sha1(source.encode("utf-8")).hexdigest() + + def get_bucket( + self, + environment: "Environment", + name: str, + filename: t.Optional[str], + source: str, + ) -> Bucket: + """Return a cache bucket for the given template. All arguments are + mandatory but filename may be `None`. + """ + key = self.get_cache_key(name, filename) + checksum = self.get_source_checksum(source) + bucket = Bucket(environment, key, checksum) + self.load_bytecode(bucket) + return bucket + + def set_bucket(self, bucket: Bucket) -> None: + """Put the bucket into the cache.""" + self.dump_bytecode(bucket) + + +class FileSystemBytecodeCache(BytecodeCache): + """A bytecode cache that stores bytecode on the filesystem. It accepts + two arguments: The directory where the cache items are stored and a + pattern string that is used to build the filename. + + If no directory is specified a default cache directory is selected. On + Windows the user's temp directory is used, on UNIX systems a directory + is created for the user in the system temp directory. + + The pattern can be used to have multiple separate caches operate on the + same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` + is replaced with the cache key. + + >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') + + This bytecode cache supports clearing of the cache using the clear method. + """ + + def __init__( + self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache" + ) -> None: + if directory is None: + directory = self._get_default_cache_dir() + self.directory = directory + self.pattern = pattern + + def _get_default_cache_dir(self) -> str: + def _unsafe_dir() -> "te.NoReturn": + raise RuntimeError( + "Cannot determine safe temp directory. You " + "need to explicitly provide one." + ) + + tmpdir = tempfile.gettempdir() + + # On windows the temporary directory is used specific unless + # explicitly forced otherwise. We can just use that. + if os.name == "nt": + return tmpdir + if not hasattr(os, "getuid"): + _unsafe_dir() + + dirname = f"_jinja2-cache-{os.getuid()}" + actual_dir = os.path.join(tmpdir, dirname) + + try: + os.mkdir(actual_dir, stat.S_IRWXU) + except OSError as e: + if e.errno != errno.EEXIST: + raise + try: + os.chmod(actual_dir, stat.S_IRWXU) + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + except OSError as e: + if e.errno != errno.EEXIST: + raise + + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + + return actual_dir + + def _get_cache_filename(self, bucket: Bucket) -> str: + return os.path.join(self.directory, self.pattern % (bucket.key,)) + + def load_bytecode(self, bucket: Bucket) -> None: + filename = self._get_cache_filename(bucket) + + # Don't test for existence before opening the file, since the + # file could disappear after the test before the open. 
+ try: + f = open(filename, "rb") + except (FileNotFoundError, IsADirectoryError, PermissionError): + # PermissionError can occur on Windows when an operation is + # in progress, such as calling clear(). + return + + with f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket: Bucket) -> None: + # Write to a temporary file, then rename to the real name after + # writing. This avoids another process reading the file before + # it is fully written. + name = self._get_cache_filename(bucket) + f = tempfile.NamedTemporaryFile( + mode="wb", + dir=os.path.dirname(name), + prefix=os.path.basename(name), + suffix=".tmp", + delete=False, + ) + + def remove_silent() -> None: + try: + os.remove(f.name) + except OSError: + # Another process may have called clear(). On Windows, + # another program may be holding the file open. + pass + + try: + with f: + bucket.write_bytecode(f) + except BaseException: + remove_silent() + raise + + try: + os.replace(f.name, name) + except OSError: + # Another process may have called clear(). On Windows, + # another program may be holding the file open. + remove_silent() + except BaseException: + remove_silent() + raise + + def clear(self) -> None: + # imported lazily here because google app-engine doesn't support + # write access on the file system and the function does not exist + # normally. + from os import remove + + files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",)) + for filename in files: + try: + remove(os.path.join(self.directory, filename)) + except OSError: + pass + + +class MemcachedBytecodeCache(BytecodeCache): + """This class implements a bytecode cache that uses a memcache cache for + storing the information. It does not enforce a specific memcache library + (tummy's memcache or cmemcache) but will accept any class that provides + the minimal interface required. + + Libraries compatible with this class: + + - `cachelib `_ + - `python-memcached `_ + + (Unfortunately the django cache interface is not compatible because it + does not support storing binary data, only text. You can however pass + the underlying cache client to the bytecode cache which is available + as `django.core.cache.cache._client`.) + + The minimal interface for the client passed to the constructor is this: + + .. class:: MinimalClientInterface + + .. method:: set(key, value[, timeout]) + + Stores the bytecode in the cache. `value` is a string and + `timeout` the timeout of the key. If timeout is not provided + a default timeout or no timeout should be assumed, if it's + provided it's an integer with the number of seconds the cache + item should exist. + + .. method:: get(key) + + Returns the value for the cache key. If the item does not + exist in the cache the return value must be `None`. + + The other arguments to the constructor are the prefix for all keys that + is added before the actual cache key and the timeout for the bytecode in + the cache system. We recommend a high (or no) timeout. + + This bytecode cache does not support clearing of used items in the cache. + The clear method is a no-operation function. + + .. versionadded:: 2.7 + Added support for ignoring memcache errors through the + `ignore_memcache_errors` parameter. 
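# --- Editorial sketch (not part of the vendored source) ----------------------
# The minimal client interface described above only needs get() and set(). The
# InMemoryClient below is a hypothetical stand-in for a real memcached library,
# shown only to illustrate the shape MemcachedBytecodeCache expects.
import typing as t

from jinja2 import Environment, MemcachedBytecodeCache


class InMemoryClient:
    def __init__(self) -> None:
        self._store: t.Dict[str, bytes] = {}

    def get(self, key: str) -> bytes:
        # A miss returns empty bytes; the magic-header check in Bucket rejects it.
        return self._store.get(key, b"")

    def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None:
        self._store[key] = value


env = Environment(
    bytecode_cache=MemcachedBytecodeCache(InMemoryClient(), prefix="demo/bytecode/")
)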
+ """ + + def __init__( + self, + client: "_MemcachedClient", + prefix: str = "jinja2/bytecode/", + timeout: t.Optional[int] = None, + ignore_memcache_errors: bool = True, + ): + self.client = client + self.prefix = prefix + self.timeout = timeout + self.ignore_memcache_errors = ignore_memcache_errors + + def load_bytecode(self, bucket: Bucket) -> None: + try: + code = self.client.get(self.prefix + bucket.key) + except Exception: + if not self.ignore_memcache_errors: + raise + else: + bucket.bytecode_from_string(code) + + def dump_bytecode(self, bucket: Bucket) -> None: + key = self.prefix + bucket.key + value = bucket.bytecode_to_string() + + try: + if self.timeout is not None: + self.client.set(key, value, self.timeout) + else: + self.client.set(key, value) + except Exception: + if not self.ignore_memcache_errors: + raise diff --git a/MLPY/Lib/site-packages/jinja2/compiler.py b/MLPY/Lib/site-packages/jinja2/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..274071750f09ff467cfbb74075a30ae2fa207ecf --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/compiler.py @@ -0,0 +1,1960 @@ +"""Compiles nodes from the parser into Python code.""" + +import typing as t +from contextlib import contextmanager +from functools import update_wrapper +from io import StringIO +from itertools import chain +from keyword import iskeyword as is_python_keyword + +from markupsafe import escape +from markupsafe import Markup + +from . import nodes +from .exceptions import TemplateAssertionError +from .idtracking import Symbols +from .idtracking import VAR_LOAD_ALIAS +from .idtracking import VAR_LOAD_PARAMETER +from .idtracking import VAR_LOAD_RESOLVE +from .idtracking import VAR_LOAD_UNDEFINED +from .nodes import EvalContext +from .optimizer import Optimizer +from .utils import _PassArg +from .utils import concat +from .visitor import NodeVisitor + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .environment import Environment + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) + +operators = { + "eq": "==", + "ne": "!=", + "gt": ">", + "gteq": ">=", + "lt": "<", + "lteq": "<=", + "in": "in", + "notin": "not in", +} + + +def optimizeconst(f: F) -> F: + def new_func( + self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any + ) -> t.Any: + # Only optimize if the frame is not volatile + if self.optimizer is not None and not frame.eval_ctx.volatile: + new_node = self.optimizer.visit(node, frame.eval_ctx) + + if new_node != node: + return self.visit(new_node, frame) + + return f(self, node, frame, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + +def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed and op in self.environment.intercepted_binops # type: ignore + ): + self.write(f"environment.call_binop(context, {op!r}, ") + self.visit(node.left, frame) + self.write(", ") + self.visit(node.right, frame) + else: + self.write("(") + self.visit(node.left, frame) + self.write(f" {op} ") + self.visit(node.right, frame) + + self.write(")") + + return visitor + + +def _make_unop( + op: str, +) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed and op in self.environment.intercepted_unops # type: ignore + ): + 
self.write(f"environment.call_unop(context, {op!r}, ") + self.visit(node.node, frame) + else: + self.write("(" + op) + self.visit(node.node, frame) + + self.write(")") + + return visitor + + +def generate( + node: nodes.Template, + environment: "Environment", + name: t.Optional[str], + filename: t.Optional[str], + stream: t.Optional[t.TextIO] = None, + defer_init: bool = False, + optimized: bool = True, +) -> t.Optional[str]: + """Generate the python source for a node tree.""" + if not isinstance(node, nodes.Template): + raise TypeError("Can't compile non template nodes") + + generator = environment.code_generator_class( + environment, name, filename, stream, defer_init, optimized + ) + generator.visit(node) + + if stream is None: + return generator.stream.getvalue() # type: ignore + + return None + + +def has_safe_repr(value: t.Any) -> bool: + """Does the node have a safe representation?""" + if value is None or value is NotImplemented or value is Ellipsis: + return True + + if type(value) in {bool, int, float, complex, range, str, Markup}: + return True + + if type(value) in {tuple, list, set, frozenset}: + return all(has_safe_repr(v) for v in value) + + if type(value) is dict: # noqa E721 + return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items()) + + return False + + +def find_undeclared( + nodes: t.Iterable[nodes.Node], names: t.Iterable[str] +) -> t.Set[str]: + """Check if the names passed are accessed undeclared. The return value + is a set of all the undeclared names from the sequence of names found. + """ + visitor = UndeclaredNameVisitor(names) + try: + for node in nodes: + visitor.visit(node) + except VisitorExit: + pass + return visitor.undeclared + + +class MacroRef: + def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None: + self.node = node + self.accesses_caller = False + self.accesses_kwargs = False + self.accesses_varargs = False + + +class Frame: + """Holds compile time information for us.""" + + def __init__( + self, + eval_ctx: EvalContext, + parent: t.Optional["Frame"] = None, + level: t.Optional[int] = None, + ) -> None: + self.eval_ctx = eval_ctx + + # the parent of this frame + self.parent = parent + + if parent is None: + self.symbols = Symbols(level=level) + + # in some dynamic inheritance situations the compiler needs to add + # write tests around output statements. + self.require_output_check = False + + # inside some tags we are using a buffer rather than yield statements. + # this for example affects {% filter %} or {% macro %}. If a frame + # is buffered this variable points to the name of the list used as + # buffer. + self.buffer: t.Optional[str] = None + + # the name of the block we're in, otherwise None. + self.block: t.Optional[str] = None + + else: + self.symbols = Symbols(parent.symbols, level=level) + self.require_output_check = parent.require_output_check + self.buffer = parent.buffer + self.block = parent.block + + # a toplevel frame is the root + soft frames such as if conditions. + self.toplevel = False + + # the root frame is basically just the outermost frame, so no if + # conditions. This information is used to optimize inheritance + # situations. + self.rootlevel = False + + # variables set inside of loops and blocks should not affect outer frames, + # but they still needs to be kept track of as part of the active context. 
+ self.loop_frame = False + self.block_frame = False + + # track whether the frame is being used in an if-statement or conditional + # expression as it determines which errors should be raised during runtime + # or compile time. + self.soft_frame = False + + def copy(self) -> "Frame": + """Create a copy of the current one.""" + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.symbols = self.symbols.copy() + return rv + + def inner(self, isolated: bool = False) -> "Frame": + """Return an inner frame.""" + if isolated: + return Frame(self.eval_ctx, level=self.symbols.level + 1) + return Frame(self.eval_ctx, self) + + def soft(self) -> "Frame": + """Return a soft frame. A soft frame may not be modified as + standalone thing as it shares the resources with the frame it + was created of, but it's not a rootlevel frame any longer. + + This is only used to implement if-statements and conditional + expressions. + """ + rv = self.copy() + rv.rootlevel = False + rv.soft_frame = True + return rv + + __copy__ = copy + + +class VisitorExit(RuntimeError): + """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" + + +class DependencyFinderVisitor(NodeVisitor): + """A visitor that collects filter and test calls.""" + + def __init__(self) -> None: + self.filters: t.Set[str] = set() + self.tests: t.Set[str] = set() + + def visit_Filter(self, node: nodes.Filter) -> None: + self.generic_visit(node) + self.filters.add(node.name) + + def visit_Test(self, node: nodes.Test) -> None: + self.generic_visit(node) + self.tests.add(node.name) + + def visit_Block(self, node: nodes.Block) -> None: + """Stop visiting at blocks.""" + + +class UndeclaredNameVisitor(NodeVisitor): + """A visitor that checks if a name is accessed without being + declared. This is different from the frame visitor as it will + not stop at closure frames. + """ + + def __init__(self, names: t.Iterable[str]) -> None: + self.names = set(names) + self.undeclared: t.Set[str] = set() + + def visit_Name(self, node: nodes.Name) -> None: + if node.ctx == "load" and node.name in self.names: + self.undeclared.add(node.name) + if self.undeclared == self.names: + raise VisitorExit() + else: + self.names.discard(node.name) + + def visit_Block(self, node: nodes.Block) -> None: + """Stop visiting a blocks.""" + + +class CompilerExit(Exception): + """Raised if the compiler encountered a situation where it just + doesn't make sense to further process the code. Any block that + raises such an exception is not further processed. + """ + + +class CodeGenerator(NodeVisitor): + def __init__( + self, + environment: "Environment", + name: t.Optional[str], + filename: t.Optional[str], + stream: t.Optional[t.TextIO] = None, + defer_init: bool = False, + optimized: bool = True, + ) -> None: + if stream is None: + stream = StringIO() + self.environment = environment + self.name = name + self.filename = filename + self.stream = stream + self.created_block_context = False + self.defer_init = defer_init + self.optimizer: t.Optional[Optimizer] = None + + if optimized: + self.optimizer = Optimizer(environment) + + # aliases for imports + self.import_aliases: t.Dict[str, str] = {} + + # a registry for all blocks. Because blocks are moved out + # into the global python scope they are registered here + self.blocks: t.Dict[str, nodes.Block] = {} + + # the number of extends statements so far + self.extends_so_far = 0 + + # some templates have a rootlevel extends. 
In this case we + # can safely assume that we're a child template and do some + # more optimizations. + self.has_known_extends = False + + # the current line number + self.code_lineno = 1 + + # registry of all filters and tests (global, not block local) + self.tests: t.Dict[str, str] = {} + self.filters: t.Dict[str, str] = {} + + # the debug information + self.debug_info: t.List[t.Tuple[int, int]] = [] + self._write_debug_info: t.Optional[int] = None + + # the number of new lines before the next write() + self._new_lines = 0 + + # the line number of the last written statement + self._last_line = 0 + + # true if nothing was written so far. + self._first_write = True + + # used by the `temporary_identifier` method to get new + # unique, temporary identifier + self._last_identifier = 0 + + # the current indentation + self._indentation = 0 + + # Tracks toplevel assignments + self._assign_stack: t.List[t.Set[str]] = [] + + # Tracks parameter definition blocks + self._param_def_block: t.List[t.Set[str]] = [] + + # Tracks the current context. + self._context_reference_stack = ["context"] + + @property + def optimized(self) -> bool: + return self.optimizer is not None + + # -- Various compilation helpers + + def fail(self, msg: str, lineno: int) -> "te.NoReturn": + """Fail with a :exc:`TemplateAssertionError`.""" + raise TemplateAssertionError(msg, lineno, self.name, self.filename) + + def temporary_identifier(self) -> str: + """Get a new unique identifier.""" + self._last_identifier += 1 + return f"t_{self._last_identifier}" + + def buffer(self, frame: Frame) -> None: + """Enable buffering for the frame from that point onwards.""" + frame.buffer = self.temporary_identifier() + self.writeline(f"{frame.buffer} = []") + + def return_buffer_contents( + self, frame: Frame, force_unescaped: bool = False + ) -> None: + """Return the buffer contents of the frame.""" + if not force_unescaped: + if frame.eval_ctx.volatile: + self.writeline("if context.eval_ctx.autoescape:") + self.indent() + self.writeline(f"return Markup(concat({frame.buffer}))") + self.outdent() + self.writeline("else:") + self.indent() + self.writeline(f"return concat({frame.buffer})") + self.outdent() + return + elif frame.eval_ctx.autoescape: + self.writeline(f"return Markup(concat({frame.buffer}))") + return + self.writeline(f"return concat({frame.buffer})") + + def indent(self) -> None: + """Indent by one.""" + self._indentation += 1 + + def outdent(self, step: int = 1) -> None: + """Outdent by step.""" + self._indentation -= step + + def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None: + """Yield or write into the frame buffer.""" + if frame.buffer is None: + self.writeline("yield ", node) + else: + self.writeline(f"{frame.buffer}.append(", node) + + def end_write(self, frame: Frame) -> None: + """End the writing process started by `start_write`.""" + if frame.buffer is not None: + self.write(")") + + def simple_write( + self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None + ) -> None: + """Simple shortcut for start_write + write + end_write.""" + self.start_write(frame, node) + self.write(s) + self.end_write(frame) + + def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None: + """Visit a list of nodes as block in a frame. If the current frame + is no buffer a dummy ``if 0: yield None`` is written automatically. 
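# --- Editorial sketch (not part of the vendored source) ----------------------
# CodeGenerator turns a parsed template into a Python module with a root()
# render generator. Environment.compile(..., raw=True) returns that generated
# source as a string, which is handy when debugging this module.
from jinja2 import Environment

env = Environment()
print(env.compile("Hello {{ name }}!", name="hello", raw=True))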
+ """ + try: + self.writeline("pass") + for node in nodes: + self.visit(node, frame) + except CompilerExit: + pass + + def write(self, x: str) -> None: + """Write a string into the output stream.""" + if self._new_lines: + if not self._first_write: + self.stream.write("\n" * self._new_lines) + self.code_lineno += self._new_lines + if self._write_debug_info is not None: + self.debug_info.append((self._write_debug_info, self.code_lineno)) + self._write_debug_info = None + self._first_write = False + self.stream.write(" " * self._indentation) + self._new_lines = 0 + self.stream.write(x) + + def writeline( + self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0 + ) -> None: + """Combination of newline and write.""" + self.newline(node, extra) + self.write(x) + + def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None: + """Add one or more newlines before the next write.""" + self._new_lines = max(self._new_lines, 1 + extra) + if node is not None and node.lineno != self._last_line: + self._write_debug_info = node.lineno + self._last_line = node.lineno + + def signature( + self, + node: t.Union[nodes.Call, nodes.Filter, nodes.Test], + frame: Frame, + extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + ) -> None: + """Writes a function call to the stream for the current node. + A leading comma is added automatically. The extra keyword + arguments may not include python keywords otherwise a syntax + error could occur. The extra keyword arguments should be given + as python dict. + """ + # if any of the given keyword arguments is a python keyword + # we have to make sure that no invalid call is created. + kwarg_workaround = any( + is_python_keyword(t.cast(str, k)) + for k in chain((x.key for x in node.kwargs), extra_kwargs or ()) + ) + + for arg in node.args: + self.write(", ") + self.visit(arg, frame) + + if not kwarg_workaround: + for kwarg in node.kwargs: + self.write(", ") + self.visit(kwarg, frame) + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f", {key}={value}") + if node.dyn_args: + self.write(", *") + self.visit(node.dyn_args, frame) + + if kwarg_workaround: + if node.dyn_kwargs is not None: + self.write(", **dict({") + else: + self.write(", **{") + for kwarg in node.kwargs: + self.write(f"{kwarg.key!r}: ") + self.visit(kwarg.value, frame) + self.write(", ") + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f"{key!r}: {value}, ") + if node.dyn_kwargs is not None: + self.write("}, **") + self.visit(node.dyn_kwargs, frame) + self.write(")") + else: + self.write("}") + + elif node.dyn_kwargs is not None: + self.write(", **") + self.visit(node.dyn_kwargs, frame) + + def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None: + """Find all filter and test names used in the template and + assign them to variables in the compiled namespace. Checking + that the names are registered with the environment is done when + compiling the Filter and Test nodes. If the node is in an If or + CondExpr node, the check is done at runtime instead. + + .. versionchanged:: 3.0 + Filters and tests in If and CondExpr nodes are checked at + runtime instead of compile time. 
+ """ + visitor = DependencyFinderVisitor() + + for node in nodes: + visitor.visit(node) + + for id_map, names, dependency in ( + (self.filters, visitor.filters, "filters"), + ( + self.tests, + visitor.tests, + "tests", + ), + ): + for name in sorted(names): + if name not in id_map: + id_map[name] = self.temporary_identifier() + + # add check during runtime that dependencies used inside of executed + # blocks are defined, as this step may be skipped during compile time + self.writeline("try:") + self.indent() + self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]") + self.outdent() + self.writeline("except KeyError:") + self.indent() + self.writeline("@internalcode") + self.writeline(f"def {id_map[name]}(*unused):") + self.indent() + self.writeline( + f'raise TemplateRuntimeError("No {dependency[:-1]}' + f' named {name!r} found.")' + ) + self.outdent() + self.outdent() + + def enter_frame(self, frame: Frame) -> None: + undefs = [] + for target, (action, param) in frame.symbols.loads.items(): + if action == VAR_LOAD_PARAMETER: + pass + elif action == VAR_LOAD_RESOLVE: + self.writeline(f"{target} = {self.get_resolve_func()}({param!r})") + elif action == VAR_LOAD_ALIAS: + self.writeline(f"{target} = {param}") + elif action == VAR_LOAD_UNDEFINED: + undefs.append(target) + else: + raise NotImplementedError("unknown load instruction") + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None: + if not with_python_scope: + undefs = [] + for target in frame.symbols.loads: + undefs.append(target) + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str: + return async_value if self.environment.is_async else sync_value + + def func(self, name: str) -> str: + return f"{self.choose_async()}def {name}" + + def macro_body( + self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame + ) -> t.Tuple[Frame, MacroRef]: + """Dump the function def of a macro or call block.""" + frame = frame.inner() + frame.symbols.analyze_node(node) + macro_ref = MacroRef(node) + + explicit_caller = None + skip_special_params = set() + args = [] + + for idx, arg in enumerate(node.args): + if arg.name == "caller": + explicit_caller = idx + if arg.name in ("kwargs", "varargs"): + skip_special_params.add(arg.name) + args.append(frame.symbols.ref(arg.name)) + + undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) + + if "caller" in undeclared: + # In older Jinja versions there was a bug that allowed caller + # to retain the special behavior even if it was mentioned in + # the argument list. However thankfully this was only really + # working if it was the last argument. So we are explicitly + # checking this now and error out if it is anywhere else in + # the argument list. 
+ if explicit_caller is not None: + try: + node.defaults[explicit_caller - len(node.args)] + except IndexError: + self.fail( + "When defining macros or call blocks the " + 'special "caller" argument must be omitted ' + "or be given a default.", + node.lineno, + ) + else: + args.append(frame.symbols.declare_parameter("caller")) + macro_ref.accesses_caller = True + if "kwargs" in undeclared and "kwargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("kwargs")) + macro_ref.accesses_kwargs = True + if "varargs" in undeclared and "varargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("varargs")) + macro_ref.accesses_varargs = True + + # macros are delayed, they never require output checks + frame.require_output_check = False + frame.symbols.analyze_node(node) + self.writeline(f"{self.func('macro')}({', '.join(args)}):", node) + self.indent() + + self.buffer(frame) + self.enter_frame(frame) + + self.push_parameter_definitions(frame) + for idx, arg in enumerate(node.args): + ref = frame.symbols.ref(arg.name) + self.writeline(f"if {ref} is missing:") + self.indent() + try: + default = node.defaults[idx - len(node.args)] + except IndexError: + self.writeline( + f'{ref} = undefined("parameter {arg.name!r} was not provided",' + f" name={arg.name!r})" + ) + else: + self.writeline(f"{ref} = ") + self.visit(default, frame) + self.mark_parameter_stored(ref) + self.outdent() + self.pop_parameter_definitions() + + self.blockvisit(node.body, frame) + self.return_buffer_contents(frame, force_unescaped=True) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + return frame, macro_ref + + def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None: + """Dump the macro definition for the def created by macro_body.""" + arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) + name = getattr(macro_ref.node, "name", None) + if len(macro_ref.node.args) == 1: + arg_tuple += "," + self.write( + f"Macro(environment, macro, {name!r}, ({arg_tuple})," + f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r}," + f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)" + ) + + def position(self, node: nodes.Node) -> str: + """Return a human readable position for the node.""" + rv = f"line {node.lineno}" + if self.name is not None: + rv = f"{rv} in {self.name!r}" + return rv + + def dump_local_context(self, frame: Frame) -> str: + items_kv = ", ".join( + f"{name!r}: {target}" + for name, target in frame.symbols.dump_stores().items() + ) + return f"{{{items_kv}}}" + + def write_commons(self) -> None: + """Writes a common preamble that is used by root and block functions. + Primarily this sets up common local helpers and enforces a generator + through a dead branch. + """ + self.writeline("resolve = context.resolve_or_missing") + self.writeline("undefined = environment.undefined") + self.writeline("concat = environment.concat") + # always use the standard Undefined class for the implicit else of + # conditional expressions + self.writeline("cond_expr_undefined = Undefined") + self.writeline("if 0: yield None") + + def push_parameter_definitions(self, frame: Frame) -> None: + """Pushes all parameter targets from the given frame into a local + stack that permits tracking of yet to be assigned parameters. In + particular this enables the optimization from `visit_Name` to skip + undefined expressions for parameters in macros as macros can reference + otherwise unbound parameters. 
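# --- Editorial sketch (not part of the vendored source) ----------------------
# macro_body()/macro_def() above wire up the implicit "caller", "varargs" and
# "kwargs" parameters for macros. Seen from the template side, "caller" is what
# {% call %} blocks provide; the template text is illustrative.
from jinja2 import Environment

env = Environment()
template = env.from_string(
    "{% macro box(title) %}[{{ title }}: {{ caller() }}]{% endmacro %}"
    "{% call box('note') %}hello{% endcall %}"
)
print(template.render())  # [note: hello]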
+ """ + self._param_def_block.append(frame.symbols.dump_param_targets()) + + def pop_parameter_definitions(self) -> None: + """Pops the current parameter definitions set.""" + self._param_def_block.pop() + + def mark_parameter_stored(self, target: str) -> None: + """Marks a parameter in the current parameter definitions as stored. + This will skip the enforced undefined checks. + """ + if self._param_def_block: + self._param_def_block[-1].discard(target) + + def push_context_reference(self, target: str) -> None: + self._context_reference_stack.append(target) + + def pop_context_reference(self) -> None: + self._context_reference_stack.pop() + + def get_context_ref(self) -> str: + return self._context_reference_stack[-1] + + def get_resolve_func(self) -> str: + target = self._context_reference_stack[-1] + if target == "context": + return "resolve" + return f"{target}.resolve" + + def derive_context(self, frame: Frame) -> str: + return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})" + + def parameter_is_undeclared(self, target: str) -> bool: + """Checks if a given target is an undeclared parameter.""" + if not self._param_def_block: + return False + return target in self._param_def_block[-1] + + def push_assign_tracking(self) -> None: + """Pushes a new layer for assignment tracking.""" + self._assign_stack.append(set()) + + def pop_assign_tracking(self, frame: Frame) -> None: + """Pops the topmost level for assignment tracking and updates the + context variables if necessary. + """ + vars = self._assign_stack.pop() + if ( + not frame.block_frame + and not frame.loop_frame + and not frame.toplevel + or not vars + ): + return + public_names = [x for x in vars if x[:1] != "_"] + if len(vars) == 1: + name = next(iter(vars)) + ref = frame.symbols.ref(name) + if frame.loop_frame: + self.writeline(f"_loop_vars[{name!r}] = {ref}") + return + if frame.block_frame: + self.writeline(f"_block_vars[{name!r}] = {ref}") + return + self.writeline(f"context.vars[{name!r}] = {ref}") + else: + if frame.loop_frame: + self.writeline("_loop_vars.update({") + elif frame.block_frame: + self.writeline("_block_vars.update({") + else: + self.writeline("context.vars.update({") + for idx, name in enumerate(vars): + if idx: + self.write(", ") + ref = frame.symbols.ref(name) + self.write(f"{name!r}: {ref}") + self.write("})") + if not frame.block_frame and not frame.loop_frame and public_names: + if len(public_names) == 1: + self.writeline(f"context.exported_vars.add({public_names[0]!r})") + else: + names_str = ", ".join(map(repr, public_names)) + self.writeline(f"context.exported_vars.update(({names_str}))") + + # -- Statement Visitors + + def visit_Template( + self, node: nodes.Template, frame: t.Optional[Frame] = None + ) -> None: + assert frame is None, "no root frame allowed" + eval_ctx = EvalContext(self.environment, self.name) + + from .runtime import async_exported + from .runtime import exported + + if self.environment.is_async: + exported_names = sorted(exported + async_exported) + else: + exported_names = sorted(exported) + + self.writeline("from jinja2.runtime import " + ", ".join(exported_names)) + + # if we want a deferred initialization we cannot move the + # environment into a local name + envenv = "" if self.defer_init else ", environment=environment" + + # do we have an extends tag at all? If not, we can save some + # overhead by just not processing any inheritance code. 
+ have_extends = node.find(nodes.Extends) is not None + + # find all blocks + for block in node.find_all(nodes.Block): + if block.name in self.blocks: + self.fail(f"block {block.name!r} defined twice", block.lineno) + self.blocks[block.name] = block + + # find all imports and import them + for import_ in node.find_all(nodes.ImportedName): + if import_.importname not in self.import_aliases: + imp = import_.importname + self.import_aliases[imp] = alias = self.temporary_identifier() + if "." in imp: + module, obj = imp.rsplit(".", 1) + self.writeline(f"from {module} import {obj} as {alias}") + else: + self.writeline(f"import {imp} as {alias}") + + # add the load name + self.writeline(f"name = {self.name!r}") + + # generate the root render function. + self.writeline( + f"{self.func('root')}(context, missing=missing{envenv}):", extra=1 + ) + self.indent() + self.write_commons() + + # process the root + frame = Frame(eval_ctx) + if "self" in find_undeclared(node.body, ("self",)): + ref = frame.symbols.declare_parameter("self") + self.writeline(f"{ref} = TemplateReference(context)") + frame.symbols.analyze_node(node) + frame.toplevel = frame.rootlevel = True + frame.require_output_check = have_extends and not self.has_known_extends + if have_extends: + self.writeline("parent_template = None") + self.enter_frame(frame) + self.pull_dependencies(node.body) + self.blockvisit(node.body, frame) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + # make sure that the parent root is called. + if have_extends: + if not self.has_known_extends: + self.indent() + self.writeline("if parent_template is not None:") + self.indent() + if not self.environment.is_async: + self.writeline("yield from parent_template.root_render_func(context)") + else: + self.writeline( + "async for event in parent_template.root_render_func(context):" + ) + self.indent() + self.writeline("yield event") + self.outdent() + self.outdent(1 + (not self.has_known_extends)) + + # at this point we now have the blocks collected and can visit them too. + for name, block in self.blocks.items(): + self.writeline( + f"{self.func('block_' + name)}(context, missing=missing{envenv}):", + block, + 1, + ) + self.indent() + self.write_commons() + # It's important that we do not make this frame a child of the + # toplevel template. This would cause a variety of + # interesting issues with identifier tracking. 
+ block_frame = Frame(eval_ctx) + block_frame.block_frame = True + undeclared = find_undeclared(block.body, ("self", "super")) + if "self" in undeclared: + ref = block_frame.symbols.declare_parameter("self") + self.writeline(f"{ref} = TemplateReference(context)") + if "super" in undeclared: + ref = block_frame.symbols.declare_parameter("super") + self.writeline(f"{ref} = context.super({name!r}, block_{name})") + block_frame.symbols.analyze_node(block) + block_frame.block = name + self.writeline("_block_vars = {}") + self.enter_frame(block_frame) + self.pull_dependencies(block.body) + self.blockvisit(block.body, block_frame) + self.leave_frame(block_frame, with_python_scope=True) + self.outdent() + + blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks) + self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1) + debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info) + self.writeline(f"debug_info = {debug_kv_str!r}") + + def visit_Block(self, node: nodes.Block, frame: Frame) -> None: + """Call a block and register it for the template.""" + level = 0 + if frame.toplevel: + # if we know that we are a child template, there is no need to + # check if we are one + if self.has_known_extends: + return + if self.extends_so_far > 0: + self.writeline("if parent_template is None:") + self.indent() + level += 1 + + if node.scoped: + context = self.derive_context(frame) + else: + context = self.get_context_ref() + + if node.required: + self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node) + self.indent() + self.writeline( + f'raise TemplateRuntimeError("Required block {node.name!r} not found")', + node, + ) + self.outdent() + + if not self.environment.is_async and frame.buffer is None: + self.writeline( + f"yield from context.blocks[{node.name!r}][0]({context})", node + ) + else: + self.writeline( + f"{self.choose_async()}for event in" + f" context.blocks[{node.name!r}][0]({context}):", + node, + ) + self.indent() + self.simple_write("event", frame) + self.outdent() + + self.outdent(level) + + def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None: + """Calls the extender.""" + if not frame.toplevel: + self.fail("cannot use extend from a non top-level scope", node.lineno) + + # if the number of extends statements in general is zero so + # far, we don't have to add a check if something extended + # the template before this one. + if self.extends_so_far > 0: + # if we have a known extends we just add a template runtime + # error into the generated code. We could catch that at compile + # time too, but i welcome it not to confuse users by throwing the + # same error at different times just "because we can". + if not self.has_known_extends: + self.writeline("if parent_template is not None:") + self.indent() + self.writeline('raise TemplateRuntimeError("extended multiple times")') + + # if we have a known extends already we don't need that code here + # as we know that the template execution will end here. 
+ if self.has_known_extends: + raise CompilerExit() + else: + self.outdent() + + self.writeline("parent_template = environment.get_template(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + self.writeline("for name, parent_block in parent_template.blocks.items():") + self.indent() + self.writeline("context.blocks.setdefault(name, []).append(parent_block)") + self.outdent() + + # if this extends statement was in the root level we can take + # advantage of that information and simplify the generated code + # in the top level from this point onwards + if frame.rootlevel: + self.has_known_extends = True + + # and now we have one more + self.extends_so_far += 1 + + def visit_Include(self, node: nodes.Include, frame: Frame) -> None: + """Handles includes.""" + if node.ignore_missing: + self.writeline("try:") + self.indent() + + func_name = "get_or_select_template" + if isinstance(node.template, nodes.Const): + if isinstance(node.template.value, str): + func_name = "get_template" + elif isinstance(node.template.value, (tuple, list)): + func_name = "select_template" + elif isinstance(node.template, (nodes.Tuple, nodes.List)): + func_name = "select_template" + + self.writeline(f"template = environment.{func_name}(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + if node.ignore_missing: + self.outdent() + self.writeline("except TemplateNotFound:") + self.indent() + self.writeline("pass") + self.outdent() + self.writeline("else:") + self.indent() + + skip_event_yield = False + if node.with_context: + self.writeline( + f"{self.choose_async()}for event in template.root_render_func(" + "template.new_context(context.get_all(), True," + f" {self.dump_local_context(frame)})):" + ) + elif self.environment.is_async: + self.writeline( + "for event in (await template._get_default_module_async())" + "._body_stream:" + ) + else: + self.writeline("yield from template._get_default_module()._body_stream") + skip_event_yield = True + + if not skip_event_yield: + self.indent() + self.simple_write("event", frame) + self.outdent() + + if node.ignore_missing: + self.outdent() + + def _import_common( + self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame + ) -> None: + self.write(f"{self.choose_async('await ')}environment.get_template(") + self.visit(node.template, frame) + self.write(f", {self.name!r}).") + + if node.with_context: + f_name = f"make_module{self.choose_async('_async')}" + self.write( + f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})" + ) + else: + self.write(f"_get_default_module{self.choose_async('_async')}(context)") + + def visit_Import(self, node: nodes.Import, frame: Frame) -> None: + """Visit regular imports.""" + self.writeline(f"{frame.symbols.ref(node.target)} = ", node) + if frame.toplevel: + self.write(f"context.vars[{node.target!r}] = ") + + self._import_common(node, frame) + + if frame.toplevel and not node.target.startswith("_"): + self.writeline(f"context.exported_vars.discard({node.target!r})") + + def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None: + """Visit named imports.""" + self.newline(node) + self.write("included_template = ") + self._import_common(node, frame) + var_names = [] + discarded_names = [] + for name in node.names: + if isinstance(name, tuple): + name, alias = name + else: + alias = name + self.writeline( + f"{frame.symbols.ref(alias)} =" + f" getattr(included_template, {name!r}, missing)" + ) + self.writeline(f"if {frame.symbols.ref(alias)} is missing:") 
+ self.indent() + message = ( + "the template {included_template.__name__!r}" + f" (imported on {self.position(node)})" + f" does not export the requested name {name!r}" + ) + self.writeline( + f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})" + ) + self.outdent() + if frame.toplevel: + var_names.append(alias) + if not alias.startswith("_"): + discarded_names.append(alias) + + if var_names: + if len(var_names) == 1: + name = var_names[0] + self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}") + else: + names_kv = ", ".join( + f"{name!r}: {frame.symbols.ref(name)}" for name in var_names + ) + self.writeline(f"context.vars.update({{{names_kv}}})") + if discarded_names: + if len(discarded_names) == 1: + self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})") + else: + names_str = ", ".join(map(repr, discarded_names)) + self.writeline( + f"context.exported_vars.difference_update(({names_str}))" + ) + + def visit_For(self, node: nodes.For, frame: Frame) -> None: + loop_frame = frame.inner() + loop_frame.loop_frame = True + test_frame = frame.inner() + else_frame = frame.inner() + + # try to figure out if we have an extended loop. An extended loop + # is necessary if the loop is in recursive mode if the special loop + # variable is accessed in the body if the body is a scoped block. + extended_loop = ( + node.recursive + or "loop" + in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",)) + or any(block.scoped for block in node.find_all(nodes.Block)) + ) + + loop_ref = None + if extended_loop: + loop_ref = loop_frame.symbols.declare_parameter("loop") + + loop_frame.symbols.analyze_node(node, for_branch="body") + if node.else_: + else_frame.symbols.analyze_node(node, for_branch="else") + + if node.test: + loop_filter_func = self.temporary_identifier() + test_frame.symbols.analyze_node(node, for_branch="test") + self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test) + self.indent() + self.enter_frame(test_frame) + self.writeline(self.choose_async("async for ", "for ")) + self.visit(node.target, loop_frame) + self.write(" in ") + self.write(self.choose_async("auto_aiter(fiter)", "fiter")) + self.write(":") + self.indent() + self.writeline("if ", node.test) + self.visit(node.test, test_frame) + self.write(":") + self.indent() + self.writeline("yield ") + self.visit(node.target, loop_frame) + self.outdent(3) + self.leave_frame(test_frame, with_python_scope=True) + + # if we don't have an recursive loop we have to find the shadowed + # variables at that point. Because loops can be nested but the loop + # variable is a special one we have to enforce aliasing for it. 
+ if node.recursive: + self.writeline( + f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node + ) + self.indent() + self.buffer(loop_frame) + + # Use the same buffer for the else frame + else_frame.buffer = loop_frame.buffer + + # make sure the loop variable is a special one and raise a template + # assertion error if a loop tries to write to loop + if extended_loop: + self.writeline(f"{loop_ref} = missing") + + for name in node.find_all(nodes.Name): + if name.ctx == "store" and name.name == "loop": + self.fail( + "Can't assign to special loop variable in for-loop target", + name.lineno, + ) + + if node.else_: + iteration_indicator = self.temporary_identifier() + self.writeline(f"{iteration_indicator} = 1") + + self.writeline(self.choose_async("async for ", "for "), node) + self.visit(node.target, loop_frame) + if extended_loop: + self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(") + else: + self.write(" in ") + + if node.test: + self.write(f"{loop_filter_func}(") + if node.recursive: + self.write("reciter") + else: + if self.environment.is_async and not extended_loop: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async and not extended_loop: + self.write(")") + if node.test: + self.write(")") + + if node.recursive: + self.write(", undefined, loop_render_func, depth):") + else: + self.write(", undefined):" if extended_loop else ":") + + self.indent() + self.enter_frame(loop_frame) + + self.writeline("_loop_vars = {}") + self.blockvisit(node.body, loop_frame) + if node.else_: + self.writeline(f"{iteration_indicator} = 0") + self.outdent() + self.leave_frame( + loop_frame, with_python_scope=node.recursive and not node.else_ + ) + + if node.else_: + self.writeline(f"if {iteration_indicator}:") + self.indent() + self.enter_frame(else_frame) + self.blockvisit(node.else_, else_frame) + self.leave_frame(else_frame) + self.outdent() + + # if the node was recursive we have to return the buffer contents + # and start the iteration code + if node.recursive: + self.return_buffer_contents(loop_frame) + self.outdent() + self.start_write(frame, node) + self.write(f"{self.choose_async('await ')}loop(") + if self.environment.is_async: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async: + self.write(")") + self.write(", loop)") + self.end_write(frame) + + # at the end of the iteration, clear any assignments made in the + # loop from the top level + if self._assign_stack: + self._assign_stack[-1].difference_update(loop_frame.symbols.stores) + + def visit_If(self, node: nodes.If, frame: Frame) -> None: + if_frame = frame.soft() + self.writeline("if ", node) + self.visit(node.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(node.body, if_frame) + self.outdent() + for elif_ in node.elif_: + self.writeline("elif ", elif_) + self.visit(elif_.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(elif_.body, if_frame) + self.outdent() + if node.else_: + self.writeline("else:") + self.indent() + self.blockvisit(node.else_, if_frame) + self.outdent() + + def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None: + macro_frame, macro_ref = self.macro_body(node, frame) + self.newline() + if frame.toplevel: + if not node.name.startswith("_"): + self.write(f"context.exported_vars.add({node.name!r})") + self.writeline(f"context.vars[{node.name!r}] = ") + self.write(f"{frame.symbols.ref(node.name)} = ") + self.macro_def(macro_ref, macro_frame) + + def visit_CallBlock(self, 
node: nodes.CallBlock, frame: Frame) -> None: + call_frame, macro_ref = self.macro_body(node, frame) + self.writeline("caller = ") + self.macro_def(macro_ref, call_frame) + self.start_write(frame, node) + self.visit_Call(node.call, frame, forward_caller=True) + self.end_write(frame) + + def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None: + filter_frame = frame.inner() + filter_frame.symbols.analyze_node(node) + self.enter_frame(filter_frame) + self.buffer(filter_frame) + self.blockvisit(node.body, filter_frame) + self.start_write(frame, node) + self.visit_Filter(node.filter, filter_frame) + self.end_write(frame) + self.leave_frame(filter_frame) + + def visit_With(self, node: nodes.With, frame: Frame) -> None: + with_frame = frame.inner() + with_frame.symbols.analyze_node(node) + self.enter_frame(with_frame) + for target, expr in zip(node.targets, node.values): + self.newline() + self.visit(target, with_frame) + self.write(" = ") + self.visit(expr, frame) + self.blockvisit(node.body, with_frame) + self.leave_frame(with_frame) + + def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None: + self.newline(node) + self.visit(node.node, frame) + + class _FinalizeInfo(t.NamedTuple): + const: t.Optional[t.Callable[..., str]] + src: t.Optional[str] + + @staticmethod + def _default_finalize(value: t.Any) -> t.Any: + """The default finalize function if the environment isn't + configured with one. Or, if the environment has one, this is + called on that function's output for constants. + """ + return str(value) + + _finalize: t.Optional[_FinalizeInfo] = None + + def _make_finalize(self) -> _FinalizeInfo: + """Build the finalize function to be used on constants and at + runtime. Cached so it's only created once for all output nodes. + + Returns a ``namedtuple`` with the following attributes: + + ``const`` + A function to finalize constant data at compile time. + + ``src`` + Source code to output around nodes to be evaluated at + runtime. + """ + if self._finalize is not None: + return self._finalize + + finalize: t.Optional[t.Callable[..., t.Any]] + finalize = default = self._default_finalize + src = None + + if self.environment.finalize: + src = "environment.finalize(" + env_finalize = self.environment.finalize + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(env_finalize) # type: ignore + ) + finalize = None + + if pass_arg is None: + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(value)) + + else: + src = f"{src}{pass_arg}, " + + if pass_arg == "environment": + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(self.environment, value)) + + self._finalize = self._FinalizeInfo(finalize, src) + return self._finalize + + def _output_const_repr(self, group: t.Iterable[t.Any]) -> str: + """Given a group of constant values converted from ``Output`` + child nodes, produce a string to write to the template module + source. + """ + return repr(concat(group)) + + def _output_child_to_const( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> str: + """Try to optimize a child of an ``Output`` node by trying to + convert it to constant, finalized data at compile time. + + If :exc:`Impossible` is raised, the node is not constant and + will be evaluated at runtime. Any other exception will also be + evaluated at runtime for easier debugging. 
+ """ + const = node.as_const(frame.eval_ctx) + + if frame.eval_ctx.autoescape: + const = escape(const) + + # Template data doesn't go through finalize. + if isinstance(node, nodes.TemplateData): + return str(const) + + return finalize.const(const) # type: ignore + + def _output_child_pre( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code before visiting a child of an + ``Output`` node. + """ + if frame.eval_ctx.volatile: + self.write("(escape if context.eval_ctx.autoescape else str)(") + elif frame.eval_ctx.autoescape: + self.write("escape(") + else: + self.write("str(") + + if finalize.src is not None: + self.write(finalize.src) + + def _output_child_post( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code after visiting a child of an + ``Output`` node. + """ + self.write(")") + + if finalize.src is not None: + self.write(")") + + def visit_Output(self, node: nodes.Output, frame: Frame) -> None: + # If an extends is active, don't render outside a block. + if frame.require_output_check: + # A top-level extends is known to exist at compile time. + if self.has_known_extends: + return + + self.writeline("if parent_template is None:") + self.indent() + + finalize = self._make_finalize() + body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = [] + + # Evaluate constants at compile time if possible. Each item in + # body will be either a list of static data or a node to be + # evaluated at runtime. + for child in node.nodes: + try: + if not ( + # If the finalize function requires runtime context, + # constants can't be evaluated at compile time. + finalize.const + # Unless it's basic template data that won't be + # finalized anyway. + or isinstance(child, nodes.TemplateData) + ): + raise nodes.Impossible() + + const = self._output_child_to_const(child, frame, finalize) + except (nodes.Impossible, Exception): + # The node was not constant and needs to be evaluated at + # runtime. Or another error was raised, which is easier + # to debug at runtime. + body.append(child) + continue + + if body and isinstance(body[-1], list): + body[-1].append(const) + else: + body.append([const]) + + if frame.buffer is not None: + if len(body) == 1: + self.writeline(f"{frame.buffer}.append(") + else: + self.writeline(f"{frame.buffer}.extend((") + + self.indent() + + for item in body: + if isinstance(item, list): + # A group of constant data to join and output. + val = self._output_const_repr(item) + + if frame.buffer is None: + self.writeline("yield " + val) + else: + self.writeline(val + ",") + else: + if frame.buffer is None: + self.writeline("yield ", item) + else: + self.newline(item) + + # A node to be evaluated at runtime. + self._output_child_pre(item, frame, finalize) + self.visit(item, frame) + self._output_child_post(item, frame, finalize) + + if frame.buffer is not None: + self.write(",") + + if frame.buffer is not None: + self.outdent() + self.writeline(")" if len(body) == 1 else "))") + + if frame.require_output_check: + self.outdent() + + def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None: + self.push_assign_tracking() + self.newline(node) + self.visit(node.target, frame) + self.write(" = ") + self.visit(node.node, frame) + self.pop_assign_tracking(frame) + + def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None: + self.push_assign_tracking() + block_frame = frame.inner() + # This is a special case. 
Since a set block always captures we + # will disable output checks. This way one can use set blocks + # toplevel even in extended templates. + block_frame.require_output_check = False + block_frame.symbols.analyze_node(node) + self.enter_frame(block_frame) + self.buffer(block_frame) + self.blockvisit(node.body, block_frame) + self.newline(node) + self.visit(node.target, frame) + self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") + if node.filter is not None: + self.visit_Filter(node.filter, block_frame) + else: + self.write(f"concat({block_frame.buffer})") + self.write(")") + self.pop_assign_tracking(frame) + self.leave_frame(block_frame) + + # -- Expression Visitors + + def visit_Name(self, node: nodes.Name, frame: Frame) -> None: + if node.ctx == "store" and ( + frame.toplevel or frame.loop_frame or frame.block_frame + ): + if self._assign_stack: + self._assign_stack[-1].add(node.name) + ref = frame.symbols.ref(node.name) + + # If we are looking up a variable we might have to deal with the + # case where it's undefined. We can skip that case if the load + # instruction indicates a parameter which are always defined. + if node.ctx == "load": + load = frame.symbols.find_load(ref) + if not ( + load is not None + and load[0] == VAR_LOAD_PARAMETER + and not self.parameter_is_undeclared(ref) + ): + self.write( + f"(undefined(name={node.name!r}) if {ref} is missing else {ref})" + ) + return + + self.write(ref) + + def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None: + # NSRefs can only be used to store values; since they use the normal + # `foo.bar` notation they will be parsed as a normal attribute access + # when used anywhere but in a `set` context + ref = frame.symbols.ref(node.name) + self.writeline(f"if not isinstance({ref}, Namespace):") + self.indent() + self.writeline( + "raise TemplateRuntimeError" + '("cannot assign attribute on non-namespace object")' + ) + self.outdent() + self.writeline(f"{ref}[{node.attr!r}]") + + def visit_Const(self, node: nodes.Const, frame: Frame) -> None: + val = node.as_const(frame.eval_ctx) + if isinstance(val, float): + self.write(str(val)) + else: + self.write(repr(val)) + + def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None: + try: + self.write(repr(node.as_const(frame.eval_ctx))) + except nodes.Impossible: + self.write( + f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})" + ) + + def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None: + self.write("(") + idx = -1 + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write(",)" if idx == 0 else ")") + + def visit_List(self, node: nodes.List, frame: Frame) -> None: + self.write("[") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write("]") + + def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None: + self.write("{") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item.key, frame) + self.write(": ") + self.visit(item.value, frame) + self.write("}") + + visit_Add = _make_binop("+") + visit_Sub = _make_binop("-") + visit_Mul = _make_binop("*") + visit_Div = _make_binop("/") + visit_FloorDiv = _make_binop("//") + visit_Pow = _make_binop("**") + visit_Mod = _make_binop("%") + visit_And = _make_binop("and") + visit_Or = _make_binop("or") + visit_Pos = _make_unop("+") + visit_Neg = _make_unop("-") + visit_Not = _make_unop("not ") + + @optimizeconst + def visit_Concat(self, node: 
nodes.Concat, frame: Frame) -> None: + if frame.eval_ctx.volatile: + func_name = "(markup_join if context.eval_ctx.volatile else str_join)" + elif frame.eval_ctx.autoescape: + func_name = "markup_join" + else: + func_name = "str_join" + self.write(f"{func_name}((") + for arg in node.nodes: + self.visit(arg, frame) + self.write(", ") + self.write("))") + + @optimizeconst + def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None: + self.write("(") + self.visit(node.expr, frame) + for op in node.ops: + self.visit(op, frame) + self.write(")") + + def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None: + self.write(f" {operators[node.op]} ") + self.visit(node.expr, frame) + + @optimizeconst + def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getattr(") + self.visit(node.node, frame) + self.write(f", {node.attr!r})") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None: + # slices bypass the environment getitem method. + if isinstance(node.arg, nodes.Slice): + self.visit(node.node, frame) + self.write("[") + self.visit(node.arg, frame) + self.write("]") + else: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getitem(") + self.visit(node.node, frame) + self.write(", ") + self.visit(node.arg, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None: + if node.start is not None: + self.visit(node.start, frame) + self.write(":") + if node.stop is not None: + self.visit(node.stop, frame) + if node.step is not None: + self.write(":") + self.visit(node.step, frame) + + @contextmanager + def _filter_test_common( + self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool + ) -> t.Iterator[None]: + if self.environment.is_async: + self.write("(await auto_await(") + + if is_filter: + self.write(f"{self.filters[node.name]}(") + func = self.environment.filters.get(node.name) + else: + self.write(f"{self.tests[node.name]}(") + func = self.environment.tests.get(node.name) + + # When inside an If or CondExpr frame, allow the filter to be + # undefined at compile time and only raise an error if it's + # actually called at runtime. See pull_dependencies. + if func is None and not frame.soft_frame: + type_name = "filter" if is_filter else "test" + self.fail(f"No {type_name} named {node.name!r}.", node.lineno) + + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(func) # type: ignore + ) + + if pass_arg is not None: + self.write(f"{pass_arg}, ") + + # Back to the visitor function to handle visiting the target of + # the filter or test. 
+ yield + + self.signature(node, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None: + with self._filter_test_common(node, frame, True): + # if the filter node is None we are inside a filter block + # and want to write to the current buffer + if node.node is not None: + self.visit(node.node, frame) + elif frame.eval_ctx.volatile: + self.write( + f"(Markup(concat({frame.buffer}))" + f" if context.eval_ctx.autoescape else concat({frame.buffer}))" + ) + elif frame.eval_ctx.autoescape: + self.write(f"Markup(concat({frame.buffer}))") + else: + self.write(f"concat({frame.buffer})") + + @optimizeconst + def visit_Test(self, node: nodes.Test, frame: Frame) -> None: + with self._filter_test_common(node, frame, False): + self.visit(node.node, frame) + + @optimizeconst + def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None: + frame = frame.soft() + + def write_expr2() -> None: + if node.expr2 is not None: + self.visit(node.expr2, frame) + return + + self.write( + f'cond_expr_undefined("the inline if-expression on' + f" {self.position(node)} evaluated to false and no else" + f' section was defined.")' + ) + + self.write("(") + self.visit(node.expr1, frame) + self.write(" if ") + self.visit(node.test, frame) + self.write(" else ") + write_expr2() + self.write(")") + + @optimizeconst + def visit_Call( + self, node: nodes.Call, frame: Frame, forward_caller: bool = False + ) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + if self.environment.sandboxed: + self.write("environment.call(context, ") + else: + self.write("context.call(") + self.visit(node.node, frame) + extra_kwargs = {"caller": "caller"} if forward_caller else None + loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {} + block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {} + if extra_kwargs: + extra_kwargs.update(loop_kwargs, **block_kwargs) + elif loop_kwargs or block_kwargs: + extra_kwargs = dict(loop_kwargs, **block_kwargs) + self.signature(node, frame, extra_kwargs) + self.write(")") + if self.environment.is_async: + self.write("))") + + def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None: + self.write(node.key + "=") + self.visit(node.value, frame) + + # -- Unused nodes for extensions + + def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None: + self.write("Markup(") + self.visit(node.expr, frame) + self.write(")") + + def visit_MarkSafeIfAutoescape( + self, node: nodes.MarkSafeIfAutoescape, frame: Frame + ) -> None: + self.write("(Markup if context.eval_ctx.autoescape else identity)(") + self.visit(node.expr, frame) + self.write(")") + + def visit_EnvironmentAttribute( + self, node: nodes.EnvironmentAttribute, frame: Frame + ) -> None: + self.write("environment." 
+ node.name) + + def visit_ExtensionAttribute( + self, node: nodes.ExtensionAttribute, frame: Frame + ) -> None: + self.write(f"environment.extensions[{node.identifier!r}].{node.name}") + + def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None: + self.write(self.import_aliases[node.importname]) + + def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None: + self.write(node.name) + + def visit_ContextReference( + self, node: nodes.ContextReference, frame: Frame + ) -> None: + self.write("context") + + def visit_DerivedContextReference( + self, node: nodes.DerivedContextReference, frame: Frame + ) -> None: + self.write(self.derive_context(frame)) + + def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None: + self.writeline("continue", node) + + def visit_Break(self, node: nodes.Break, frame: Frame) -> None: + self.writeline("break", node) + + def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None: + scope_frame = frame.inner() + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + + def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None: + ctx = self.temporary_identifier() + self.writeline(f"{ctx} = {self.derive_context(frame)}") + self.writeline(f"{ctx}.vars = ") + self.visit(node.context, frame) + self.push_context_reference(ctx) + + scope_frame = frame.inner(isolated=True) + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + self.pop_context_reference() + + def visit_EvalContextModifier( + self, node: nodes.EvalContextModifier, frame: Frame + ) -> None: + for keyword in node.options: + self.writeline(f"context.eval_ctx.{keyword.key} = ") + self.visit(keyword.value, frame) + try: + val = keyword.value.as_const(frame.eval_ctx) + except nodes.Impossible: + frame.eval_ctx.volatile = True + else: + setattr(frame.eval_ctx, keyword.key, val) + + def visit_ScopedEvalContextModifier( + self, node: nodes.ScopedEvalContextModifier, frame: Frame + ) -> None: + old_ctx_name = self.temporary_identifier() + saved_ctx = frame.eval_ctx.save() + self.writeline(f"{old_ctx_name} = context.eval_ctx.save()") + self.visit_EvalContextModifier(node, frame) + for child in node.body: + self.visit(child, frame) + frame.eval_ctx.revert(saved_ctx) + self.writeline(f"context.eval_ctx.revert({old_ctx_name})") diff --git a/MLPY/Lib/site-packages/jinja2/constants.py b/MLPY/Lib/site-packages/jinja2/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..41a1c23b0a7fe134b1f662545876eb65b31b071e --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/constants.py @@ -0,0 +1,20 @@ +#: list of lorem ipsum words used by the lipsum() helper function +LOREM_IPSUM_WORDS = """\ +a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at +auctor augue bibendum blandit class commodo condimentum congue consectetuer +consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus +diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend +elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames +faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac +hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum +justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem +luctus maecenas magna magnis malesuada massa mattis 
mauris metus mi molestie +mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non +nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque +penatibus per pharetra phasellus placerat platea porta porttitor posuere +potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus +ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit +sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor +tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices +ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus +viverra volutpat vulputate""" diff --git a/MLPY/Lib/site-packages/jinja2/debug.py b/MLPY/Lib/site-packages/jinja2/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..7ed7e9297e01b87c4e999d19d48a4265b38b574f --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/debug.py @@ -0,0 +1,191 @@ +import sys +import typing as t +from types import CodeType +from types import TracebackType + +from .exceptions import TemplateSyntaxError +from .utils import internal_code +from .utils import missing + +if t.TYPE_CHECKING: + from .runtime import Context + + +def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException: + """Rewrite the current exception to replace any tracebacks from + within compiled template code with tracebacks that look like they + came from the template source. + + This must be called within an ``except`` block. + + :param source: For ``TemplateSyntaxError``, the original source if + known. + :return: The original exception with the rewritten traceback. + """ + _, exc_value, tb = sys.exc_info() + exc_value = t.cast(BaseException, exc_value) + tb = t.cast(TracebackType, tb) + + if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: + exc_value.translated = True + exc_value.source = source + # Remove the old traceback, otherwise the frames from the + # compiler still show up. + exc_value.with_traceback(None) + # Outside of runtime, so the frame isn't executing template + # code, but it still needs to point at the template. + tb = fake_traceback( + exc_value, None, exc_value.filename or "", exc_value.lineno + ) + else: + # Skip the frame for the render function. + tb = tb.tb_next + + stack = [] + + # Build the stack of traceback object, replacing any in template + # code with the source file and line information. + while tb is not None: + # Skip frames decorated with @internalcode. These are internal + # calls that aren't useful in template debugging output. + if tb.tb_frame.f_code in internal_code: + tb = tb.tb_next + continue + + template = tb.tb_frame.f_globals.get("__jinja_template__") + + if template is not None: + lineno = template.get_corresponding_lineno(tb.tb_lineno) + fake_tb = fake_traceback(exc_value, tb, template.filename, lineno) + stack.append(fake_tb) + else: + stack.append(tb) + + tb = tb.tb_next + + tb_next = None + + # Assign tb_next in reverse to avoid circular references. + for tb in reversed(stack): + tb.tb_next = tb_next + tb_next = tb + + return exc_value.with_traceback(tb_next) + + +def fake_traceback( # type: ignore + exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int +) -> TracebackType: + """Produce a new traceback object that looks like it came from the + template source instead of the compiled code. 
The filename, line + number, and location name will point to the template, and the local + variables will be the current template context. + + :param exc_value: The original exception to be re-raised to create + the new traceback. + :param tb: The original traceback to get the local variables and + code info from. + :param filename: The template filename. + :param lineno: The line number in the template source. + """ + if tb is not None: + # Replace the real locals with the context that would be + # available at that point in the template. + locals = get_template_locals(tb.tb_frame.f_locals) + locals.pop("__jinja_exception__", None) + else: + locals = {} + + globals = { + "__name__": filename, + "__file__": filename, + "__jinja_exception__": exc_value, + } + # Raise an exception at the correct line number. + code: CodeType = compile( + "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec" + ) + + # Build a new code object that points to the template file and + # replaces the location with a block name. + location = "template" + + if tb is not None: + function = tb.tb_frame.f_code.co_name + + if function == "root": + location = "top-level template code" + elif function.startswith("block_"): + location = f"block {function[6:]!r}" + + if sys.version_info >= (3, 8): + code = code.replace(co_name=location) + else: + code = CodeType( + code.co_argcount, + code.co_kwonlyargcount, + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + code.co_consts, + code.co_names, + code.co_varnames, + code.co_filename, + location, + code.co_firstlineno, + code.co_lnotab, + code.co_freevars, + code.co_cellvars, + ) + + # Execute the new code, which is guaranteed to raise, and return + # the new traceback without this frame. + try: + exec(code, globals, locals) + except BaseException: + return sys.exc_info()[2].tb_next # type: ignore + + +def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]: + """Based on the runtime locals, get the context that would be + available at that point in the template. + """ + # Start with the current template context. + ctx: "t.Optional[Context]" = real_locals.get("context") + + if ctx is not None: + data: t.Dict[str, t.Any] = ctx.get_all().copy() + else: + data = {} + + # Might be in a derived context that only sets local variables + # rather than pushing a context. Local variables follow the scheme + # l_depth_name. Find the highest-depth local that has a value for + # each name. + local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {} + + for name, value in real_locals.items(): + if not name.startswith("l_") or value is missing: + # Not a template variable, or no longer relevant. + continue + + try: + _, depth_str, name = name.split("_", 2) + depth = int(depth_str) + except ValueError: + continue + + cur_depth = local_overrides.get(name, (-1,))[0] + + if cur_depth < depth: + local_overrides[name] = (depth, value) + + # Modify the context with any derived context. 
+ for name, (_, value) in local_overrides.items(): + if value is missing: + data.pop(name, None) + else: + data[name] = value + + return data diff --git a/MLPY/Lib/site-packages/jinja2/defaults.py b/MLPY/Lib/site-packages/jinja2/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..638cad3d2d8907330bde56e2b76c9b185c523b45 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/defaults.py @@ -0,0 +1,48 @@ +import typing as t + +from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 +from .tests import TESTS as DEFAULT_TESTS # noqa: F401 +from .utils import Cycler +from .utils import generate_lorem_ipsum +from .utils import Joiner +from .utils import Namespace + +if t.TYPE_CHECKING: + import typing_extensions as te + +# defaults for the parser / lexer +BLOCK_START_STRING = "{%" +BLOCK_END_STRING = "%}" +VARIABLE_START_STRING = "{{" +VARIABLE_END_STRING = "}}" +COMMENT_START_STRING = "{#" +COMMENT_END_STRING = "#}" +LINE_STATEMENT_PREFIX: t.Optional[str] = None +LINE_COMMENT_PREFIX: t.Optional[str] = None +TRIM_BLOCKS = False +LSTRIP_BLOCKS = False +NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n" +KEEP_TRAILING_NEWLINE = False + +# default filters, tests and namespace + +DEFAULT_NAMESPACE = { + "range": range, + "dict": dict, + "lipsum": generate_lorem_ipsum, + "cycler": Cycler, + "joiner": Joiner, + "namespace": Namespace, +} + +# default policies +DEFAULT_POLICIES: t.Dict[str, t.Any] = { + "compiler.ascii_str": True, + "urlize.rel": "noopener", + "urlize.target": None, + "urlize.extra_schemes": None, + "truncate.leeway": 5, + "json.dumps_function": None, + "json.dumps_kwargs": {"sort_keys": True}, + "ext.i18n.trimmed": False, +} diff --git a/MLPY/Lib/site-packages/jinja2/environment.py b/MLPY/Lib/site-packages/jinja2/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..1d3be0bed08d710f2a93079d62552d63fb09b627 --- /dev/null +++ b/MLPY/Lib/site-packages/jinja2/environment.py @@ -0,0 +1,1675 @@ +"""Classes for managing templates and their runtime and compile time +options. +""" + +import os +import typing +import typing as t +import weakref +from collections import ChainMap +from functools import lru_cache +from functools import partial +from functools import reduce +from types import CodeType + +from markupsafe import Markup + +from . 
import nodes +from .compiler import CodeGenerator +from .compiler import generate +from .defaults import BLOCK_END_STRING +from .defaults import BLOCK_START_STRING +from .defaults import COMMENT_END_STRING +from .defaults import COMMENT_START_STRING +from .defaults import DEFAULT_FILTERS # type: ignore[attr-defined] +from .defaults import DEFAULT_NAMESPACE +from .defaults import DEFAULT_POLICIES +from .defaults import DEFAULT_TESTS # type: ignore[attr-defined] +from .defaults import KEEP_TRAILING_NEWLINE +from .defaults import LINE_COMMENT_PREFIX +from .defaults import LINE_STATEMENT_PREFIX +from .defaults import LSTRIP_BLOCKS +from .defaults import NEWLINE_SEQUENCE +from .defaults import TRIM_BLOCKS +from .defaults import VARIABLE_END_STRING +from .defaults import VARIABLE_START_STRING +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .lexer import get_lexer +from .lexer import Lexer +from .lexer import TokenStream +from .nodes import EvalContext +from .parser import Parser +from .runtime import Context +from .runtime import new_context +from .runtime import Undefined +from .utils import _PassArg +from .utils import concat +from .utils import consume +from .utils import import_string +from .utils import internalcode +from .utils import LRUCache +from .utils import missing + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .bccache import BytecodeCache + from .ext import Extension + from .loaders import BaseLoader + +_env_bound = t.TypeVar("_env_bound", bound="Environment") + + +# for direct template usage we have up to ten living environments +@lru_cache(maxsize=10) +def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound: + """Return a new spontaneous environment. A spontaneous environment + is used for templates created directly rather than through an + existing environment. + + :param cls: Environment class to create. + :param args: Positional arguments passed to environment. + """ + env = cls(*args) + env.shared = True + return env + + +def create_cache( + size: int, +) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[t.Any]", str], "Template"]]: + """Return the cache class for the given size.""" + if size == 0: + return None + + if size < 0: + return {} + + return LRUCache(size) # type: ignore + + +def copy_cache( + cache: t.Optional[t.MutableMapping[t.Any, t.Any]], +) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[t.Any]", str], "Template"]]: + """Create an empty copy of the given cache.""" + if cache is None: + return None + + if type(cache) is dict: # noqa E721 + return {} + + return LRUCache(cache.capacity) # type: ignore + + +def load_extensions( + environment: "Environment", + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]], +) -> t.Dict[str, "Extension"]: + """Load the extensions from the list and bind it to the environment. + Returns a dict of instantiated extensions. + """ + result = {} + + for extension in extensions: + if isinstance(extension, str): + extension = t.cast(t.Type["Extension"], import_string(extension)) + + result[extension.identifier] = extension(environment) + + return result + + +def _environment_config_check(environment: "Environment") -> "Environment": + """Perform a sanity check on the environment.""" + assert issubclass( + environment.undefined, Undefined + ), "'undefined' must be a subclass of 'jinja2.Undefined'." 
+ assert ( + environment.block_start_string + != environment.variable_start_string + != environment.comment_start_string + ), "block, variable and comment start strings must be different." + assert environment.newline_sequence in { + "\r", + "\r\n", + "\n", + }, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'." + return environment + + +class Environment: + r"""The core component of Jinja is the `Environment`. It contains + important shared variables like configuration, filters, tests, + globals and others. Instances of this class may be modified if + they are not shared and if no template was loaded so far. + Modifications on environments after the first template was loaded + will lead to surprising effects and undefined behavior. + + Here are the possible initialization parameters: + + `block_start_string` + The string marking the beginning of a block. Defaults to ``'{%'``. + + `block_end_string` + The string marking the end of a block. Defaults to ``'%}'``. + + `variable_start_string` + The string marking the beginning of a print statement. + Defaults to ``'{{'``. + + `variable_end_string` + The string marking the end of a print statement. Defaults to + ``'}}'``. + + `comment_start_string` + The string marking the beginning of a comment. Defaults to ``'{#'``. + + `comment_end_string` + The string marking the end of a comment. Defaults to ``'#}'``. + + `line_statement_prefix` + If given and a string, this will be used as prefix for line based + statements. See also :ref:`line-statements`. + + `line_comment_prefix` + If given and a string, this will be used as prefix for line based + comments. See also :ref:`line-statements`. + + .. versionadded:: 2.2 + + `trim_blocks` + If this is set to ``True`` the first newline after a block is + removed (block, not variable tag!). Defaults to `False`. + + `lstrip_blocks` + If this is set to ``True`` leading spaces and tabs are stripped + from the start of a line to a block. Defaults to `False`. + + `newline_sequence` + The sequence that starts a newline. Must be one of ``'\r'``, + ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a + useful default for Linux and OS X systems as well as web + applications. + + `keep_trailing_newline` + Preserve the trailing newline when rendering templates. + The default is ``False``, which causes a single newline, + if present, to be stripped from the end of the template. + + .. versionadded:: 2.7 + + `extensions` + List of Jinja extensions to use. This can either be import paths + as strings or extension classes. For more information have a + look at :ref:`the extensions documentation `. + + `optimized` + should the optimizer be enabled? Default is ``True``. + + `undefined` + :class:`Undefined` or a subclass of it that is used to represent + undefined values in the template. + + `finalize` + A callable that can be used to process the result of a variable + expression before it is output. For example one can convert + ``None`` implicitly into an empty string here. + + `autoescape` + If set to ``True`` the XML/HTML autoescaping feature is enabled by + default. For more details about autoescaping see + :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also + be a callable that is passed the template name and has to + return ``True`` or ``False`` depending on autoescape should be + enabled by default. + + .. versionchanged:: 2.4 + `autoescape` can now be a function + + `loader` + The template loader for this environment. + + `cache_size` + The size of the cache. 
Per default this is ``400`` which means + that if more than 400 templates are loaded the loader will clean + out the least recently used template. If the cache size is set to + ``0`` templates are recompiled all the time, if the cache size is + ``-1`` the cache will not be cleaned. + + .. versionchanged:: 2.8 + The cache size was increased to 400 from a low 50. + + `auto_reload` + Some loaders load templates from locations where the template + sources may change (ie: file system or database). If + ``auto_reload`` is set to ``True`` (default) every time a template is + requested the loader checks if the source changed and if yes, it + will reload the template. For higher performance it's possible to + disable that. + + `bytecode_cache` + If set to a bytecode cache object, this object will provide a + cache for the internal Jinja bytecode so that templates don't + have to be parsed if they were not changed. + + See :ref:`bytecode-cache` for more information. + + `enable_async` + If set to true this enables async template execution which + allows using async functions and generators. + """ + + #: if this environment is sandboxed. Modifying this variable won't make + #: the environment sandboxed though. For a real sandboxed environment + #: have a look at jinja2.sandbox. This flag alone controls the code + #: generation by the compiler. + sandboxed = False + + #: True if the environment is just an overlay + overlayed = False + + #: the environment this environment is linked to if it is an overlay + linked_to: t.Optional["Environment"] = None + + #: shared environments have this set to `True`. A shared environment + #: must not be modified + shared = False + + #: the class that is used for code generation. See + #: :class:`~jinja2.compiler.CodeGenerator` for more information. + code_generator_class: t.Type["CodeGenerator"] = CodeGenerator + + concat = "".join + + #: the context class that is used for templates. See + #: :class:`~jinja2.runtime.Context` for more information. + context_class: t.Type[Context] = Context + + template_class: t.Type["Template"] + + def __init__( + self, + block_start_string: str = BLOCK_START_STRING, + block_end_string: str = BLOCK_END_STRING, + variable_start_string: str = VARIABLE_START_STRING, + variable_end_string: str = VARIABLE_END_STRING, + comment_start_string: str = COMMENT_START_STRING, + comment_end_string: str = COMMENT_END_STRING, + line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX, + line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX, + trim_blocks: bool = TRIM_BLOCKS, + lstrip_blocks: bool = LSTRIP_BLOCKS, + newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE, + keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE, + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (), + optimized: bool = True, + undefined: t.Type[Undefined] = Undefined, + finalize: t.Optional[t.Callable[..., t.Any]] = None, + autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False, + loader: t.Optional["BaseLoader"] = None, + cache_size: int = 400, + auto_reload: bool = True, + bytecode_cache: t.Optional["BytecodeCache"] = None, + enable_async: bool = False, + ): + # !!Important notice!! + # The constructor accepts quite a few arguments that should be + # passed by keyword rather than position. 
However it's important to + # not change the order of arguments because it's used at least + # internally in those cases: + # - spontaneous environments (i18n extension and Template) + # - unittests + # If parameter changes are required only add parameters at the end + # and don't change the arguments (or the defaults!) of the arguments + # existing already. + + # lexer / parser information + self.block_start_string = block_start_string + self.block_end_string = block_end_string + self.variable_start_string = variable_start_string + self.variable_end_string = variable_end_string + self.comment_start_string = comment_start_string + self.comment_end_string = comment_end_string + self.line_statement_prefix = line_statement_prefix + self.line_comment_prefix = line_comment_prefix + self.trim_blocks = trim_blocks + self.lstrip_blocks = lstrip_blocks + self.newline_sequence = newline_sequence + self.keep_trailing_newline = keep_trailing_newline + + # runtime information + self.undefined: t.Type[Undefined] = undefined + self.optimized = optimized + self.finalize = finalize + self.autoescape = autoescape + + # defaults + self.filters = DEFAULT_FILTERS.copy() + self.tests = DEFAULT_TESTS.copy() + self.globals = DEFAULT_NAMESPACE.copy() + + # set the loader provided + self.loader = loader + self.cache = create_cache(cache_size) + self.bytecode_cache = bytecode_cache + self.auto_reload = auto_reload + + # configurable policies + self.policies = DEFAULT_POLICIES.copy() + + # load extensions + self.extensions = load_extensions(self, extensions) + + self.is_async = enable_async + _environment_config_check(self) + + def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None: + """Adds an extension after the environment was created. + + .. versionadded:: 2.5 + """ + self.extensions.update(load_extensions(self, [extension])) + + def extend(self, **attributes: t.Any) -> None: + """Add the items to the instance of the environment if they do not exist + yet. This is used by :ref:`extensions ` to register + callbacks and configuration values without breaking inheritance. + """ + for key, value in attributes.items(): + if not hasattr(self, key): + setattr(self, key, value) + + def overlay( + self, + block_start_string: str = missing, + block_end_string: str = missing, + variable_start_string: str = missing, + variable_end_string: str = missing, + comment_start_string: str = missing, + comment_end_string: str = missing, + line_statement_prefix: t.Optional[str] = missing, + line_comment_prefix: t.Optional[str] = missing, + trim_blocks: bool = missing, + lstrip_blocks: bool = missing, + newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = missing, + keep_trailing_newline: bool = missing, + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing, + optimized: bool = missing, + undefined: t.Type[Undefined] = missing, + finalize: t.Optional[t.Callable[..., t.Any]] = missing, + autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing, + loader: t.Optional["BaseLoader"] = missing, + cache_size: int = missing, + auto_reload: bool = missing, + bytecode_cache: t.Optional["BytecodeCache"] = missing, + enable_async: bool = False, + ) -> "Environment": + """Create a new overlay environment that shares all the data with the + current environment except for cache and the overridden attributes. + Extensions cannot be removed for an overlayed environment. 
+        An overlayed environment automatically gets all the extensions of the
+        environment it is linked to plus optional extra extensions.
+
+        Creating overlays should happen after the initial environment was set
+        up completely. Not all attributes are truly linked, some are just
+        copied over so modifications on the original environment may not shine
+        through.
+
+        .. versionchanged:: 3.1.2
+            Added the ``newline_sequence``, ``keep_trailing_newline``,
+            and ``enable_async`` parameters to match ``__init__``.
+        """
+        args = dict(locals())
+        del args["self"], args["cache_size"], args["extensions"], args["enable_async"]
+
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.overlayed = True
+        rv.linked_to = self
+
+        for key, value in args.items():
+            if value is not missing:
+                setattr(rv, key, value)
+
+        if cache_size is not missing:
+            rv.cache = create_cache(cache_size)
+        else:
+            rv.cache = copy_cache(self.cache)
+
+        rv.extensions = {}
+        for key, value in self.extensions.items():
+            rv.extensions[key] = value.bind(rv)
+        if extensions is not missing:
+            rv.extensions.update(load_extensions(rv, extensions))
+
+        if enable_async is not missing:
+            rv.is_async = enable_async
+
+        return _environment_config_check(rv)
+
+    @property
+    def lexer(self) -> Lexer:
+        """The lexer for this environment."""
+        return get_lexer(self)
+
+    def iter_extensions(self) -> t.Iterator["Extension"]:
+        """Iterates over the extensions by priority."""
+        return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+
+    def getitem(
+        self, obj: t.Any, argument: t.Union[str, t.Any]
+    ) -> t.Union[t.Any, Undefined]:
+        """Get an item or attribute of an object but prefer the item."""
+        try:
+            return obj[argument]
+        except (AttributeError, TypeError, LookupError):
+            if isinstance(argument, str):
+                try:
+                    attr = str(argument)
+                except Exception:
+                    pass
+                else:
+                    try:
+                        return getattr(obj, attr)
+                    except AttributeError:
+                        pass
+            return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj: t.Any, attribute: str) -> t.Any:
+        """Get an item or attribute of an object but prefer the attribute.
+        Unlike :meth:`getitem` the attribute *must* be a string.
+        """
+        try:
+            return getattr(obj, attribute)
+        except AttributeError:
+            pass
+        try:
+            return obj[attribute]
+        except (TypeError, LookupError, AttributeError):
+            return self.undefined(obj=obj, name=attribute)
+
+    def _filter_test_common(
+        self,
+        name: t.Union[str, Undefined],
+        value: t.Any,
+        args: t.Optional[t.Sequence[t.Any]],
+        kwargs: t.Optional[t.Mapping[str, t.Any]],
+        context: t.Optional[Context],
+        eval_ctx: t.Optional[EvalContext],
+        is_filter: bool,
+    ) -> t.Any:
+        if is_filter:
+            env_map = self.filters
+            type_name = "filter"
+        else:
+            env_map = self.tests
+            type_name = "test"
+
+        func = env_map.get(name)  # type: ignore
+
+        if func is None:
+            msg = f"No {type_name} named {name!r}."
+
+            if isinstance(name, Undefined):
+                try:
+                    name._fail_with_undefined_error()
+                except Exception as e:
+                    msg = f"{msg} ({e}; did you forget to quote the callable name?)"
+
+            raise TemplateRuntimeError(msg)
+
+        args = [value, *(args if args is not None else ())]
+        kwargs = kwargs if kwargs is not None else {}
+        pass_arg = _PassArg.from_obj(func)
+
+        if pass_arg is _PassArg.context:
+            if context is None:
+                raise TemplateRuntimeError(
+                    f"Attempted to invoke a context {type_name} without context."
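+                    # Editor's note: typically raised when call_filter() or
+                    # call_test() is used without a ``context`` argument for a
+                    # callable decorated with @pass_context.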
+ ) + + args.insert(0, context) + elif pass_arg is _PassArg.eval_context: + if eval_ctx is None: + if context is not None: + eval_ctx = context.eval_ctx + else: + eval_ctx = EvalContext(self) + + args.insert(0, eval_ctx) + elif pass_arg is _PassArg.environment: + args.insert(0, self) + + return func(*args, **kwargs) + + def call_filter( + self, + name: str, + value: t.Any, + args: t.Optional[t.Sequence[t.Any]] = None, + kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + context: t.Optional[Context] = None, + eval_ctx: t.Optional[EvalContext] = None, + ) -> t.Any: + """Invoke a filter on a value the same way the compiler does. + + This might return a coroutine if the filter is running from an + environment in async mode and the filter supports async + execution. It's your responsibility to await this if needed. + + .. versionadded:: 2.7 + """ + return self._filter_test_common( + name, value, args, kwargs, context, eval_ctx, True + ) + + def call_test( + self, + name: str, + value: t.Any, + args: t.Optional[t.Sequence[t.Any]] = None, + kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + context: t.Optional[Context] = None, + eval_ctx: t.Optional[EvalContext] = None, + ) -> t.Any: + """Invoke a test on a value the same way the compiler does. + + This might return a coroutine if the test is running from an + environment in async mode and the test supports async execution. + It's your responsibility to await this if needed. + + .. versionchanged:: 3.0 + Tests support ``@pass_context``, etc. decorators. Added + the ``context`` and ``eval_ctx`` parameters. + + .. versionadded:: 2.7 + """ + return self._filter_test_common( + name, value, args, kwargs, context, eval_ctx, False + ) + + @internalcode + def parse( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> nodes.Template: + """Parse the sourcecode and return the abstract syntax tree. This + tree of nodes is used by the compiler to convert the template into + executable source- or bytecode. This is useful for debugging or to + extract information from templates. + + If you are :ref:`developing Jinja extensions ` + this gives you a good overview of the node tree generated. + """ + try: + return self._parse(source, name, filename) + except TemplateSyntaxError: + self.handle_exception(source=source) + + def _parse( + self, source: str, name: t.Optional[str], filename: t.Optional[str] + ) -> nodes.Template: + """Internal parsing function used by `parse` and `compile`.""" + return Parser(self, source, name, filename).parse() + + def lex( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> t.Iterator[t.Tuple[int, str, str]]: + """Lex the given sourcecode and return a generator that yields + tokens as tuples in the form ``(lineno, token_type, value)``. + This can be useful for :ref:`extension development ` + and debugging templates. + + This does not perform preprocessing. If you want the preprocessing + of the extensions to be applied you have to filter source through + the :meth:`preprocess` method. + """ + source = str(source) + try: + return self.lexer.tokeniter(source, name, filename) + except TemplateSyntaxError: + self.handle_exception(source=source) + + def preprocess( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> str: + """Preprocesses the source with all extensions. 
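+
+        As an illustration (an editorial sketch, not upstream documentation;
+        ``env`` and the inline template string are placeholders), the method
+        can be combined with :meth:`lex` to inspect tokens manually::
+
+            source = env.preprocess("Hello {{ name }}!", name="hello.txt")
+            for lineno, token_type, value in env.lex(source, name="hello.txt"):
+                print(lineno, token_type, value)
+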
This is automatically + called for all parsing and compiling methods but *not* for :meth:`lex` + because there you usually only want the actual source tokenized. + """ + return reduce( + lambda s, e: e.preprocess(s, name, filename), + self.iter_extensions(), + str(source), + ) + + def _tokenize( + self, + source: str, + name: t.Optional[str], + filename: t.Optional[str] = None, + state: t.Optional[str] = None, + ) -> TokenStream: + """Called by the parser to do the preprocessing and filtering + for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. + """ + source = self.preprocess(source, name, filename) + stream = self.lexer.tokenize(source, name, filename, state) + + for ext in self.iter_extensions(): + stream = ext.filter_stream(stream) # type: ignore + + if not isinstance(stream, TokenStream): + stream = TokenStream(stream, name, filename) + + return stream + + def _generate( + self, + source: nodes.Template, + name: t.Optional[str], + filename: t.Optional[str], + defer_init: bool = False, + ) -> str: + """Internal hook that can be overridden to hook a different generate + method in. + + .. versionadded:: 2.5 + """ + return generate( # type: ignore + source, + self, + name, + filename, + defer_init=defer_init, + optimized=self.optimized, + ) + + def _compile(self, source: str, filename: str) -> CodeType: + """Internal hook that can be overridden to hook a different compile + method in. + + .. versionadded:: 2.5 + """ + return compile(source, filename, "exec") + + @typing.overload + def compile( # type: ignore + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: "te.Literal[False]" = False, + defer_init: bool = False, + ) -> CodeType: ... + + @typing.overload + def compile( + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: "te.Literal[True]" = ..., + defer_init: bool = False, + ) -> str: ... + + @internalcode + def compile( + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: bool = False, + defer_init: bool = False, + ) -> t.Union[str, CodeType]: + """Compile a node or template source code. The `name` parameter is + the load name of the template after it was joined using + :meth:`join_path` if necessary, not the filename on the file system. + the `filename` parameter is the estimated filename of the template on + the file system. If the template came from a database or memory this + can be omitted. + + The return value of this method is a python code object. If the `raw` + parameter is `True` the return value will be a string with python + code equivalent to the bytecode returned otherwise. This method is + mainly used internally. + + `defer_init` is use internally to aid the module code generator. This + causes the generated code to be able to import without the global + environment variable to be set. + + .. versionadded:: 2.4 + `defer_init` parameter added. + """ + source_hint = None + try: + if isinstance(source, str): + source_hint = source + source = self._parse(source, name, filename) + source = self._generate(source, name, filename, defer_init=defer_init) + if raw: + return source + if filename is None: + filename = "