diff --git a/.gitattributes b/.gitattributes index 983a95884fdc730a566667fa7144bf22dc8b3485..e98601997668f313b1d6b5a29dfba6712c3e2326 100644 --- a/.gitattributes +++ b/.gitattributes @@ -75,3 +75,5 @@ MLPY/Library/bin/mkl_vml_mc3.1.dll filter=lfs diff=lfs merge=lfs -text MLPY/Library/bin/omptarget.rtl.level0.dll filter=lfs diff=lfs merge=lfs -text MLPY/Library/bin/omptarget.rtl.opencl.dll filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/PyWin32.chm filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text diff --git a/MLPY/Lib/site-packages/__pycache__/isympy.cpython-39.pyc b/MLPY/Lib/site-packages/__pycache__/isympy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2d7701910b2786f83629762d8de9619925f34ad Binary files /dev/null and b/MLPY/Lib/site-packages/__pycache__/isympy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/__pycache__/pythoncom.cpython-39.pyc b/MLPY/Lib/site-packages/__pycache__/pythoncom.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f5d7221f52b497a30dcb4aaeb823bafaaf16ca3 Binary files /dev/null and b/MLPY/Lib/site-packages/__pycache__/pythoncom.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/__pycache__/six.cpython-39.pyc b/MLPY/Lib/site-packages/__pycache__/six.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40dedbf1f39495de486ee7113376e7edb915d7fe Binary files /dev/null and b/MLPY/Lib/site-packages/__pycache__/six.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/__pycache__/typing_extensions.cpython-39.pyc b/MLPY/Lib/site-packages/__pycache__/typing_extensions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..147e596573af37f5a669379758ef67e527d61b3f Binary files /dev/null and b/MLPY/Lib/site-packages/__pycache__/typing_extensions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/_distutils_hack/__init__.py b/MLPY/Lib/site-packages/_distutils_hack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f40996a67efe9e38a6b68242efc2f10fc89e471 --- /dev/null +++ b/MLPY/Lib/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,128 @@ +import sys +import os +import re +import importlib +import warnings + + +is_pypy = '__pypy__' in sys.builtin_module_names + + +warnings.filterwarnings('ignore', + r'.+ distutils\b.+ deprecated', + DeprecationWarning) + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. 
not an editable install), and/or make sure " + "that setuptools is always imported before distutils.") + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + warnings.warn("Setuptools is replacing distutils.") + mods = [name for name in sys.modules if re.match(r'distutils\b', name)] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') + return which == 'local' + + +def ensure_local_distutils(): + clear_distutils() + distutils = importlib.import_module('setuptools._distutils') + distutils.__name__ = 'distutils' + sys.modules['distutils'] = distutils + + # sanity check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. + """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + if path is not None: + return + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + import importlib.abc + import importlib.util + + class DistutilsLoader(importlib.abc.Loader): + + def create_module(self, spec): + return importlib.import_module('setuptools._distutils') + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader('distutils', DistutilsLoader()) + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @staticmethod + def pip_imported_during_build(): + """ + Detect if pip is being imported in a build script. Ref #2355. 
+ """ + import traceback + return any( + frame.f_globals['__file__'].endswith('setup.py') + for frame, line in traceback.walk_stack(None) + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass diff --git a/MLPY/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e79d905142ce6ece63e09ce7c299f774234b4d Binary files /dev/null and b/MLPY/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-39.pyc b/MLPY/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec62c89c3a39afe2afcd56c1b835a7e5c1d7cecc Binary files /dev/null and b/MLPY/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/_distutils_hack/override.py b/MLPY/Lib/site-packages/_distutils_hack/override.py new file mode 100644 index 0000000000000000000000000000000000000000..2cc433a4a55e3b41fa31089918fb62096092f89f --- /dev/null +++ b/MLPY/Lib/site-packages/_distutils_hack/override.py @@ -0,0 +1 @@ +__import__('_distutils_hack').do_override() diff --git a/MLPY/Lib/site-packages/_yaml/__init__.py b/MLPY/Lib/site-packages/_yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694 --- /dev/null +++ b/MLPY/Lib/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. 
+# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/MLPY/Lib/site-packages/_yaml/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/_yaml/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d64b7220644444e1c9b272ba65c4e0b39c3ae2 Binary files /dev/null and b/MLPY/Lib/site-packages/_yaml/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/__init__.py b/MLPY/Lib/site-packages/absl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bd1cd51810385ca0e5e9fed3fb9a804febf27e --- /dev/null +++ b/MLPY/Lib/site-packages/absl/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/absl/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/absl/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d15d97453dcbf781e68f7c1b9dac0928ef82e359 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/__pycache__/app.cpython-39.pyc b/MLPY/Lib/site-packages/absl/__pycache__/app.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3a63c9e407aeffdc7f3161d9a0ccfbac165cad0 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/__pycache__/app.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/__pycache__/command_name.cpython-39.pyc b/MLPY/Lib/site-packages/absl/__pycache__/command_name.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb5693f8856e1066c6b21298ee8c4cb73503148b Binary files /dev/null and b/MLPY/Lib/site-packages/absl/__pycache__/command_name.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/app.py b/MLPY/Lib/site-packages/absl/app.py new file mode 100644 index 0000000000000000000000000000000000000000..d12397b31a9093dd0f9a8622c72a16d31fcb4fa9 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/app.py @@ -0,0 +1,480 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generic entry point for Abseil Python applications. + +To use this module, define a ``main`` function with a single ``argv`` argument +and call ``app.run(main)``. 
For example:: + + def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + if __name__ == '__main__': + app.run(main) +""" + +import collections +import errno +import os +import pdb +import sys +import textwrap +import traceback + +from absl import command_name +from absl import flags +from absl import logging + +try: + import faulthandler +except ImportError: + faulthandler = None + +FLAGS = flags.FLAGS + +flags.DEFINE_boolean('run_with_pdb', False, 'Set to true for PDB debug mode') +flags.DEFINE_boolean('pdb_post_mortem', False, + 'Set to true to handle uncaught exceptions with PDB ' + 'post mortem.') +flags.DEFINE_alias('pdb', 'pdb_post_mortem') +flags.DEFINE_boolean('run_with_profiling', False, + 'Set to true for profiling the script. ' + 'Execution will be slower, and the output format might ' + 'change over time.') +flags.DEFINE_string('profile_file', None, + 'Dump profile information to a file (for python -m ' + 'pstats). Implies --run_with_profiling.') +flags.DEFINE_boolean('use_cprofile_for_profiling', True, + 'Use cProfile instead of the profile module for ' + 'profiling. This has no effect unless ' + '--run_with_profiling is set.') +flags.DEFINE_boolean('only_check_args', False, + 'Set to true to validate args and exit.', + allow_hide_cpp=True) + + +# If main() exits via an abnormal exception, call into these +# handlers before exiting. +EXCEPTION_HANDLERS = [] + + +class Error(Exception): + pass + + +class UsageError(Error): + """Exception raised when the arguments supplied by the user are invalid. + + Raise this when the arguments supplied are invalid from the point of + view of the application. For example when two mutually exclusive + flags have been supplied or when there are not enough non-flag + arguments. It is distinct from flags.Error which covers the lower + level of parsing and validating individual flags. + """ + + def __init__(self, message, exitcode=1): + super(UsageError, self).__init__(message) + self.exitcode = exitcode + + +class HelpFlag(flags.BooleanFlag): + """Special boolean flag that displays usage and raises SystemExit.""" + NAME = 'help' + SHORT_NAME = '?' + + def __init__(self): + super(HelpFlag, self).__init__( + self.NAME, False, 'show this help', + short_name=self.SHORT_NAME, allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + usage(shorthelp=True, writeto_stdout=True) + # Advertise --helpfull on stdout, since usage() was on stdout. + print() + print('Try --helpfull to get a list of all flags.') + sys.exit(1) + + +class HelpshortFlag(HelpFlag): + """--helpshort is an alias for --help.""" + NAME = 'helpshort' + SHORT_NAME = None + + +class HelpfullFlag(flags.BooleanFlag): + """Display help for flags in the main module and all dependent modules.""" + + def __init__(self): + super(HelpfullFlag, self).__init__( + 'helpfull', False, 'show full help', allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + usage(writeto_stdout=True) + sys.exit(1) + + +class HelpXMLFlag(flags.BooleanFlag): + """Similar to HelpfullFlag, but generates output in XML format.""" + + def __init__(self): + super(HelpXMLFlag, self).__init__( + 'helpxml', False, 'like --helpfull, but generates XML output', + allow_hide_cpp=True) + + def parse(self, arg): + if self._parse(arg): + flags.FLAGS.write_help_in_xml_format(sys.stdout) + sys.exit(1) + + +def parse_flags_with_usage(args): + """Tries to parse the flags, print usage, and exit if unparsable. 
+ + Args: + args: [str], a non-empty list of the command line arguments including + program name. + + Returns: + [str], a non-empty list of remaining command line arguments after parsing + flags, including program name. + """ + try: + return FLAGS(args) + except flags.Error as error: + message = str(error) + if '\n' in message: + final_message = 'FATAL Flags parsing error:\n%s\n' % textwrap.indent( + message, ' ') + else: + final_message = 'FATAL Flags parsing error: %s\n' % message + sys.stderr.write(final_message) + sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n') + sys.exit(1) + + +_define_help_flags_called = False + + +def define_help_flags(): + """Registers help flags. Idempotent.""" + # Use a global to ensure idempotence. + global _define_help_flags_called + + if not _define_help_flags_called: + flags.DEFINE_flag(HelpFlag()) + flags.DEFINE_flag(HelpshortFlag()) # alias for --help + flags.DEFINE_flag(HelpfullFlag()) + flags.DEFINE_flag(HelpXMLFlag()) + _define_help_flags_called = True + + +def _register_and_parse_flags_with_usage( + argv=None, + flags_parser=parse_flags_with_usage, +): + """Registers help flags, parses arguments and shows usage if appropriate. + + This also calls sys.exit(0) if flag --only_check_args is True. + + Args: + argv: [str], a non-empty list of the command line arguments including + program name, sys.argv is used if None. + flags_parser: Callable[[List[Text]], Any], the function used to parse flags. + The return value of this function is passed to `main` untouched. + It must guarantee FLAGS is parsed after this function is called. + + Returns: + The return value of `flags_parser`. When using the default `flags_parser`, + it returns the following: + [str], a non-empty list of remaining command line arguments after parsing + flags, including program name. + + Raises: + Error: Raised when flags_parser is called, but FLAGS is not parsed. + SystemError: Raised when it's called more than once. + """ + if _register_and_parse_flags_with_usage.done: + raise SystemError('Flag registration can be done only once.') + + define_help_flags() + + original_argv = sys.argv if argv is None else argv + args_to_main = flags_parser(original_argv) + if not FLAGS.is_parsed(): + raise Error('FLAGS must be parsed after flags_parser is called.') + + # Exit when told so. + if FLAGS.only_check_args: + sys.exit(0) + # Immediately after flags are parsed, bump verbosity to INFO if the flag has + # not been set. + if FLAGS['verbosity'].using_default_value: + FLAGS.verbosity = 0 + _register_and_parse_flags_with_usage.done = True + + return args_to_main + +_register_and_parse_flags_with_usage.done = False + + +def _run_main(main, argv): + """Calls main, optionally with pdb or profiler.""" + if FLAGS.run_with_pdb: + sys.exit(pdb.runcall(main, argv)) + elif FLAGS.run_with_profiling or FLAGS.profile_file: + # Avoid import overhead since most apps (including performance-sensitive + # ones) won't be run with profiling. 
+ # pylint: disable=g-import-not-at-top + import atexit + if FLAGS.use_cprofile_for_profiling: + import cProfile as profile + else: + import profile + profiler = profile.Profile() + if FLAGS.profile_file: + atexit.register(profiler.dump_stats, FLAGS.profile_file) + else: + atexit.register(profiler.print_stats) + sys.exit(profiler.runcall(main, argv)) + else: + sys.exit(main(argv)) + + +def _call_exception_handlers(exception): + """Calls any installed exception handlers.""" + for handler in EXCEPTION_HANDLERS: + try: + if handler.wants(exception): + handler.handle(exception) + except: # pylint: disable=bare-except + try: + # We don't want to stop for exceptions in the exception handlers but + # we shouldn't hide them either. + logging.error(traceback.format_exc()) + except: # pylint: disable=bare-except + # In case even the logging statement fails, ignore. + pass + + +def run( + main, + argv=None, + flags_parser=parse_flags_with_usage, +): + """Begins executing the program. + + Args: + main: The main function to execute. It takes an single argument "argv", + which is a list of command line arguments with parsed flags removed. + The return value is passed to `sys.exit`, and so for example + a return value of 0 or None results in a successful termination, whereas + a return value of 1 results in abnormal termination. + For more details, see https://docs.python.org/3/library/sys#sys.exit + argv: A non-empty list of the command line arguments including program name, + sys.argv is used if None. + flags_parser: Callable[[List[Text]], Any], the function used to parse flags. + The return value of this function is passed to `main` untouched. + It must guarantee FLAGS is parsed after this function is called. + Should be passed as a keyword-only arg which will become mandatory in a + future release. + - Parses command line flags with the flag module. + - If there are any errors, prints usage(). + - Calls main() with the remaining arguments. + - If main() raises a UsageError, prints usage and the error message. + """ + try: + args = _run_init( + sys.argv if argv is None else argv, + flags_parser, + ) + while _init_callbacks: + callback = _init_callbacks.popleft() + callback() + try: + _run_main(main, args) + except UsageError as error: + usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode) + except: + exc = sys.exc_info()[1] + # Don't try to post-mortem debug successful SystemExits, since those + # mean there wasn't actually an error. In particular, the test framework + # raises SystemExit(False) even if all tests passed. + if isinstance(exc, SystemExit) and not exc.code: + raise + + # Check the tty so that we don't hang waiting for input in an + # non-interactive scenario. + if FLAGS.pdb_post_mortem and sys.stdout.isatty(): + traceback.print_exc() + print() + print(' *** Entering post-mortem debugging ***') + print() + pdb.post_mortem() + raise + except Exception as e: + _call_exception_handlers(e) + raise + +# Callbacks which have been deferred until after _run_init has been called. +_init_callbacks = collections.deque() + + +def call_after_init(callback): + """Calls the given callback only once ABSL has finished initialization. + + If ABSL has already finished initialization when ``call_after_init`` is + called then the callback is executed immediately, otherwise `callback` is + stored to be executed after ``app.run`` has finished initializing (aka. just + before the main function is called). 
+ + If called after ``app.run``, this is equivalent to calling ``callback()`` in + the caller thread. If called before ``app.run``, callbacks are run + sequentially (in an undefined order) in the same thread as ``app.run``. + + Args: + callback: a callable to be called once ABSL has finished initialization. + This may be immediate if initialization has already finished. It + takes no arguments and returns nothing. + """ + if _run_init.done: + callback() + else: + _init_callbacks.append(callback) + + +def _run_init( + argv, + flags_parser, +): + """Does one-time initialization and re-parses flags on rerun.""" + if _run_init.done: + return flags_parser(argv) + command_name.make_process_name_useful() + # Set up absl logging handler. + logging.use_absl_handler() + args = _register_and_parse_flags_with_usage( + argv=argv, + flags_parser=flags_parser, + ) + if faulthandler: + try: + faulthandler.enable() + except Exception: # pylint: disable=broad-except + # Some tests verify stderr output very closely, so don't print anything. + # Disabled faulthandler is a low-impact error. + pass + _run_init.done = True + return args + + +_run_init.done = False + + +def usage(shorthelp=False, writeto_stdout=False, detailed_error=None, + exitcode=None): + """Writes __main__'s docstring to stderr with some help text. + + Args: + shorthelp: bool, if True, prints only flags from the main module, + rather than all flags. + writeto_stdout: bool, if True, writes help message to stdout, + rather than to stderr. + detailed_error: str, additional detail about why usage info was presented. + exitcode: optional integer, if set, exits with this status code after + writing help. + """ + if writeto_stdout: + stdfile = sys.stdout + else: + stdfile = sys.stderr + + doc = sys.modules['__main__'].__doc__ + if not doc: + doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] + doc = flags.text_wrap(doc, indent=' ', firstline_indent='') + else: + # Replace all '%s' with sys.argv[0], and all '%%' with '%'. + num_specifiers = doc.count('%') - 2 * doc.count('%%') + try: + doc %= (sys.argv[0],) * num_specifiers + except (OverflowError, TypeError, ValueError): + # Just display the docstring as-is. + pass + if shorthelp: + flag_str = FLAGS.main_module_help() + else: + flag_str = FLAGS.get_help() + try: + stdfile.write(doc) + if flag_str: + stdfile.write('\nflags:\n') + stdfile.write(flag_str) + stdfile.write('\n') + if detailed_error is not None: + stdfile.write('\n%s\n' % detailed_error) + except IOError as e: + # We avoid printing a huge backtrace if we get EPIPE, because + # "foo.par --help | less" is a frequent use case. + if e.errno != errno.EPIPE: + raise + if exitcode is not None: + sys.exit(exitcode) + + +class ExceptionHandler(object): + """Base exception handler from which other may inherit.""" + + def wants(self, exc): + """Returns whether this handler wants to handle the exception or not. + + This base class returns True for all exceptions by default. Override in + subclass if it wants to be more selective. + + Args: + exc: Exception, the current exception. + """ + del exc # Unused. + return True + + def handle(self, exc): + """Do something with the current exception. + + Args: + exc: Exception, the current exception + + This method must be overridden. + """ + raise NotImplementedError() + + +def install_exception_handler(handler): + """Installs an exception handler. + + Args: + handler: ExceptionHandler, the exception handler to install. + + Raises: + TypeError: Raised when the handler was not of the correct type. 
+ + All installed exception handlers will be called if main() exits via + an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt, + FlagsError or UsageError. + """ + if not isinstance(handler, ExceptionHandler): + raise TypeError('handler of type %s does not inherit from ExceptionHandler' + % type(handler)) + EXCEPTION_HANDLERS.append(handler) diff --git a/MLPY/Lib/site-packages/absl/app.pyi b/MLPY/Lib/site-packages/absl/app.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fe5e44809915f3dbd56b23207781a2219d86f842 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/app.pyi @@ -0,0 +1,99 @@ + +from typing import Any, Callable, Collection, Iterable, List, NoReturn, Optional, Text, TypeVar, Union, overload + +from absl.flags import _flag + + +_MainArgs = TypeVar('_MainArgs') +_Exc = TypeVar('_Exc', bound=Exception) + + +class ExceptionHandler(): + + def wants(self, exc: _Exc) -> bool: + ... + + def handle(self, exc: _Exc): + ... + + +EXCEPTION_HANDLERS: List[ExceptionHandler] = ... + + +class HelpFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +class HelpshortFlag(HelpFlag): + ... + + +class HelpfullFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +class HelpXMLFlag(_flag.BooleanFlag): + def __init__(self): + ... + + +def define_help_flags() -> None: + ... + + +@overload +def usage(shorthelp: Union[bool, int] = ..., + writeto_stdout: Union[bool, int] = ..., + detailed_error: Optional[Any] = ..., + exitcode: None = ...) -> None: + ... + + +@overload +def usage(shorthelp: Union[bool, int] = ..., + writeto_stdout: Union[bool, int] = ..., + detailed_error: Optional[Any] = ..., + exitcode: int = ...) -> NoReturn: + ... + + +def install_exception_handler(handler: ExceptionHandler) -> None: + ... + + +class Error(Exception): + ... + + +class UsageError(Error): + exitcode: int + + +def parse_flags_with_usage(args: List[Text]) -> List[Text]: + ... + + +def call_after_init(callback: Callable[[], Any]) -> None: + ... + + +# Without the flag_parser argument, `main` should require a List[Text]. +@overload +def run( + main: Callable[[List[Text]], Any], + argv: Optional[List[Text]] = ..., + *, +) -> NoReturn: + ... + + +@overload +def run( + main: Callable[[_MainArgs], Any], + argv: Optional[List[Text]] = ..., + *, + flags_parser: Callable[[List[Text]], _MainArgs], +) -> NoReturn: + ... diff --git a/MLPY/Lib/site-packages/absl/command_name.py b/MLPY/Lib/site-packages/absl/command_name.py new file mode 100644 index 0000000000000000000000000000000000000000..9260fee9bd853ba33b2139b3d47b73e59c127f36 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/command_name.py @@ -0,0 +1,63 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A tiny stand alone library to change the kernel process name on Linux.""" + +import os +import sys + +# This library must be kept small and stand alone. It is used by small things +# that require no extension modules. 
+ + +def make_process_name_useful(): + """Sets the process name to something better than 'python' if possible.""" + set_kernel_process_name(os.path.basename(sys.argv[0])) + + +def set_kernel_process_name(name): + """Changes the Kernel's /proc/self/status process name on Linux. + + The kernel name is NOT what will be shown by the ps or top command. + It is a 15 character string stored in the kernel's process table that + is included in the kernel log when a process is OOM killed. + The first 15 bytes of name are used. Non-ASCII unicode is replaced with '?'. + + Does nothing if /proc/self/comm cannot be written or prctl() fails. + + Args: + name: bytes|unicode, the Linux kernel's command name to set. + """ + if not isinstance(name, bytes): + name = name.encode('ascii', 'replace') + try: + # This is preferred to using ctypes to try and call prctl() when possible. + with open('/proc/self/comm', 'wb') as proc_comm: + proc_comm.write(name[:15]) + except EnvironmentError: + try: + import ctypes # pylint: disable=g-import-not-at-top + except ImportError: + return # No ctypes. + try: + libc = ctypes.CDLL('libc.so.6') + except EnvironmentError: + return # No libc.so.6. + pr_set_name = ctypes.c_ulong(15) # linux/prctl.h PR_SET_NAME value. + zero = ctypes.c_ulong(0) + try: + libc.prctl(pr_set_name, name, zero, zero, zero) + # Ignore the prctl return value. Nothing we can do if it errored. + except AttributeError: + return # No prctl. diff --git a/MLPY/Lib/site-packages/absl/flags/__init__.py b/MLPY/Lib/site-packages/absl/flags/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..21e05c477ee3a983c37805c6671c9a8894d49d4b --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/__init__.py @@ -0,0 +1,225 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This package is used to define and parse command line flags. + +This package defines a *distributed* flag-definition policy: rather than +an application having to define all flags in or near main(), each Python +module defines flags that are useful to it. When one Python module +imports another, it gains access to the other's flags. (This is +implemented by having all modules share a common, global registry object +containing all the flag information.) + +Flags are defined through the use of one of the DEFINE_xxx functions. +The specific function used determines how the flag is parsed, checked, +and optionally type-converted, when it's seen on the command line. 
+""" + +import getopt +import os +import re +import sys +import types +import warnings + +from absl.flags import _argument_parser +from absl.flags import _defines +from absl.flags import _exceptions +from absl.flags import _flag +from absl.flags import _flagvalues +from absl.flags import _helpers +from absl.flags import _validators + +__all__ = ( + 'DEFINE', + 'DEFINE_flag', + 'DEFINE_string', + 'DEFINE_boolean', + 'DEFINE_bool', + 'DEFINE_float', + 'DEFINE_integer', + 'DEFINE_enum', + 'DEFINE_enum_class', + 'DEFINE_list', + 'DEFINE_spaceseplist', + 'DEFINE_multi', + 'DEFINE_multi_string', + 'DEFINE_multi_integer', + 'DEFINE_multi_float', + 'DEFINE_multi_enum', + 'DEFINE_multi_enum_class', + 'DEFINE_alias', + # Flag validators. + 'register_validator', + 'validator', + 'register_multi_flags_validator', + 'multi_flags_validator', + 'mark_flag_as_required', + 'mark_flags_as_required', + 'mark_flags_as_mutual_exclusive', + 'mark_bool_flags_as_mutual_exclusive', + # Flag modifiers. + 'set_default', + 'override_value', + # Key flag related functions. + 'declare_key_flag', + 'adopt_module_key_flags', + 'disclaim_key_flags', + # Module exceptions. + 'Error', + 'CantOpenFlagFileError', + 'DuplicateFlagError', + 'IllegalFlagValueError', + 'UnrecognizedFlagError', + 'UnparsedFlagAccessError', + 'ValidationError', + 'FlagNameConflictsWithMethodError', + # Public classes. + 'Flag', + 'BooleanFlag', + 'EnumFlag', + 'EnumClassFlag', + 'MultiFlag', + 'MultiEnumClassFlag', + 'FlagHolder', + 'FlagValues', + 'ArgumentParser', + 'BooleanParser', + 'EnumParser', + 'EnumClassParser', + 'ArgumentSerializer', + 'FloatParser', + 'IntegerParser', + 'BaseListParser', + 'ListParser', + 'ListSerializer', + 'EnumClassListSerializer', + 'CsvListSerializer', + 'WhitespaceSeparatedListParser', + 'EnumClassSerializer', + # Helper functions. + 'get_help_width', + 'text_wrap', + 'flag_dict_to_args', + 'doc_to_help', + # The global FlagValues instance. + 'FLAGS', +) + +# Initialize the FLAGS_MODULE as early as possible. +# It's only used by adopt_module_key_flags to take SPECIAL_FLAGS into account. +_helpers.FLAGS_MODULE = sys.modules[__name__] + +# Add current module to disclaimed module ids. +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + +# DEFINE functions. They are explained in more details in the module doc string. +# pylint: disable=invalid-name +DEFINE = _defines.DEFINE +DEFINE_flag = _defines.DEFINE_flag +DEFINE_string = _defines.DEFINE_string +DEFINE_boolean = _defines.DEFINE_boolean +DEFINE_bool = DEFINE_boolean # Match C++ API. +DEFINE_float = _defines.DEFINE_float +DEFINE_integer = _defines.DEFINE_integer +DEFINE_enum = _defines.DEFINE_enum +DEFINE_enum_class = _defines.DEFINE_enum_class +DEFINE_list = _defines.DEFINE_list +DEFINE_spaceseplist = _defines.DEFINE_spaceseplist +DEFINE_multi = _defines.DEFINE_multi +DEFINE_multi_string = _defines.DEFINE_multi_string +DEFINE_multi_integer = _defines.DEFINE_multi_integer +DEFINE_multi_float = _defines.DEFINE_multi_float +DEFINE_multi_enum = _defines.DEFINE_multi_enum +DEFINE_multi_enum_class = _defines.DEFINE_multi_enum_class +DEFINE_alias = _defines.DEFINE_alias +# pylint: enable=invalid-name + +# Flag validators. 
+register_validator = _validators.register_validator +validator = _validators.validator +register_multi_flags_validator = _validators.register_multi_flags_validator +multi_flags_validator = _validators.multi_flags_validator +mark_flag_as_required = _validators.mark_flag_as_required +mark_flags_as_required = _validators.mark_flags_as_required +mark_flags_as_mutual_exclusive = _validators.mark_flags_as_mutual_exclusive +mark_bool_flags_as_mutual_exclusive = _validators.mark_bool_flags_as_mutual_exclusive + +# Flag modifiers. +set_default = _defines.set_default +override_value = _defines.override_value + +# Key flag related functions. +declare_key_flag = _defines.declare_key_flag +adopt_module_key_flags = _defines.adopt_module_key_flags +disclaim_key_flags = _defines.disclaim_key_flags + +# Module exceptions. +# pylint: disable=invalid-name +Error = _exceptions.Error +CantOpenFlagFileError = _exceptions.CantOpenFlagFileError +DuplicateFlagError = _exceptions.DuplicateFlagError +IllegalFlagValueError = _exceptions.IllegalFlagValueError +UnrecognizedFlagError = _exceptions.UnrecognizedFlagError +UnparsedFlagAccessError = _exceptions.UnparsedFlagAccessError +ValidationError = _exceptions.ValidationError +FlagNameConflictsWithMethodError = _exceptions.FlagNameConflictsWithMethodError + +# Public classes. +Flag = _flag.Flag +BooleanFlag = _flag.BooleanFlag +EnumFlag = _flag.EnumFlag +EnumClassFlag = _flag.EnumClassFlag +MultiFlag = _flag.MultiFlag +MultiEnumClassFlag = _flag.MultiEnumClassFlag +FlagHolder = _flagvalues.FlagHolder +FlagValues = _flagvalues.FlagValues +ArgumentParser = _argument_parser.ArgumentParser +BooleanParser = _argument_parser.BooleanParser +EnumParser = _argument_parser.EnumParser +EnumClassParser = _argument_parser.EnumClassParser +ArgumentSerializer = _argument_parser.ArgumentSerializer +FloatParser = _argument_parser.FloatParser +IntegerParser = _argument_parser.IntegerParser +BaseListParser = _argument_parser.BaseListParser +ListParser = _argument_parser.ListParser +ListSerializer = _argument_parser.ListSerializer +EnumClassListSerializer = _argument_parser.EnumClassListSerializer +CsvListSerializer = _argument_parser.CsvListSerializer +WhitespaceSeparatedListParser = _argument_parser.WhitespaceSeparatedListParser +EnumClassSerializer = _argument_parser.EnumClassSerializer +# pylint: enable=invalid-name + +# Helper functions. +get_help_width = _helpers.get_help_width +text_wrap = _helpers.text_wrap +flag_dict_to_args = _helpers.flag_dict_to_args +doc_to_help = _helpers.doc_to_help + +# Special flags. +_helpers.SPECIAL_FLAGS = FlagValues() + +DEFINE_string( + 'flagfile', '', + 'Insert flag definitions from the given file into the command line.', + _helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types + +DEFINE_string('undefok', '', + 'comma-separated list of flag names that it is okay to specify ' + 'on the command line even if the program does not define a flag ' + 'with that name. IMPORTANT: flags in this list that have ' + 'arguments MUST use the --flag=value format.', + _helpers.SPECIAL_FLAGS) # pytype: disable=wrong-arg-types + +#: The global FlagValues instance. 
+FLAGS = _flagvalues.FLAGS diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11328e43f8fd96fd4e7da6efcaf78560989abebe Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_argument_parser.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_argument_parser.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0afbf473ef9eb16ab945fa3c04ee513bbc40974 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_argument_parser.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_defines.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_defines.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10c12b6f606abfde63e38bffcf4ed2b2932f8b07 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_defines.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8ac7584963d134a30b799c9d6189f1dc006bf57 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_flag.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_flag.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c51c839382f516814f665b9784e665579dd6d918 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_flag.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_flagvalues.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_flagvalues.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff7a44478a08839db91cc4919bbb49ea11db1c4d Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_flagvalues.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_helpers.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_helpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6e44c1ffd57fec7eb80620acc4772cba4eb882b Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_helpers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..339d31c9d30638175b730cc97e503cdec85c1aca Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators_classes.cpython-39.pyc b/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators_classes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..450442f2c58964a1c3cff1efe3c59dda13a6afb5 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/_validators_classes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/__pycache__/argparse_flags.cpython-39.pyc 
b/MLPY/Lib/site-packages/absl/flags/__pycache__/argparse_flags.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..196d200e24afb5cdf28d930892c919fc8efd9b76 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/flags/__pycache__/argparse_flags.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/flags/_argument_parser.py b/MLPY/Lib/site-packages/absl/flags/_argument_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..13dc640d025972baf4b395f575feeae539329c10 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_argument_parser.py @@ -0,0 +1,638 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains base classes used to parse and convert arguments. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import collections +import csv +import enum +import io +import string +from typing import Generic, List, Iterable, Optional, Sequence, Text, Type, TypeVar, Union +from xml.dom import minidom + +from absl.flags import _helpers + +_T = TypeVar('_T') +_ET = TypeVar('_ET', bound=enum.Enum) +_N = TypeVar('_N', int, float) + + +def _is_integer_type(instance): + """Returns True if instance is an integer, and not a bool.""" + return (isinstance(instance, int) and + not isinstance(instance, bool)) + + +class _ArgumentParserCache(type): + """Metaclass used to cache and share argument parsers among flags.""" + + _instances = {} + + def __call__(cls, *args, **kwargs): + """Returns an instance of the argument parser cls. + + This method overrides behavior of the __new__ methods in + all subclasses of ArgumentParser (inclusive). If an instance + for cls with the same set of arguments exists, this instance is + returned, otherwise a new instance is created. + + If any keyword arguments are defined, or the values in args + are not hashable, this method always returns a new instance of + cls. + + Args: + *args: Positional initializer arguments. + **kwargs: Initializer keyword arguments. + + Returns: + An instance of cls, shared or new. + """ + if kwargs: + return type.__call__(cls, *args, **kwargs) + else: + instances = cls._instances + key = (cls,) + tuple(args) + try: + return instances[key] + except KeyError: + # No cache entry for key exists, create a new one. + return instances.setdefault(key, type.__call__(cls, *args)) + except TypeError: + # An object in args cannot be hashed, always return + # a new instance. + return type.__call__(cls, *args) + + +class ArgumentParser(Generic[_T], metaclass=_ArgumentParserCache): + """Base class used to parse and convert arguments. + + The :meth:`parse` method checks to make sure that the string argument is a + legal value and convert it to a native type. If the value cannot be + converted, it should throw a ``ValueError`` exception with a human + readable explanation of why the value is illegal. 
+ + Subclasses should also define a syntactic_help string which may be + presented to the user to describe the form of the legal values. + + Argument parser classes must be stateless, since instances are cached + and shared between flags. Initializer arguments are allowed, but all + member variables must be derived from initializer arguments only. + """ + + syntactic_help: Text = '' + + def parse(self, argument: Text) -> Optional[_T]: + """Parses the string argument and returns the native value. + + By default it returns its argument unmodified. + + Args: + argument: string argument passed in the commandline. + + Raises: + ValueError: Raised when it fails to parse the argument. + TypeError: Raised when the argument has the wrong type. + + Returns: + The parsed value in native type. + """ + if not isinstance(argument, str): + raise TypeError('flag value must be a string, found "{}"'.format( + type(argument))) + return argument + + def flag_type(self) -> Text: + """Returns a string representing the type of the flag.""" + return 'string' + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + """Returns a list of minidom.Element to add additional flag information. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + """ + del doc # Unused. + return [] + + +class ArgumentSerializer(Generic[_T]): + """Base class for generating string representations of a flag value.""" + + def serialize(self, value: _T) -> Text: + """Returns a serialized string of the value.""" + return str(value) + + +class NumericParser(ArgumentParser[_N]): + """Parser of numeric values. + + Parsed value may be bounded to a given upper and lower bound. + """ + + lower_bound: Optional[_N] + upper_bound: Optional[_N] + + def is_outside_bounds(self, val: _N) -> bool: + """Returns whether the value is outside the bounds or not.""" + return ((self.lower_bound is not None and val < self.lower_bound) or + (self.upper_bound is not None and val > self.upper_bound)) + + def parse(self, argument: Text) -> _N: + """See base class.""" + val = self.convert(argument) + if self.is_outside_bounds(val): + raise ValueError('%s is not %s' % (val, self.syntactic_help)) + return val + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + if self.lower_bound is not None: + elements.append(_helpers.create_xml_dom_element( + doc, 'lower_bound', self.lower_bound)) + if self.upper_bound is not None: + elements.append(_helpers.create_xml_dom_element( + doc, 'upper_bound', self.upper_bound)) + return elements + + def convert(self, argument: Text) -> _N: + """Returns the correct numeric value of argument. + + Subclass must implement this method, and raise TypeError if argument is not + string or has the right numeric type. + + Args: + argument: string argument passed in the commandline, or the numeric type. + + Raises: + TypeError: Raised when argument is not a string or the right numeric type. + ValueError: Raised when failed to convert argument to the numeric value. + """ + raise NotImplementedError + + +class FloatParser(NumericParser[float]): + """Parser of floating point values. + + Parsed value may be bounded to a given upper and lower bound. 
+ """ + number_article = 'a' + number_name = 'number' + syntactic_help = ' '.join((number_article, number_name)) + + def __init__( + self, + lower_bound: Optional[float] = None, + upper_bound: Optional[float] = None, + ) -> None: + super(FloatParser, self).__init__() + self.lower_bound = lower_bound + self.upper_bound = upper_bound + sh = self.syntactic_help + if lower_bound is not None and upper_bound is not None: + sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound)) + elif lower_bound == 0: + sh = 'a non-negative %s' % self.number_name + elif upper_bound == 0: + sh = 'a non-positive %s' % self.number_name + elif upper_bound is not None: + sh = '%s <= %s' % (self.number_name, upper_bound) + elif lower_bound is not None: + sh = '%s >= %s' % (self.number_name, lower_bound) + self.syntactic_help = sh + + def convert(self, argument: Union[int, float, str]) -> float: + """Returns the float value of argument.""" + if (_is_integer_type(argument) or isinstance(argument, float) or + isinstance(argument, str)): + return float(argument) + else: + raise TypeError( + 'Expect argument to be a string, int, or float, found {}'.format( + type(argument))) + + def flag_type(self) -> Text: + """See base class.""" + return 'float' + + +class IntegerParser(NumericParser[int]): + """Parser of an integer value. + + Parsed value may be bounded to a given upper and lower bound. + """ + number_article = 'an' + number_name = 'integer' + syntactic_help = ' '.join((number_article, number_name)) + + def __init__( + self, lower_bound: Optional[int] = None, upper_bound: Optional[int] = None + ) -> None: + super(IntegerParser, self).__init__() + self.lower_bound = lower_bound + self.upper_bound = upper_bound + sh = self.syntactic_help + if lower_bound is not None and upper_bound is not None: + sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound)) + elif lower_bound == 1: + sh = 'a positive %s' % self.number_name + elif upper_bound == -1: + sh = 'a negative %s' % self.number_name + elif lower_bound == 0: + sh = 'a non-negative %s' % self.number_name + elif upper_bound == 0: + sh = 'a non-positive %s' % self.number_name + elif upper_bound is not None: + sh = '%s <= %s' % (self.number_name, upper_bound) + elif lower_bound is not None: + sh = '%s >= %s' % (self.number_name, lower_bound) + self.syntactic_help = sh + + def convert(self, argument: Union[int, Text]) -> int: + """Returns the int value of argument.""" + if _is_integer_type(argument): + return argument + elif isinstance(argument, str): + base = 10 + if len(argument) > 2 and argument[0] == '0': + if argument[1] == 'o': + base = 8 + elif argument[1] == 'x': + base = 16 + return int(argument, base) + else: + raise TypeError('Expect argument to be a string or int, found {}'.format( + type(argument))) + + def flag_type(self) -> Text: + """See base class.""" + return 'int' + + +class BooleanParser(ArgumentParser[bool]): + """Parser of boolean values.""" + + def parse(self, argument: Union[Text, int]) -> bool: + """See base class.""" + if isinstance(argument, str): + if argument.lower() in ('true', 't', '1'): + return True + elif argument.lower() in ('false', 'f', '0'): + return False + else: + raise ValueError('Non-boolean argument to boolean flag', argument) + elif isinstance(argument, int): + # Only allow bool or integer 0, 1. + # Note that float 1.0 == True, 0.0 == False. 
+ bool_value = bool(argument) + if argument == bool_value: + return bool_value + else: + raise ValueError('Non-boolean argument to boolean flag', argument) + + raise TypeError('Non-boolean argument to boolean flag', argument) + + def flag_type(self) -> Text: + """See base class.""" + return 'bool' + + +class EnumParser(ArgumentParser[Text]): + """Parser of a string enum value (a string value from a given set).""" + + def __init__( + self, enum_values: Iterable[Text], case_sensitive: bool = True + ) -> None: + """Initializes EnumParser. + + Args: + enum_values: [str], a non-empty list of string values in the enum. + case_sensitive: bool, whether or not the enum is to be case-sensitive. + + Raises: + ValueError: When enum_values is empty. + """ + if not enum_values: + raise ValueError( + 'enum_values cannot be empty, found "{}"'.format(enum_values)) + if isinstance(enum_values, str): + raise ValueError( + 'enum_values cannot be a str, found "{}"'.format(enum_values) + ) + super(EnumParser, self).__init__() + self.enum_values = list(enum_values) + self.case_sensitive = case_sensitive + + def parse(self, argument: Text) -> Text: + """Determines validity of argument and returns the correct element of enum. + + Args: + argument: str, the supplied flag value. + + Returns: + The first matching element from enum_values. + + Raises: + ValueError: Raised when argument didn't match anything in enum. + """ + if self.case_sensitive: + if argument not in self.enum_values: + raise ValueError('value should be one of <%s>' % + '|'.join(self.enum_values)) + else: + return argument + else: + if argument.upper() not in [value.upper() for value in self.enum_values]: + raise ValueError('value should be one of <%s>' % + '|'.join(self.enum_values)) + else: + return [value for value in self.enum_values + if value.upper() == argument.upper()][0] + + def flag_type(self) -> Text: + """See base class.""" + return 'string enum' + + +class EnumClassParser(ArgumentParser[_ET]): + """Parser of an Enum class member.""" + + def __init__( + self, enum_class: Type[_ET], case_sensitive: bool = True + ) -> None: + """Initializes EnumParser. + + Args: + enum_class: class, the Enum class with all possible flag values. + case_sensitive: bool, whether or not the enum is to be case-sensitive. If + False, all member names must be unique when case is ignored. + + Raises: + TypeError: When enum_class is not a subclass of Enum. + ValueError: When enum_class is empty. + """ + if not issubclass(enum_class, enum.Enum): + raise TypeError('{} is not a subclass of Enum.'.format(enum_class)) + if not enum_class.__members__: + raise ValueError('enum_class cannot be empty, but "{}" is empty.' 
+ .format(enum_class)) + if not case_sensitive: + members = collections.Counter( + name.lower() for name in enum_class.__members__) + duplicate_keys = { + member for member, count in members.items() if count > 1 + } + if duplicate_keys: + raise ValueError( + 'Duplicate enum values for {} using case_sensitive=False'.format( + duplicate_keys)) + + super(EnumClassParser, self).__init__() + self.enum_class = enum_class + self._case_sensitive = case_sensitive + if case_sensitive: + self._member_names = tuple(enum_class.__members__) + else: + self._member_names = tuple( + name.lower() for name in enum_class.__members__) + + @property + def member_names(self) -> Sequence[Text]: + """The accepted enum names, in lowercase if not case sensitive.""" + return self._member_names + + def parse(self, argument: Union[_ET, Text]) -> _ET: + """Determines validity of argument and returns the correct element of enum. + + Args: + argument: str or Enum class member, the supplied flag value. + + Returns: + The first matching Enum class member in Enum class. + + Raises: + ValueError: Raised when argument didn't match anything in enum. + """ + if isinstance(argument, self.enum_class): + return argument # pytype: disable=bad-return-type + elif not isinstance(argument, str): + raise ValueError( + '{} is not an enum member or a name of a member in {}'.format( + argument, self.enum_class)) + key = EnumParser( + self._member_names, case_sensitive=self._case_sensitive).parse(argument) + if self._case_sensitive: + return self.enum_class[key] + else: + # If EnumParser.parse() return a value, we're guaranteed to find it + # as a member of the class + return next(value for name, value in self.enum_class.__members__.items() + if name.lower() == key.lower()) + + def flag_type(self) -> Text: + """See base class.""" + return 'enum class' + + +class ListSerializer(Generic[_T], ArgumentSerializer[List[_T]]): + + def __init__(self, list_sep: Text) -> None: + self.list_sep = list_sep + + def serialize(self, value: List[_T]) -> Text: + """See base class.""" + return self.list_sep.join([str(x) for x in value]) + + +class EnumClassListSerializer(ListSerializer[_ET]): + """A serializer for :class:`MultiEnumClass` flags. + + This serializer simply joins the output of `EnumClassSerializer` using a + provided separator. + """ + + def __init__(self, list_sep: Text, **kwargs) -> None: + """Initializes EnumClassListSerializer. + + Args: + list_sep: String to be used as a separator when serializing + **kwargs: Keyword arguments to the `EnumClassSerializer` used to serialize + individual values. + """ + super(EnumClassListSerializer, self).__init__(list_sep) + self._element_serializer = EnumClassSerializer(**kwargs) + + def serialize(self, value: Union[_ET, List[_ET]]) -> Text: + """See base class.""" + if isinstance(value, list): + return self.list_sep.join( + self._element_serializer.serialize(x) for x in value) + else: + return self._element_serializer.serialize(value) + + +class CsvListSerializer(ListSerializer[Text]): + + def serialize(self, value: List[Text]) -> Text: + """Serializes a list as a CSV string or unicode.""" + output = io.StringIO() + writer = csv.writer(output, delimiter=self.list_sep) + writer.writerow([str(x) for x in value]) + serialized_value = output.getvalue().strip() + + # We need the returned value to be pure ascii or Unicodes so that + # when the xml help is generated they are usefully encodable. 
+ return str(serialized_value) + + +class EnumClassSerializer(ArgumentSerializer[_ET]): + """Class for generating string representations of an enum class flag value.""" + + def __init__(self, lowercase: bool) -> None: + """Initializes EnumClassSerializer. + + Args: + lowercase: If True, enum member names are lowercased during serialization. + """ + self._lowercase = lowercase + + def serialize(self, value: _ET) -> Text: + """Returns a serialized string of the Enum class value.""" + as_string = str(value.name) + return as_string.lower() if self._lowercase else as_string + + +class BaseListParser(ArgumentParser): + """Base class for a parser of lists of strings. + + To extend, inherit from this class; from the subclass ``__init__``, call:: + + super().__init__(token, name) + + where token is a character used to tokenize, and name is a description + of the separator. + """ + + def __init__( + self, token: Optional[Text] = None, name: Optional[Text] = None + ) -> None: + assert name + super(BaseListParser, self).__init__() + self._token = token + self._name = name + self.syntactic_help = 'a %s separated list' % self._name + + def parse(self, argument: Text) -> List[Text]: + """See base class.""" + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + return [s.strip() for s in argument.split(self._token)] + + def flag_type(self) -> Text: + """See base class.""" + return '%s separated list of strings' % self._name + + +class ListParser(BaseListParser): + """Parser for a comma-separated list of strings.""" + + def __init__(self) -> None: + super(ListParser, self).__init__(',', 'comma') + + def parse(self, argument: Union[Text, List[Text]]) -> List[Text]: + """Parses argument as comma-separated list of strings.""" + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + try: + return [s.strip() for s in list(csv.reader([argument], strict=True))[0]] + except csv.Error as e: + # Provide a helpful report for case like + # --listflag="$(printf 'hello,\nworld')" + # IOW, list flag values containing naked newlines. This error + # was previously "reported" by allowing csv.Error to + # propagate. + raise ValueError('Unable to parse the value %r as a %s: %s' + % (argument, self.flag_type(), e)) + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = super(ListParser, self)._custom_xml_dom_elements(doc) + elements.append(_helpers.create_xml_dom_element( + doc, 'list_separator', repr(','))) + return elements + + +class WhitespaceSeparatedListParser(BaseListParser): + """Parser for a whitespace-separated list of strings.""" + + def __init__(self, comma_compat: bool = False) -> None: + """Initializer. + + Args: + comma_compat: bool, whether to support comma as an additional separator. + If False then only whitespace is supported. This is intended only for + backwards compatibility with flags that used to be comma-separated. + """ + self._comma_compat = comma_compat + name = 'whitespace or comma' if self._comma_compat else 'whitespace' + super(WhitespaceSeparatedListParser, self).__init__(None, name) + + def parse(self, argument: Union[Text, List[Text]]) -> List[Text]: + """Parses argument as whitespace-separated list of strings. + + It also parses argument as comma-separated list of strings if requested. + + Args: + argument: string argument passed in the commandline. + + Returns: + [str], the parsed flag value. 
+ """ + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + if self._comma_compat: + argument = argument.replace(',', ' ') + return argument.split() + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = super(WhitespaceSeparatedListParser, self + )._custom_xml_dom_elements(doc) + separators = list(string.whitespace) + if self._comma_compat: + separators.append(',') + separators.sort() + for sep_char in separators: + elements.append(_helpers.create_xml_dom_element( + doc, 'list_separator', repr(sep_char))) + return elements diff --git a/MLPY/Lib/site-packages/absl/flags/_defines.py b/MLPY/Lib/site-packages/absl/flags/_defines.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b102f21aec6533d6960c04aa63765687e1af7a --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_defines.py @@ -0,0 +1,1686 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This modules contains flags DEFINE functions. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import enum +import sys +import types +import typing +from typing import Text, List, Any, TypeVar, Optional, Union, Type, Iterable, overload + +from absl.flags import _argument_parser +from absl.flags import _exceptions +from absl.flags import _flag +from absl.flags import _flagvalues +from absl.flags import _helpers +from absl.flags import _validators + +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + +_T = TypeVar('_T') +_ET = TypeVar('_ET', bound=enum.Enum) + + +def _register_bounds_validator_if_needed(parser, name, flag_values): + """Enforces lower and upper bounds for numeric flags. + + Args: + parser: NumericParser (either FloatParser or IntegerParser), provides lower + and upper bounds, and help text to display. + name: str, name of the flag + flag_values: FlagValues. + """ + if parser.lower_bound is not None or parser.upper_bound is not None: + + def checker(value): + if value is not None and parser.is_outside_bounds(value): + message = '%s is not %s' % (value, parser.syntactic_help) + raise _exceptions.ValidationError(message) + return True + + _validators.register_validator(name, checker, flag_values=flag_values) + + +@overload +def DEFINE( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + name: Text, + default: Any, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + serializer: Optional[_argument_parser.ArgumentSerializer[_T]] = ..., + module_name: Optional[Text] = ..., + required: 'typing.Literal[True]' = ..., + **args: Any +) -> _flagvalues.FlagHolder[_T]: + ... 
+ + +@overload +def DEFINE( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + name: Text, + default: Optional[Any], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + serializer: Optional[_argument_parser.ArgumentSerializer[_T]] = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[_T]]: + ... + + +def DEFINE( # pylint: disable=invalid-name + parser, + name, + default, + help, # pylint: disable=redefined-builtin + flag_values=_flagvalues.FLAGS, + serializer=None, + module_name=None, + required=False, + **args): + """Registers a generic Flag object. + + NOTE: in the docstrings of all DEFINE* functions, "registers" is short + for "creates a new flag and registers it". + + Auxiliary function: clients should use the specialized ``DEFINE_`` + function instead. + + Args: + parser: :class:`ArgumentParser`, used to parse the flag arguments. + name: str, the flag name. + default: The default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + serializer: :class:`ArgumentSerializer`, the flag serializer instance. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + return DEFINE_flag( + _flag.Flag(parser, serializer, name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + + +@overload +def DEFINE_flag( # pylint: disable=invalid-name + flag: _flag.Flag[_T], + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: 'typing.Literal[True]' = ..., +) -> _flagvalues.FlagHolder[_T]: + ... + + +@overload +def DEFINE_flag( # pylint: disable=invalid-name + flag: _flag.Flag[_T], + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., +) -> _flagvalues.FlagHolder[Optional[_T]]: + ... + + +def DEFINE_flag( # pylint: disable=invalid-name + flag, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False): + """Registers a :class:`Flag` object with a :class:`FlagValues` object. + + By default, the global :const:`FLAGS` ``FlagValue`` object is used. + + Typical users will use one of the more specialized DEFINE_xxx + functions, such as :func:`DEFINE_string` or :func:`DEFINE_integer`. But + developers who need to create :class:`Flag` objects themselves should use + this function to register their flags. + + Args: + flag: :class:`Flag`, a flag that is key to the module. + flag_values: :class:`FlagValues`, the ``FlagValues`` instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + + Returns: + a handle to defined flag. 
+ """ + if required and flag.default is not None: + raise ValueError('Required flag --%s cannot have a non-None default' % + flag.name) + # Copying the reference to flag_values prevents pychecker warnings. + fv = flag_values + fv[flag.name] = flag + # Tell flag_values who's defining the flag. + if module_name: + module = sys.modules.get(module_name) + else: + module, module_name = _helpers.get_calling_module_object_and_name() + flag_values.register_flag_by_module(module_name, flag) + flag_values.register_flag_by_module_id(id(module), flag) + if required: + _validators.mark_flag_as_required(flag.name, fv) + ensure_non_none_value = (flag.default is not None) or required + return _flagvalues.FlagHolder( + fv, flag, ensure_non_none_value=ensure_non_none_value) + + +def set_default(flag_holder: _flagvalues.FlagHolder[_T], value: _T) -> None: + """Changes the default value of the provided flag object. + + The flag's current value is also updated if the flag is currently using + the default value, i.e. not specified in the command line, and not set + by FLAGS.name = value. + + Args: + flag_holder: FlagHolder, the flag to modify. + value: The new default value. + + Raises: + IllegalFlagValueError: Raised when value is not valid. + """ + flag_holder._flagvalues.set_default(flag_holder.name, value) # pylint: disable=protected-access + + +def override_value(flag_holder: _flagvalues.FlagHolder[_T], value: _T) -> None: + """Overrides the value of the provided flag. + + This value takes precedent over the default value and, when called after flag + parsing, any value provided at the command line. + + Args: + flag_holder: FlagHolder, the flag to modify. + value: The new value. + + Raises: + IllegalFlagValueError: The value did not pass the flag parser or validators. + """ + fv = flag_holder._flagvalues # pylint: disable=protected-access + # Ensure the new value satisfies the flag's parser while avoiding side + # effects of calling parse(). + parsed = fv[flag_holder.name]._parse(value) # pylint: disable=protected-access + if parsed != value: + raise _exceptions.IllegalFlagValueError( + 'flag %s: parsed value %r not equal to original %r' + % (flag_holder.name, parsed, value) + ) + setattr(fv, flag_holder.name, value) + + +def _internal_declare_key_flags( + flag_names: List[str], + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, + key_flag_values: Optional[_flagvalues.FlagValues] = None, +) -> None: + """Declares a flag as key for the calling module. + + Internal function. User code should call declare_key_flag or + adopt_module_key_flags instead. + + Args: + flag_names: [str], a list of names of already-registered Flag objects. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flags listed in flag_names have registered (the value of the flag_values + argument from the ``DEFINE_*`` calls that defined those flags). This + should almost never need to be overridden. + key_flag_values: :class:`FlagValues`, the FlagValues instance that (among + possibly many other things) keeps track of the key flags for each module. + Default ``None`` means "same as flag_values". This should almost never + need to be overridden. + + Raises: + UnrecognizedFlagError: Raised when the flag is not defined. 
+ """ + key_flag_values = key_flag_values or flag_values + + module = _helpers.get_calling_module() + + for flag_name in flag_names: + key_flag_values.register_key_flag_for_module(module, flag_values[flag_name]) + + +def declare_key_flag( + flag_name: Union[Text, _flagvalues.FlagHolder], + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, +) -> None: + """Declares one flag as key to the current module. + + Key flags are flags that are deemed really important for a module. + They are important when listing help messages; e.g., if the + --helpshort command-line flag is used, then only the key flags of the + main module are listed (instead of all flags, as in the case of + --helpfull). + + Sample usage:: + + flags.declare_key_flag('flag_1') + + Args: + flag_name: str | :class:`FlagHolder`, the name or holder of an already + declared flag. (Redeclaring flags as key, including flags implicitly key + because they were declared in this module, is a no-op.) + Positional-only parameter. + flag_values: :class:`FlagValues`, the FlagValues instance in which the + flag will be declared as a key flag. This should almost never need to be + overridden. + + Raises: + ValueError: Raised if flag_name not defined as a Python flag. + """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + if flag_name in _helpers.SPECIAL_FLAGS: + # Take care of the special flags, e.g., --flagfile, --undefok. + # These flags are defined in SPECIAL_FLAGS, and are treated + # specially during flag parsing, taking precedence over the + # user-defined flags. + _internal_declare_key_flags([flag_name], + flag_values=_helpers.SPECIAL_FLAGS, + key_flag_values=flag_values) + return + try: + _internal_declare_key_flags([flag_name], flag_values=flag_values) + except KeyError: + raise ValueError('Flag --%s is undefined. To set a flag as a key flag ' + 'first define it in Python.' % flag_name) + + +def adopt_module_key_flags( + module: Any, flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS +) -> None: + """Declares that all flags key to a module are key to the current module. + + Args: + module: module, the module object from which all key flags will be declared + as key flags to the current module. + flag_values: :class:`FlagValues`, the FlagValues instance in which the + flags will be declared as key flags. This should almost never need to be + overridden. + + Raises: + Error: Raised when given an argument that is a module name (a string), + instead of a module object. + """ + if not isinstance(module, types.ModuleType): + raise _exceptions.Error('Expected a module object, not %r.' % (module,)) + _internal_declare_key_flags( + [f.name for f in flag_values.get_key_flags_for_module(module.__name__)], + flag_values=flag_values) + # If module is this flag module, take _helpers.SPECIAL_FLAGS into account. + if module == _helpers.FLAGS_MODULE: + _internal_declare_key_flags( + # As we associate flags with get_calling_module_object_and_name(), the + # special flags defined in this module are incorrectly registered with + # a different module. So, we can't use get_key_flags_for_module. + # Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private + # FlagValues, where no other module should register flags). + [_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS], + flag_values=_helpers.SPECIAL_FLAGS, + key_flag_values=flag_values) + + +def disclaim_key_flags() -> None: + """Declares that the current module will not define any more key flags. 
+ + Normally, the module that calls the DEFINE_xxx functions claims the + flag to be its key flag. This is undesirable for modules that + define additional DEFINE_yyy functions with its own flag parsers and + serializers, since that module will accidentally claim flags defined + by DEFINE_yyy as its key flags. After calling this function, the + module disclaims flag definitions thereafter, so the key flags will + be correctly attributed to the caller of DEFINE_yyy. + + After calling this function, the module will not be able to define + any more flags. This function will affect all FlagValues objects. + """ + globals_for_caller = sys._getframe(1).f_globals # pylint: disable=protected-access + module, _ = _helpers.get_module_object_and_name(globals_for_caller) + _helpers.disclaim_module_ids.add(id(module)) + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: Optional[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[Text]]: + ... + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: Text, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +def DEFINE_string( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be any string.""" + parser = _argument_parser.ArgumentParser[str]() + serializer = _argument_parser.ArgumentSerializer[str]() + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: Union[None, Text, bool, int], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[bool]: + ... + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[bool]]: + ... + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: Union[Text, bool, int], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[bool]: + ... + + +def DEFINE_boolean( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a boolean flag. + + Such a boolean flag does not take an argument. 
If a user wants to + specify a false value explicitly, the long option beginning with 'no' + must be used: i.e. --noflag + + This flag will have a value of None, True or False. None is possible + if default=None and the user does not specify the flag on the command + line. + + Args: + name: str, the flag name. + default: bool|str|None, the default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + return DEFINE_flag( + _flag.BooleanFlag(name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: Union[None, float, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[float]: + ... + + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[float]]: + ... + + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: Union[float, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[float]: + ... + + +def DEFINE_float( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value must be a float. + + If ``lower_bound`` or ``upper_bound`` are set, then this flag must be + within the given range. + + Args: + name: str, the flag name. + default: float|str|None, the default value of the flag. + help: str, the help message. + lower_bound: float, min value of the flag. + upper_bound: float, max value of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to :func:`DEFINE`. + + Returns: + a handle to defined flag. 
+ """ + parser = _argument_parser.FloatParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + result = DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) + return result + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: Union[None, int, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[int]: + ... + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[int]]: + ... + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: Union[int, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[int]: + ... + + +def DEFINE_integer( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value must be an integer. + + If ``lower_bound``, or ``upper_bound`` are set, then this flag must be + within the given range. + + Args: + name: str, the flag name. + default: int|str|None, the default value of the flag. + help: str, the help message. + lower_bound: int, min value of the flag. + upper_bound: int, max value of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to :func:`DEFINE`. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.IntegerParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + result = DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) + return result + + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: Optional[Text], + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: None, + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[Text]]: + ... 
+ + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: Text, + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +def DEFINE_enum( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_values, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a flag whose value can be any string from enum_values. + + Instead of a string enum, prefer `DEFINE_enum_class`, which allows + defining enums from an `enum.Enum` class. + + Args: + name: str, the flag name. + default: str|None, the default value of the flag. + enum_values: [str], a non-empty list of strings with the possible values for + the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + result = DEFINE_flag( + _flag.EnumFlag(name, default, help, enum_values, **args), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[None, _ET, Text], + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[_ET]: + ... + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: None, + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[_ET]]: + ... + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[_ET, Text], + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[_ET]: + ... + + +def DEFINE_enum_class( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_class, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + case_sensitive=False, + required=False, + **args): + """Registers a flag whose value can be the name of enum members. + + Args: + name: str, the flag name. + default: Enum|str|None, the default value of the flag. + enum_class: class, the Enum class with all the possible values for the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. 
If not + provided, it will be computed using the stack trace of this call. + case_sensitive: bool, whether to map strings to members of the enum_class + without considering case. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + # NOTE: pytype fails if this is a direct return. + result = DEFINE_flag( + _flag.EnumClassFlag( + name, default, help, enum_class, case_sensitive=case_sensitive, **args + ), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... + + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_list( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value is a comma-separated list of strings. + + The flag value is parsed with a CSV parser. + + Args: + name: str, the flag name. + default: list|str|None, the default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.ListParser() + serializer = _argument_parser.CsvListSerializer(',') + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... 
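+
+# Illustrative sketch (not upstream code): DEFINE_enum_class maps command-line
+# strings to members of an enum.Enum subclass, matching member names
+# case-insensitively by default (case_sensitive=False), while DEFINE_list
+# parses a comma-separated value with a CSV parser. The Color enum and flag
+# names below are invented for illustration:
+#
+#   import enum
+#
+#   class Color(enum.Enum):
+#     RED = 1
+#     GREEN = 2
+#
+#   flags.DEFINE_enum_class('color', Color.RED, Color, 'Color to draw with.')
+#   flags.DEFINE_list('layers', ['conv', 'fc'], 'Layer names.')
+#
+#   # --color=green yields FLAGS.color == Color.GREEN;
+#   # --layers=a,b,c yields FLAGS.layers == ['a', 'b', 'c'].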
+ + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + comma_compat=False, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value is a whitespace-separated list of strings. + + Any whitespace can be used as a separator. + + Args: + name: str, the flag name. + default: list|str|None, the default value of the flag. + help: str, the help message. + comma_compat: bool - Whether to support comma as an additional separator. If + false then only whitespace is supported. This is intended only for + backwards compatibility with flags that used to be comma-separated. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.WhitespaceSeparatedListParser( + comma_compat=comma_compat) + serializer = _argument_parser.ListSerializer(' ') + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Iterable[_T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Union[None, _T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[_T]]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Iterable[_T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... 
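+
+# Illustrative sketch (not upstream code): DEFINE_spaceseplist splits its value
+# on any run of whitespace (and also on commas when comma_compat=True). The
+# flag name and paths below are hypothetical:
+#
+#   flags.DEFINE_spaceseplist('include_dirs', None, 'Directories to search.')
+#
+#   # --include_dirs='src  third_party/lib' yields
+#   # FLAGS.include_dirs == ['src', 'third_party/lib'].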
+ + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: _T, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +def DEFINE_multi( # pylint: disable=invalid-name,redefined-builtin + parser, + serializer, + name, + default, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a generic MultiFlag that parses its args with a given parser. + + Auxiliary function. Normal users should NOT use it directly. + + Developers who need to create their own 'Parser' classes for options + which can appear multiple times can call this module function to + register their flags. + + Args: + parser: ArgumentParser, used to parse the flag arguments. + serializer: ArgumentSerializer, the flag serializer instance. + name: str, the flag name. + default: Union[Iterable[T], Text, None], the default value of the flag. If + the value is text, it will be parsed as if it was provided from the + command line. If the value is a non-string iterable, it will be iterated + over to create a shallow copy of the values. If it is None, it is left + as-is. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: A string, the name of the Python module declaring this flag. If + not provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + result = DEFINE_flag( + _flag.MultiFlag(parser, serializer, name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_multi_string( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of any strings. + + Use the flag on the command line multiple times to place multiple + string values into the list. The 'default' may be a single string + (which will be converted into a single-element list) or a list of + strings. + + + Args: + name: str, the flag name. 
+ default: Union[Iterable[Text], Text, None], the default value of the flag; + see :func:`DEFINE_multi`. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.ArgumentParser() + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[int], int, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[int]]: + ... + + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[int]]]: + ... + + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[int], int, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[int]]: + ... + + +def DEFINE_multi_integer( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of arbitrary integers. + + Use the flag on the command line multiple times to place multiple + integer values into the list. The 'default' may be a single integer + (which will be converted into a single-element list) or a list of + integers. + + Args: + name: str, the flag name. + default: Union[Iterable[int], Text, None], the default value of the flag; + see `DEFINE_multi`. + help: str, the help message. + lower_bound: int, min values of the flag. + upper_bound: int, max values of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. 
+ """ + parser = _argument_parser.IntegerParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[float], float, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[float]]: + ... + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[float]]]: + ... + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[float], float, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[float]]: + ... + + +def DEFINE_multi_float( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of arbitrary floats. + + Use the flag on the command line multiple times to place multiple + float values into the list. The 'default' may be a single float + (which will be converted into a single-element list) or a list of + floats. + + Args: + name: str, the flag name. + default: Union[Iterable[float], Text, None], the default value of the flag; + see `DEFINE_multi`. + help: str, the help message. + lower_bound: float, min values of the flag. + upper_bound: float, max values of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.FloatParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_enum( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + enum_values: Iterable[Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_multi_enum( # pylint: disable=invalid-name + name: Text, + default: None, + enum_values: Iterable[Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... 
+
+
+@overload
+def DEFINE_multi_enum(  # pylint: disable=invalid-name
+    name: Text,
+    default: Union[Iterable[Text], Text],
+    enum_values: Iterable[Text],
+    help: Text,  # pylint: disable=redefined-builtin
+    flag_values: _flagvalues.FlagValues = ...,
+    required: bool = ...,
+    **args: Any
+) -> _flagvalues.FlagHolder[List[Text]]:
+  ...
+
+
+def DEFINE_multi_enum(  # pylint: disable=invalid-name,redefined-builtin
+    name,
+    default,
+    enum_values,
+    help,
+    flag_values=_flagvalues.FLAGS,
+    case_sensitive=True,
+    required=False,
+    **args):
+  """Registers a flag whose value can be a list of strings from enum_values.
+
+  Use the flag on the command line multiple times to place multiple
+  enum values into the list. The 'default' may be a single string
+  (which will be converted into a single-element list) or a list of
+  strings.
+
+  Args:
+    name: str, the flag name.
+    default: Union[Iterable[Text], Text, None], the default value of the flag;
+      see `DEFINE_multi`.
+    enum_values: [str], a non-empty list of strings with the possible values for
+      the flag.
+    help: str, the help message.
+    flag_values: :class:`FlagValues`, the FlagValues instance with which the
+      flag will be registered. This should almost never need to be overridden.
+    case_sensitive: bool, whether or not the enum is to be case-sensitive.
+    required: bool, is this a required flag. This must be used as a keyword
+      argument.
+    **args: Dictionary with extra keyword args that are passed to the
+      ``Flag.__init__``.
+
+  Returns:
+    a handle to defined flag.
+  """
+  parser = _argument_parser.EnumParser(enum_values, case_sensitive)
+  serializer = _argument_parser.ArgumentSerializer()
+  return DEFINE_multi(
+      parser,
+      serializer,
+      name,
+      default,
+      '<%s>: %s' % ('|'.join(enum_values), help),
+      flag_values,
+      required=True if required else False,
+      **args,
+  )
+
+
+@overload
+def DEFINE_multi_enum_class(  # pylint: disable=invalid-name
+    name: Text,
+    # This is separate from `Union[None, _ET, Iterable[Text], Text]` to avoid a
+    # Pytype issue inferring the return value to
+    # FlagHolder[List[Union[_ET, enum.Enum]]] when an iterable of concrete enum
+    # subclasses are used.
+    default: Iterable[_ET],
+    enum_class: Type[_ET],
+    help: Text,  # pylint: disable=redefined-builtin
+    flag_values: _flagvalues.FlagValues = ...,
+    module_name: Optional[Text] = ...,
+    *,
+    required: 'typing.Literal[True]',
+    **args: Any
+) -> _flagvalues.FlagHolder[List[_ET]]:
+  ...
+
+
+@overload
+def DEFINE_multi_enum_class(  # pylint: disable=invalid-name
+    name: Text,
+    default: Union[None, _ET, Iterable[Text], Text],
+    enum_class: Type[_ET],
+    help: Text,  # pylint: disable=redefined-builtin
+    flag_values: _flagvalues.FlagValues = ...,
+    module_name: Optional[Text] = ...,
+    *,
+    required: 'typing.Literal[True]',
+    **args: Any
+) -> _flagvalues.FlagHolder[List[_ET]]:
+  ...
+
+
+@overload
+def DEFINE_multi_enum_class(  # pylint: disable=invalid-name
+    name: Text,
+    default: None,
+    enum_class: Type[_ET],
+    help: Text,  # pylint: disable=redefined-builtin
+    flag_values: _flagvalues.FlagValues = ...,
+    module_name: Optional[Text] = ...,
+    required: bool = ...,
+    **args: Any
+) -> _flagvalues.FlagHolder[Optional[List[_ET]]]:
+  ...
+
+
+@overload
+def DEFINE_multi_enum_class(  # pylint: disable=invalid-name
+    name: Text,
+    # This is separate from `Union[None, _ET, Iterable[Text], Text]` to avoid a
+    # Pytype issue inferring the return value to
+    # FlagHolder[List[Union[_ET, enum.Enum]]] when an iterable of concrete enum
+    # subclasses are used.
+ default: Iterable[_ET], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[_ET, Iterable[Text], Text], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +def DEFINE_multi_enum_class( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_class, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + case_sensitive=False, + required=False, + **args): + """Registers a flag whose value can be a list of enum members. + + Use the flag on the command line multiple times to place multiple + enum values into the list. + + Args: + name: str, the flag name. + default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the + default value of the flag; see `DEFINE_multi`; only differences are + documented here. If the value is a single Enum, it is treated as a + single-item list of that Enum value. If it is an iterable, text values + within the iterable will be converted to the equivalent Enum objects. + enum_class: class, the Enum class with all the possible values for the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: A string, the name of the Python module declaring this flag. If + not provided, it will be computed using the stack trace of this call. + case_sensitive: bool, whether to map strings to members of the enum_class + without considering case. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + # NOTE: pytype fails if this is a direct return. + result = DEFINE_flag( + _flag.MultiEnumClassFlag( + name, + default, + help, + enum_class, + case_sensitive=case_sensitive, + **args, + ), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +def DEFINE_alias( # pylint: disable=invalid-name + name: Text, + original_name: Text, + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, + module_name: Optional[Text] = None, +) -> _flagvalues.FlagHolder[Any]: + """Defines an alias flag for an existing one. + + Args: + name: str, the flag name. + original_name: str, the original flag name. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: A string, the name of the module that defines this flag. + + Returns: + a handle to defined flag. + + Raises: + flags.FlagError: + UnrecognizedFlagError: if the referenced flag doesn't exist. + DuplicateFlagError: if the alias name has been used by some existing flag. 
+ """ + if original_name not in flag_values: + raise _exceptions.UnrecognizedFlagError(original_name) + flag = flag_values[original_name] + + class _FlagAlias(_flag.Flag): + """Overrides Flag class so alias value is copy of original flag value.""" + + def parse(self, argument): + flag.parse(argument) + self.present += 1 + + def _parse_from_default(self, value): + # The value was already parsed by the aliased flag, so there is no + # need to call the parser on it a second time. + # Additionally, because of how MultiFlag parses and merges values, + # it isn't possible to delegate to the aliased flag and still get + # the correct values. + return value + + @property + def value(self): + return flag.value + + @value.setter + def value(self, value): + flag.value = value + + help_msg = 'Alias for --%s.' % flag.name + # If alias_name has been used, flags.DuplicatedFlag will be raised. + return DEFINE_flag( + _FlagAlias( + flag.parser, + flag.serializer, + name, + flag.default, + help_msg, + boolean=flag.boolean), flag_values, module_name) diff --git a/MLPY/Lib/site-packages/absl/flags/_exceptions.py b/MLPY/Lib/site-packages/absl/flags/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b569d9460e31622da8d1c826ea31400fb2d74fcc --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_exceptions.py @@ -0,0 +1,108 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Exception classes in ABSL flags library. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import sys + +from absl.flags import _helpers + + +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + + +class Error(Exception): + """The base class for all flags errors.""" + + +class CantOpenFlagFileError(Error): + """Raised when flagfile fails to open. + + E.g. the file doesn't exist, or has wrong permissions. + """ + + +class DuplicateFlagError(Error): + """Raised if there is a flag naming conflict.""" + + @classmethod + def from_flag(cls, flagname, flag_values, other_flag_values=None): + """Creates a DuplicateFlagError by providing flag name and values. + + Args: + flagname: str, the name of the flag being redefined. + flag_values: :class:`FlagValues`, the FlagValues instance containing the + first definition of flagname. + other_flag_values: :class:`FlagValues`, if it is not None, it should be + the FlagValues object where the second definition of flagname occurs. + If it is None, we assume that we're being called when attempting to + create the flag a second time, and we use the module calling this one + as the source of the second definition. + + Returns: + An instance of DuplicateFlagError. 
+ """ + first_module = flag_values.find_module_defining_flag( + flagname, default='') + if other_flag_values is None: + second_module = _helpers.get_calling_module() + else: + second_module = other_flag_values.find_module_defining_flag( + flagname, default='') + flag_summary = flag_values[flagname].help + msg = ("The flag '%s' is defined twice. First from %s, Second from %s. " + "Description from first occurrence: %s") % ( + flagname, first_module, second_module, flag_summary) + return cls(msg) + + +class IllegalFlagValueError(Error): + """Raised when the flag command line argument is illegal.""" + + +class UnrecognizedFlagError(Error): + """Raised when a flag is unrecognized. + + Attributes: + flagname: str, the name of the unrecognized flag. + flagvalue: The value of the flag, empty if the flag is not defined. + """ + + def __init__(self, flagname, flagvalue='', suggestions=None): + self.flagname = flagname + self.flagvalue = flagvalue + if suggestions: + # Space before the question mark is intentional to not include it in the + # selection when copy-pasting the suggestion from (some) terminals. + tip = '. Did you mean: %s ?' % ', '.join(suggestions) + else: + tip = '' + super(UnrecognizedFlagError, self).__init__( + 'Unknown command line flag \'%s\'%s' % (flagname, tip)) + + +class UnparsedFlagAccessError(Error): + """Raised when accessing the flag value from unparsed :class:`FlagValues`.""" + + +class ValidationError(Error): + """Raised when flag validator constraint is not satisfied.""" + + +class FlagNameConflictsWithMethodError(Error): + """Raised when a flag name conflicts with :class:`FlagValues` methods.""" diff --git a/MLPY/Lib/site-packages/absl/flags/_flag.py b/MLPY/Lib/site-packages/absl/flags/_flag.py new file mode 100644 index 0000000000000000000000000000000000000000..67117880c02b36de1291c12cd37e4906d932996f --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_flag.py @@ -0,0 +1,556 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains Flag class - information about single command-line flag. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +from collections import abc +import copy +import enum +import functools +from typing import Any, Dict, Generic, Iterable, List, Optional, Text, Type, TypeVar, Union +from xml.dom import minidom + +from absl.flags import _argument_parser +from absl.flags import _exceptions +from absl.flags import _helpers + +_T = TypeVar('_T') +_ET = TypeVar('_ET', bound=enum.Enum) + + +@functools.total_ordering +class Flag(Generic[_T]): + """Information about a command-line flag. + + Attributes: + name: the name for this flag + default: the default value for this flag + default_unparsed: the unparsed default value for this flag. 
+ default_as_str: default value as repr'd string, e.g., "'true'" + (or None) + value: the most recent parsed value of this flag set by :meth:`parse` + help: a help string or None if no help is available + short_name: the single letter alias for this flag (or None) + boolean: if 'true', this flag does not accept arguments + present: true if this flag was parsed from command line flags + parser: an :class:`~absl.flags.ArgumentParser` object + serializer: an ArgumentSerializer object + allow_override: the flag may be redefined without raising an error, + and newly defined flag overrides the old one. + allow_override_cpp: use the flag from C++ if available the flag + definition is replaced by the C++ flag after init + allow_hide_cpp: use the Python flag despite having a C++ flag with + the same name (ignore the C++ flag) + using_default_value: the flag value has not been set by user + allow_overwrite: the flag may be parsed more than once without + raising an error, the last set value will be used + allow_using_method_names: whether this flag can be defined even if + it has a name that conflicts with a FlagValues method. + validators: list of the flag validators. + + The only public method of a ``Flag`` object is :meth:`parse`, but it is + typically only called by a :class:`~absl.flags.FlagValues` object. The + :meth:`parse` method is a thin wrapper around the + :meth:`ArgumentParser.parse()` method. The + parsed value is saved in ``.value``, and the ``.present`` attribute is + updated. If this flag was already present, an Error is raised. + + :meth:`parse` is also called during ``__init__`` to parse the default value + and initialize the ``.value`` attribute. This enables other python modules to + safely use flags even if the ``__main__`` module neglects to parse the + command line arguments. The ``.present`` attribute is cleared after + ``__init__`` parsing. If the default value is set to ``None``, then the + ``__init__`` parsing step is skipped and the ``.value`` attribute is + initialized to None. + + Note: The default value is also presented to the user in the help + string, so it is important that it be a legal value for this flag. + """ + + # NOTE: pytype doesn't find defaults without this. 
+ default: Optional[_T] + default_as_str: Optional[Text] + default_unparsed: Union[Optional[_T], Text] + + def __init__( + self, + parser: _argument_parser.ArgumentParser[_T], + serializer: Optional[_argument_parser.ArgumentSerializer[_T]], + name: Text, + default: Union[Optional[_T], Text], + help_string: Optional[Text], + short_name: Optional[Text] = None, + boolean: bool = False, + allow_override: bool = False, + allow_override_cpp: bool = False, + allow_hide_cpp: bool = False, + allow_overwrite: bool = True, + allow_using_method_names: bool = False, + ) -> None: + self.name = name + + if not help_string: + help_string = '(no help available)' + + self.help = help_string + self.short_name = short_name + self.boolean = boolean + self.present = 0 + self.parser = parser + self.serializer = serializer + self.allow_override = allow_override + self.allow_override_cpp = allow_override_cpp + self.allow_hide_cpp = allow_hide_cpp + self.allow_overwrite = allow_overwrite + self.allow_using_method_names = allow_using_method_names + + self.using_default_value = True + self._value = None + self.validators = [] + if self.allow_hide_cpp and self.allow_override_cpp: + raise _exceptions.Error( + "Can't have both allow_hide_cpp (means use Python flag) and " + 'allow_override_cpp (means use C++ flag after InitGoogle)') + + self._set_default(default) + + @property + def value(self) -> Optional[_T]: + return self._value + + @value.setter + def value(self, value: Optional[_T]): + self._value = value + + def __hash__(self): + return hash(id(self)) + + def __eq__(self, other): + return self is other + + def __lt__(self, other): + if isinstance(other, Flag): + return id(self) < id(other) + return NotImplemented + + def __bool__(self): + raise TypeError('A Flag instance would always be True. ' + 'Did you mean to test the `.value` attribute?') + + def __getstate__(self): + raise TypeError("can't pickle Flag objects") + + def __copy__(self): + raise TypeError('%s does not support shallow copies. ' + 'Use copy.deepcopy instead.' % type(self).__name__) + + def __deepcopy__(self, memo: Dict[int, Any]) -> 'Flag[_T]': + result = object.__new__(type(self)) + result.__dict__ = copy.deepcopy(self.__dict__, memo) + return result + + def _get_parsed_value_as_string(self, value: Optional[_T]) -> Optional[Text]: + """Returns parsed flag value as string.""" + if value is None: + return None + if self.serializer: + return repr(self.serializer.serialize(value)) + if self.boolean: + if value: + return repr('true') + else: + return repr('false') + return repr(str(value)) + + def parse(self, argument: Union[Text, Optional[_T]]) -> None: + """Parses string and sets flag value. + + Args: + argument: str or the correct flag value type, argument to be parsed. + """ + if self.present and not self.allow_overwrite: + raise _exceptions.IllegalFlagValueError( + 'flag --%s=%s: already defined as %s' % ( + self.name, argument, self.value)) + self.value = self._parse(argument) + self.present += 1 + + def _parse(self, argument: Union[Text, _T]) -> Optional[_T]: + """Internal parse function. + + It returns the parsed value, and does not modify class states. + + Args: + argument: str or the correct flag value type, argument to be parsed. + + Returns: + The parsed value. + """ + try: + return self.parser.parse(argument) + except (TypeError, ValueError) as e: # Recast as IllegalFlagValueError. 
+ raise _exceptions.IllegalFlagValueError( + 'flag --%s=%s: %s' % (self.name, argument, e)) + + def unparse(self) -> None: + self.value = self.default + self.using_default_value = True + self.present = 0 + + def serialize(self) -> Text: + """Serializes the flag.""" + return self._serialize(self.value) + + def _serialize(self, value: Optional[_T]) -> Text: + """Internal serialize function.""" + if value is None: + return '' + if self.boolean: + if value: + return '--%s' % self.name + else: + return '--no%s' % self.name + else: + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name) + return '--%s=%s' % (self.name, self.serializer.serialize(value)) + + def _set_default(self, value: Union[Optional[_T], Text]) -> None: + """Changes the default value (and current value too) for this Flag.""" + self.default_unparsed = value + if value is None: + self.default = None + else: + self.default = self._parse_from_default(value) + self.default_as_str = self._get_parsed_value_as_string(self.default) + if self.using_default_value: + self.value = self.default + + # This is split out so that aliases can skip regular parsing of the default + # value. + def _parse_from_default(self, value: Union[Text, _T]) -> Optional[_T]: + return self._parse(value) + + def flag_type(self) -> Text: + """Returns a str that describes the type of the flag. + + NOTE: we use strings, and not the types.*Type constants because + our flags can have more exotic types, e.g., 'comma separated list + of strings', 'whitespace separated list of strings', etc. + """ + return self.parser.flag_type() + + def _create_xml_dom_element( + self, doc: minidom.Document, module_name: str, is_key: bool = False + ) -> minidom.Element: + """Returns an XML element that contains this flag's information. + + This is information that is relevant to all flags (e.g., name, + meaning, etc.). If you defined a flag that has some other pieces of + info, then please override _ExtraXMLInfo. + + Please do NOT override this method. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + module_name: str,, the name of the module that defines this flag. + is_key: boolean, True iff this flag is key for main module. + + Returns: + A minidom.Element instance. + """ + element = doc.createElement('flag') + if is_key: + element.appendChild(_helpers.create_xml_dom_element(doc, 'key', 'yes')) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'file', module_name)) + # Adds flag features that are relevant for all flags. + element.appendChild(_helpers.create_xml_dom_element(doc, 'name', self.name)) + if self.short_name: + element.appendChild(_helpers.create_xml_dom_element( + doc, 'short_name', self.short_name)) + if self.help: + element.appendChild(_helpers.create_xml_dom_element( + doc, 'meaning', self.help)) + # The default flag value can either be represented as a string like on the + # command line, or as a Python object. We serialize this value in the + # latter case in order to remain consistent. 
+ if self.serializer and not isinstance(self.default, str): + if self.default is not None: + default_serialized = self.serializer.serialize(self.default) + else: + default_serialized = '' + else: + default_serialized = self.default + element.appendChild(_helpers.create_xml_dom_element( + doc, 'default', default_serialized)) + value_serialized = self._serialize_value_for_xml(self.value) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'current', value_serialized)) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'type', self.flag_type())) + # Adds extra flag features this flag may have. + for e in self._extra_xml_dom_elements(doc): + element.appendChild(e) + return element + + def _serialize_value_for_xml(self, value: Optional[_T]) -> Any: + """Returns the serialized value, for use in an XML help text.""" + return value + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + """Returns extra info about this flag in XML. + + "Extra" means "not already included by _create_xml_dom_element above." + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + + Returns: + A list of minidom.Element. + """ + # Usually, the parser knows the extra details about the flag, so + # we just forward the call to it. + return self.parser._custom_xml_dom_elements(doc) # pylint: disable=protected-access + + +class BooleanFlag(Flag[bool]): + """Basic boolean flag. + + Boolean flags do not take any arguments, and their value is either + ``True`` (1) or ``False`` (0). The false value is specified on the command + line by prepending the word ``'no'`` to either the long or the short flag + name. + + For example, if a Boolean flag was created whose long name was + ``'update'`` and whose short name was ``'x'``, then this flag could be + explicitly unset through either ``--noupdate`` or ``--nox``. + """ + + def __init__( + self, + name: Text, + default: Union[Optional[bool], Text], + help: Optional[Text], # pylint: disable=redefined-builtin + short_name: Optional[Text] = None, + **args + ) -> None: + p = _argument_parser.BooleanParser() + super(BooleanFlag, self).__init__( + p, None, name, default, help, short_name, True, **args + ) + + +class EnumFlag(Flag[Text]): + """Basic enum flag; its value can be any string from list of enum_values.""" + + def __init__( + self, + name: Text, + default: Optional[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + enum_values: Iterable[Text], + short_name: Optional[Text] = None, + case_sensitive: bool = True, + **args + ): + p = _argument_parser.EnumParser(enum_values, case_sensitive) + g = _argument_parser.ArgumentSerializer() + super(EnumFlag, self).__init__( + p, g, name, default, help, short_name, **args) + # NOTE: parser should be typed EnumParser but the constructor + # restricts the available interface to ArgumentParser[str]. 
+ self.parser = p + self.help = '<%s>: %s' % ('|'.join(p.enum_values), self.help) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_values: + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class EnumClassFlag(Flag[_ET]): + """Basic enum flag; its value is an enum class's member.""" + + def __init__( + self, + name: Text, + default: Union[Optional[_ET], Text], + help: Optional[Text], # pylint: disable=redefined-builtin + enum_class: Type[_ET], + short_name: Optional[Text] = None, + case_sensitive: bool = False, + **args + ): + p = _argument_parser.EnumClassParser( + enum_class, case_sensitive=case_sensitive) + g = _argument_parser.EnumClassSerializer(lowercase=not case_sensitive) + super(EnumClassFlag, self).__init__( + p, g, name, default, help, short_name, **args) + # NOTE: parser should be typed EnumClassParser[_ET] but the constructor + # restricts the available interface to ArgumentParser[_ET]. + self.parser = p + self.help = '<%s>: %s' % ('|'.join(p.member_names), self.help) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_class.__members__.keys(): + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class MultiFlag(Generic[_T], Flag[List[_T]]): + """A flag that can appear multiple time on the command-line. + + The value of such a flag is a list that contains the individual values + from all the appearances of that flag on the command-line. + + See the __doc__ for Flag for most behavior of this class. Only + differences in behavior are described here: + + * The default value may be either a single value or an iterable of values. + A single value is transformed into a single-item list of that value. + + * The value of the flag is always a list, even if the option was + only supplied once, and even if the default value is a single + value + """ + + def __init__(self, *args, **kwargs): + super(MultiFlag, self).__init__(*args, **kwargs) + self.help += ';\n repeat this option to specify a list of values' + + def parse(self, arguments: Union[Text, _T, Iterable[_T]]): # pylint: disable=arguments-renamed + """Parses one or more arguments with the installed parser. + + Args: + arguments: a single argument or a list of arguments (typically a + list of default values); a single argument is converted + internally into a list containing one item. + """ + new_values = self._parse(arguments) + if self.present: + self.value.extend(new_values) + else: + self.value = new_values + self.present += len(new_values) + + def _parse(self, arguments: Union[Text, Optional[Iterable[_T]]]) -> List[_T]: # pylint: disable=arguments-renamed + if (isinstance(arguments, abc.Iterable) and + not isinstance(arguments, str)): + arguments = list(arguments) + + if not isinstance(arguments, list): + # Default value may be a list of values. Most other arguments + # will not be, so convert them into a single-item list to make + # processing simpler below. 
+ arguments = [arguments] + + return [super(MultiFlag, self)._parse(item) for item in arguments] + + def _serialize(self, value: Optional[List[_T]]) -> Text: + """See base class.""" + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name) + if value is None: + return '' + + serialized_items = [ + super(MultiFlag, self)._serialize(value_item) for value_item in value + ] + + return '\n'.join(serialized_items) + + def flag_type(self): + """See base class.""" + return 'multi ' + self.parser.flag_type() + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + if hasattr(self.parser, 'enum_values'): + for enum_value in self.parser.enum_values: # pytype: disable=attribute-error + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class MultiEnumClassFlag(MultiFlag[_ET]): # pytype: disable=not-indexable + """A multi_enum_class flag. + + See the __doc__ for MultiFlag for most behaviors of this class. In addition, + this class knows how to handle enum.Enum instances as values for this flag + type. + """ + + def __init__( + self, + name: str, + default: Union[None, Iterable[_ET], _ET, Iterable[Text], Text], + help_string: str, + enum_class: Type[_ET], + case_sensitive: bool = False, + **args + ): + p = _argument_parser.EnumClassParser( + enum_class, case_sensitive=case_sensitive) + g = _argument_parser.EnumClassListSerializer( + list_sep=',', lowercase=not case_sensitive) + super(MultiEnumClassFlag, self).__init__( + p, g, name, default, help_string, **args) + # NOTE: parser should be typed EnumClassParser[_ET] but the constructor + # restricts the available interface to ArgumentParser[str]. + self.parser = p + # NOTE: serializer should be non-Optional but this isn't inferred. + self.serializer = g + self.help = ( + '<%s>: %s;\n repeat this option to specify a list of values' % + ('|'.join(p.member_names), help_string or '(no help available)')) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_class.__members__.keys(): # pytype: disable=attribute-error + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + def _serialize_value_for_xml(self, value): + """See base class.""" + if value is not None: + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name + ) + value_serialized = self.serializer.serialize(value) + else: + value_serialized = '' + return value_serialized diff --git a/MLPY/Lib/site-packages/absl/flags/_flagvalues.py b/MLPY/Lib/site-packages/absl/flags/_flagvalues.py new file mode 100644 index 0000000000000000000000000000000000000000..e25f1d3e100b80aafb51b3a5b5a25772d2b6119c --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_flagvalues.py @@ -0,0 +1,1480 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Defines the FlagValues class - registry of 'Flag' objects. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import copy +import itertools +import logging +import os +import sys +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Text, TextIO, Generic, TypeVar, Union, Tuple +from xml.dom import minidom + +from absl.flags import _exceptions +from absl.flags import _flag +from absl.flags import _helpers +from absl.flags import _validators_classes +from absl.flags._flag import Flag + +# Add flagvalues module to disclaimed module ids. +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + +_T = TypeVar('_T') + + +class FlagValues: + """Registry of :class:`~absl.flags.Flag` objects. + + A :class:`FlagValues` can then scan command line arguments, passing flag + arguments through to the 'Flag' objects that it owns. It also + provides easy access to the flag values. Typically only one + :class:`FlagValues` object is needed by an application: + :const:`FLAGS`. + + This class is heavily overloaded: + + :class:`Flag` objects are registered via ``__setitem__``:: + + FLAGS['longname'] = x # register a new flag + + The ``.value`` attribute of the registered :class:`~absl.flags.Flag` objects + can be accessed as attributes of this :class:`FlagValues` object, through + ``__getattr__``. Both the long and short name of the original + :class:`~absl.flags.Flag` objects can be used to access its value:: + + FLAGS.longname # parsed flag value + FLAGS.x # parsed flag value (short name) + + Command line arguments are scanned and passed to the registered + :class:`~absl.flags.Flag` objects through the ``__call__`` method. Unparsed + arguments, including ``argv[0]`` (e.g. the program name) are returned:: + + argv = FLAGS(sys.argv) # scan command line arguments + + The original registered :class:`~absl.flags.Flag` objects can be retrieved + through the use of the dictionary-like operator, ``__getitem__``:: + + x = FLAGS['longname'] # access the registered Flag object + + The ``str()`` operator of a :class:`absl.flags.FlagValues` object provides + help for all of the registered :class:`~absl.flags.Flag` objects. + """ + + _HAS_DYNAMIC_ATTRIBUTES = True + + # A note on collections.abc.Mapping: + # FlagValues defines __getitem__, __iter__, and __len__. It makes perfect + # sense to let it be a collections.abc.Mapping class. However, we are not + # able to do so. The mixin methods, e.g. keys, values, are not uncommon flag + # names. Those flag values would not be accessible via the FLAGS.xxx form. + + __dict__: Dict[str, Any] + + def __init__(self): + # Since everything in this class is so heavily overloaded, the only + # way of defining and using fields is to access __dict__ directly. + + # Dictionary: flag name (string) -> Flag object. + self.__dict__['__flags'] = {} + + # Set: name of hidden flag (string). + # Holds flags that should not be directly accessible from Python. + self.__dict__['__hiddenflags'] = set() + + # Dictionary: module name (string) -> list of Flag objects that are defined + # by that module. + self.__dict__['__flags_by_module'] = {} + # Dictionary: module id (int) -> list of Flag objects that are defined by + # that module. + self.__dict__['__flags_by_module_id'] = {} + # Dictionary: module name (string) -> list of Flag objects that are + # key for that module. + self.__dict__['__key_flags_by_module'] = {} + + # Bool: True if flags were parsed. 
+ self.__dict__['__flags_parsed'] = False + + # Bool: True if unparse_flags() was called. + self.__dict__['__unparse_flags_called'] = False + + # None or Method(name, value) to call from __setattr__ for an unknown flag. + self.__dict__['__set_unknown'] = None + + # A set of banned flag names. This is to prevent users from accidentally + # defining a flag that has the same name as a method on this class. + # Users can still allow defining the flag by passing + # allow_using_method_names=True in DEFINE_xxx functions. + self.__dict__['__banned_flag_names'] = frozenset(dir(FlagValues)) + + # Bool: Whether to use GNU style scanning. + self.__dict__['__use_gnu_getopt'] = True + + # Bool: Whether use_gnu_getopt has been explicitly set by the user. + self.__dict__['__use_gnu_getopt_explicitly_set'] = False + + # Function: Takes a flag name as parameter, returns a tuple + # (is_retired, type_is_bool). + self.__dict__['__is_retired_flag_func'] = None + + def set_gnu_getopt(self, gnu_getopt: bool = True) -> None: + """Sets whether or not to use GNU style scanning. + + GNU style allows mixing of flag and non-flag arguments. See + http://docs.python.org/library/getopt.html#getopt.gnu_getopt + + Args: + gnu_getopt: bool, whether or not to use GNU style scanning. + """ + self.__dict__['__use_gnu_getopt'] = gnu_getopt + self.__dict__['__use_gnu_getopt_explicitly_set'] = True + + def is_gnu_getopt(self) -> bool: + return self.__dict__['__use_gnu_getopt'] + + def _flags(self) -> Dict[Text, Flag]: + return self.__dict__['__flags'] + + def flags_by_module_dict(self) -> Dict[Text, List[Flag]]: + """Returns the dictionary of module_name -> list of defined flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. + """ + return self.__dict__['__flags_by_module'] + + def flags_by_module_id_dict(self) -> Dict[int, List[Flag]]: + """Returns the dictionary of module_id -> list of defined flags. + + Returns: + A dictionary. Its keys are module IDs (ints). Its values + are lists of Flag objects. + """ + return self.__dict__['__flags_by_module_id'] + + def key_flags_by_module_dict(self) -> Dict[Text, List[Flag]]: + """Returns the dictionary of module_name -> list of key flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. + """ + return self.__dict__['__key_flags_by_module'] + + def register_flag_by_module(self, module_name: Text, flag: Flag) -> None: + """Records the module that defines a specific flag. + + We keep track of which flag is defined by which module so that we + can later sort the flags by module. + + Args: + module_name: str, the name of a Python module. + flag: Flag, the Flag instance that is key to the module. + """ + flags_by_module = self.flags_by_module_dict() + flags_by_module.setdefault(module_name, []).append(flag) + + def register_flag_by_module_id(self, module_id: int, flag: Flag) -> None: + """Records the module that defines a specific flag. + + Args: + module_id: int, the ID of the Python module. + flag: Flag, the Flag instance that is key to the module. + """ + flags_by_module_id = self.flags_by_module_id_dict() + flags_by_module_id.setdefault(module_id, []).append(flag) + + def register_key_flag_for_module(self, module_name: Text, flag: Flag) -> None: + """Specifies that a flag is a key flag for a module. + + Args: + module_name: str, the name of a Python module. + flag: Flag, the Flag instance that is key to the module. 
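+
+    Most callers should use :func:`absl.flags.declare_key_flag` or
+    :func:`absl.flags.adopt_module_key_flags` instead of calling this
+    method directly.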
+ """ + key_flags_by_module = self.key_flags_by_module_dict() + # The list of key flags for the module named module_name. + key_flags = key_flags_by_module.setdefault(module_name, []) + # Add flag, but avoid duplicates. + if flag not in key_flags: + key_flags.append(flag) + + def _flag_is_registered(self, flag_obj: Flag) -> bool: + """Checks whether a Flag object is registered under long name or short name. + + Args: + flag_obj: Flag, the Flag instance to check for. + + Returns: + bool, True iff flag_obj is registered under long name or short name. + """ + flag_dict = self._flags() + # Check whether flag_obj is registered under its long name. + name = flag_obj.name + if flag_dict.get(name, None) == flag_obj: + return True + # Check whether flag_obj is registered under its short name. + short_name = flag_obj.short_name + if (short_name is not None and flag_dict.get(short_name, None) == flag_obj): + return True + return False + + def _cleanup_unregistered_flag_from_module_dicts( + self, flag_obj: Flag + ) -> None: + """Cleans up unregistered flags from all module -> [flags] dictionaries. + + If flag_obj is registered under either its long name or short name, it + won't be removed from the dictionaries. + + Args: + flag_obj: Flag, the Flag instance to clean up for. + """ + if self._flag_is_registered(flag_obj): + return + for flags_by_module_dict in (self.flags_by_module_dict(), + self.flags_by_module_id_dict(), + self.key_flags_by_module_dict()): + for flags_in_module in flags_by_module_dict.values(): + # While (as opposed to if) takes care of multiple occurrences of a + # flag in the list for the same module. + while flag_obj in flags_in_module: + flags_in_module.remove(flag_obj) + + def get_flags_for_module(self, module: Union[Text, Any]) -> List[Flag]: + """Returns the list of flags defined by a module. + + Args: + module: module|str, the module to get flags from. + + Returns: + [Flag], a new list of Flag instances. Caller may update this list as + desired: none of those changes will affect the internals of this + FlagValue instance. + """ + if not isinstance(module, str): + module = module.__name__ + if module == '__main__': + module = sys.argv[0] + + return list(self.flags_by_module_dict().get(module, [])) + + def get_key_flags_for_module(self, module: Union[Text, Any]) -> List[Flag]: + """Returns the list of key flags for a module. + + Args: + module: module|str, the module to get key flags from. + + Returns: + [Flag], a new list of Flag instances. Caller may update this list as + desired: none of those changes will affect the internals of this + FlagValue instance. + """ + if not isinstance(module, str): + module = module.__name__ + if module == '__main__': + module = sys.argv[0] + + # Any flag is a key flag for the module that defined it. NOTE: + # key_flags is a fresh list: we can update it without affecting the + # internals of this FlagValues object. + key_flags = self.get_flags_for_module(module) + + # Take into account flags explicitly declared as key for a module. + for flag in self.key_flags_by_module_dict().get(module, []): + if flag not in key_flags: + key_flags.append(flag) + return key_flags + + # TODO(yileiyang): Restrict default to Optional[Text]. + def find_module_defining_flag( + self, flagname: Text, default: Optional[_T] = None + ) -> Union[str, Optional[_T]]: + """Return the name of the module defining this flag, or default. + + Args: + flagname: str, name of the flag to lookup. + default: Value to return if flagname is not defined. Defaults to None. 
+ + Returns: + The name of the module which registered the flag with this name. + If no such module exists (i.e. no flag with this name exists), + we return default. + """ + registered_flag = self._flags().get(flagname) + if registered_flag is None: + return default + for module, flags in self.flags_by_module_dict().items(): + for flag in flags: + # It must compare the flag with the one in _flags. This is because a + # flag might be overridden only for its long name (or short name), + # and only its short name (or long name) is considered registered. + if (flag.name == registered_flag.name and + flag.short_name == registered_flag.short_name): + return module + return default + + # TODO(yileiyang): Restrict default to Optional[Text]. + def find_module_id_defining_flag( + self, flagname: Text, default: Optional[_T] = None + ) -> Union[int, Optional[_T]]: + """Return the ID of the module defining this flag, or default. + + Args: + flagname: str, name of the flag to lookup. + default: Value to return if flagname is not defined. Defaults to None. + + Returns: + The ID of the module which registered the flag with this name. + If no such module exists (i.e. no flag with this name exists), + we return default. + """ + registered_flag = self._flags().get(flagname) + if registered_flag is None: + return default + for module_id, flags in self.flags_by_module_id_dict().items(): + for flag in flags: + # It must compare the flag with the one in _flags. This is because a + # flag might be overridden only for its long name (or short name), + # and only its short name (or long name) is considered registered. + if (flag.name == registered_flag.name and + flag.short_name == registered_flag.short_name): + return module_id + return default + + def _register_unknown_flag_setter( + self, setter: Callable[[str, Any], None] + ) -> None: + """Allow set default values for undefined flags. + + Args: + setter: Method(name, value) to call to __setattr__ an unknown flag. Must + raise NameError or ValueError for invalid name/value. + """ + self.__dict__['__set_unknown'] = setter + + def _set_unknown_flag(self, name: str, value: _T) -> _T: + """Returns value if setting flag |name| to |value| returned True. + + Args: + name: str, name of the flag to set. + value: Value to set. + + Returns: + Flag value on successful call. + + Raises: + UnrecognizedFlagError + IllegalFlagValueError + """ + setter = self.__dict__['__set_unknown'] + if setter: + try: + setter(name, value) + return value + except (TypeError, ValueError): # Flag value is not valid. + raise _exceptions.IllegalFlagValueError( + '"{1}" is not valid for --{0}'.format(name, value)) + except NameError: # Flag name is not valid. + pass + raise _exceptions.UnrecognizedFlagError(name, value) + + def append_flag_values(self, flag_values: 'FlagValues') -> None: + """Appends flags registered in another FlagValues instance. + + Args: + flag_values: FlagValues, the FlagValues instance from which to copy flags. + """ + for flag_name, flag in flag_values._flags().items(): # pylint: disable=protected-access + # Each flags with short_name appears here twice (once under its + # normal name, and again with its short name). To prevent + # problems (DuplicateFlagError) with double flag registration, we + # perform a check to make sure that the entry we're looking at is + # for its normal name. 
+ if flag_name == flag.name: + try: + self[flag_name] = flag + except _exceptions.DuplicateFlagError: + raise _exceptions.DuplicateFlagError.from_flag( + flag_name, self, other_flag_values=flag_values) + + def remove_flag_values( + self, flag_values: 'Union[FlagValues, Iterable[Text]]' + ) -> None: + """Remove flags that were previously appended from another FlagValues. + + Args: + flag_values: FlagValues, the FlagValues instance containing flags to + remove. + """ + for flag_name in flag_values: + self.__delattr__(flag_name) + + def __setitem__(self, name: Text, flag: Flag) -> None: + """Registers a new flag variable.""" + fl = self._flags() + if not isinstance(flag, _flag.Flag): + raise _exceptions.IllegalFlagValueError( + f'Expect Flag instances, found type {type(flag)}. ' + "Maybe you didn't mean to use FlagValue.__setitem__?") + if not isinstance(name, str): + raise _exceptions.Error('Flag name must be a string') + if not name: + raise _exceptions.Error('Flag name cannot be empty') + if ' ' in name: + raise _exceptions.Error('Flag name cannot contain a space') + self._check_method_name_conflicts(name, flag) + if name in fl and not flag.allow_override and not fl[name].allow_override: + module, module_name = _helpers.get_calling_module_object_and_name() + if (self.find_module_defining_flag(name) == module_name and + id(module) != self.find_module_id_defining_flag(name)): + # If the flag has already been defined by a module with the same name, + # but a different ID, we can stop here because it indicates that the + # module is simply being imported a subsequent time. + return + raise _exceptions.DuplicateFlagError.from_flag(name, self) + # If a new flag overrides an old one, we need to cleanup the old flag's + # modules if it's not registered. + flags_to_cleanup = set() + short_name: str = flag.short_name # pytype: disable=annotation-type-mismatch + if short_name is not None: + if (short_name in fl and not flag.allow_override and + not fl[short_name].allow_override): + raise _exceptions.DuplicateFlagError.from_flag(short_name, self) + if short_name in fl and fl[short_name] != flag: + flags_to_cleanup.add(fl[short_name]) + fl[short_name] = flag + if (name not in fl # new flag + or fl[name].using_default_value or not flag.using_default_value): + if name in fl and fl[name] != flag: + flags_to_cleanup.add(fl[name]) + fl[name] = flag + for f in flags_to_cleanup: + self._cleanup_unregistered_flag_from_module_dicts(f) + + def __dir__(self) -> List[Text]: + """Returns list of names of all defined flags. + + Useful for TAB-completion in ipython. + + Returns: + [str], a list of names of all defined flags. + """ + return sorted(self.__dict__['__flags']) + + def __getitem__(self, name: Text) -> Flag: + """Returns the Flag object for the flag --name.""" + return self._flags()[name] + + def _hide_flag(self, name): + """Marks the flag --name as hidden.""" + self.__dict__['__hiddenflags'].add(name) + + def __getattr__(self, name: Text) -> Any: + """Retrieves the 'value' attribute of the flag --name.""" + fl = self._flags() + if name not in fl: + raise AttributeError(name) + if name in self.__dict__['__hiddenflags']: + raise AttributeError(name) + + if self.__dict__['__flags_parsed'] or fl[name].present: + return fl[name].value + else: + raise _exceptions.UnparsedFlagAccessError( + 'Trying to access flag --%s before flags were parsed.' 
% name) + + def __setattr__(self, name: Text, value: _T) -> _T: + """Sets the 'value' attribute of the flag --name.""" + self._set_attributes(**{name: value}) + return value + + def _set_attributes(self, **attributes: Any) -> None: + """Sets multiple flag values together, triggers validators afterwards.""" + fl = self._flags() + known_flag_vals = {} + known_flag_used_defaults = {} + try: + for name, value in attributes.items(): + if name in self.__dict__['__hiddenflags']: + raise AttributeError(name) + if name in fl: + orig = fl[name].value + fl[name].value = value + known_flag_vals[name] = orig + else: + self._set_unknown_flag(name, value) + for name in known_flag_vals: + self._assert_validators(fl[name].validators) + known_flag_used_defaults[name] = fl[name].using_default_value + fl[name].using_default_value = False + except: + for name, orig in known_flag_vals.items(): + fl[name].value = orig + for name, orig in known_flag_used_defaults.items(): + fl[name].using_default_value = orig + # NOTE: We do not attempt to undo unknown flag side effects because we + # cannot reliably undo the user-configured behavior. + raise + + def validate_all_flags(self) -> None: + """Verifies whether all flags pass validation. + + Raises: + AttributeError: Raised if validators work with a non-existing flag. + IllegalFlagValueError: Raised if validation fails for at least one + validator. + """ + all_validators = set() + for flag in self._flags().values(): + all_validators.update(flag.validators) + self._assert_validators(all_validators) + + def _assert_validators( + self, validators: Iterable[_validators_classes.Validator] + ) -> None: + """Asserts if all validators in the list are satisfied. + + It asserts validators in the order they were created. + + Args: + validators: Iterable(validators.Validator), validators to be verified. + + Raises: + AttributeError: Raised if validators work with a non-existing flag. + IllegalFlagValueError: Raised if validation fails for at least one + validator. + """ + messages = [] + bad_flags = set() + for validator in sorted( + validators, key=lambda validator: validator.insertion_index): + try: + if isinstance(validator, _validators_classes.SingleFlagValidator): + if validator.flag_name in bad_flags: + continue + elif isinstance(validator, _validators_classes.MultiFlagsValidator): + if bad_flags & set(validator.flag_names): + continue + validator.verify(self) + except _exceptions.ValidationError as e: + if isinstance(validator, _validators_classes.SingleFlagValidator): + bad_flags.add(validator.flag_name) + elif isinstance(validator, _validators_classes.MultiFlagsValidator): + bad_flags.update(set(validator.flag_names)) + message = validator.print_flags_with_values(self) + messages.append('%s: %s' % (message, str(e))) + if messages: + raise _exceptions.IllegalFlagValueError('\n'.join(messages)) + + def __delattr__(self, flag_name: Text) -> None: + """Deletes a previously-defined flag from a flag object. + + This method makes sure we can delete a flag by using + + del FLAGS. + + E.g., + + flags.DEFINE_integer('foo', 1, 'Integer flag.') + del flags.FLAGS.foo + + If a flag is also registered by its the other name (long name or short + name), the other name won't be deleted. + + Args: + flag_name: str, the name of the flag to be deleted. + + Raises: + AttributeError: Raised when there is no registered flag named flag_name. 
+ """ + fl = self._flags() + if flag_name not in fl: + raise AttributeError(flag_name) + + flag_obj = fl[flag_name] + del fl[flag_name] + + self._cleanup_unregistered_flag_from_module_dicts(flag_obj) + + def set_default(self, name: Text, value: Any) -> None: + """Changes the default value of the named flag object. + + The flag's current value is also updated if the flag is currently using + the default value, i.e. not specified in the command line, and not set + by FLAGS.name = value. + + Args: + name: str, the name of the flag to modify. + value: The new default value. + + Raises: + UnrecognizedFlagError: Raised when there is no registered flag named name. + IllegalFlagValueError: Raised when value is not valid. + """ + fl = self._flags() + if name not in fl: + self._set_unknown_flag(name, value) + return + fl[name]._set_default(value) # pylint: disable=protected-access + self._assert_validators(fl[name].validators) + + def __contains__(self, name: Text) -> bool: + """Returns True if name is a value (flag) in the dict.""" + return name in self._flags() + + def __len__(self) -> int: + return len(self.__dict__['__flags']) + + def __iter__(self) -> Iterator[Text]: + return iter(self._flags()) + + def __call__( + self, argv: Sequence[Text], known_only: bool = False + ) -> List[Text]: + """Parses flags from argv; stores parsed flags into this FlagValues object. + + All unparsed arguments are returned. + + Args: + argv: a tuple/list of strings. + known_only: bool, if True, parse and remove known flags; return the rest + untouched. Unknown flags specified by --undefok are not returned. + + Returns: + The list of arguments not parsed as options, including argv[0]. + + Raises: + Error: Raised on any parsing error. + TypeError: Raised on passing wrong type of arguments. + ValueError: Raised on flag value parsing error. + """ + if isinstance(argv, (str, bytes)): + raise TypeError( + 'argv should be a tuple/list of strings, not bytes or string.') + if not argv: + raise ValueError( + 'argv cannot be an empty list, and must contain the program name as ' + 'the first element.') + + # This pre parses the argv list for --flagfile=<> options. + program_name = argv[0] + args = self.read_flags_from_files(argv[1:], force_gnu=False) + + # Parse the arguments. + unknown_flags, unparsed_args = self._parse_args(args, known_only) + + # Handle unknown flags by raising UnrecognizedFlagError. + # Note some users depend on us raising this particular error. + for name, value in unknown_flags: + suggestions = _helpers.get_flag_suggestions(name, list(self)) + raise _exceptions.UnrecognizedFlagError( + name, value, suggestions=suggestions) + + self.mark_as_parsed() + self.validate_all_flags() + return [program_name] + unparsed_args + + def __getstate__(self) -> Any: + raise TypeError("can't pickle FlagValues") + + def __copy__(self) -> Any: + raise TypeError('FlagValues does not support shallow copies. ' + 'Use absl.testing.flagsaver or copy.deepcopy instead.') + + def __deepcopy__(self, memo) -> Any: + result = object.__new__(type(self)) + result.__dict__.update(copy.deepcopy(self.__dict__, memo)) + return result + + def _set_is_retired_flag_func(self, is_retired_flag_func): + """Sets a function for checking retired flags. + + Do not use it. This is a private absl API used to check retired flags + registered by the absl C++ flags library. + + Args: + is_retired_flag_func: Callable(str) -> (bool, bool), a function takes flag + name as parameter, returns a tuple (is_retired, type_is_bool). 
+ """ + self.__dict__['__is_retired_flag_func'] = is_retired_flag_func + + def _parse_args( + self, args: List[str], known_only: bool + ) -> Tuple[List[Tuple[Optional[str], Any]], List[str]]: + """Helper function to do the main argument parsing. + + This function goes through args and does the bulk of the flag parsing. + It will find the corresponding flag in our flag dictionary, and call its + .parse() method on the flag value. + + Args: + args: [str], a list of strings with the arguments to parse. + known_only: bool, if True, parse and remove known flags; return the rest + untouched. Unknown flags specified by --undefok are not returned. + + Returns: + A tuple with the following: + unknown_flags: List of (flag name, arg) for flags we don't know about. + unparsed_args: List of arguments we did not parse. + + Raises: + Error: Raised on any parsing error. + ValueError: Raised on flag value parsing error. + """ + unparsed_names_and_args = [] # A list of (flag name or None, arg). + undefok = set() + retired_flag_func = self.__dict__['__is_retired_flag_func'] + + flag_dict = self._flags() + args = iter(args) + for arg in args: + value = None + + def get_value(): + # pylint: disable=cell-var-from-loop + try: + return next(args) if value is None else value + except StopIteration: + raise _exceptions.Error('Missing value for flag ' + arg) # pylint: disable=undefined-loop-variable + + if not arg.startswith('-'): + # A non-argument: default is break, GNU is skip. + unparsed_names_and_args.append((None, arg)) + if self.is_gnu_getopt(): + continue + else: + break + + if arg == '--': + if known_only: + unparsed_names_and_args.append((None, arg)) + break + + # At this point, arg must start with '-'. + if arg.startswith('--'): + arg_without_dashes = arg[2:] + else: + arg_without_dashes = arg[1:] + + if '=' in arg_without_dashes: + name, value = arg_without_dashes.split('=', 1) + else: + name, value = arg_without_dashes, None + + if not name: + # The argument is all dashes (including one dash). + unparsed_names_and_args.append((None, arg)) + if self.is_gnu_getopt(): + continue + else: + break + + # --undefok is a special case. + if name == 'undefok': + value = get_value() + undefok.update(v.strip() for v in value.split(',')) + undefok.update('no' + v.strip() for v in value.split(',')) + continue + + flag = flag_dict.get(name) + if flag is not None: + if flag.boolean and value is None: + value = 'true' + else: + value = get_value() + elif name.startswith('no') and len(name) > 2: + # Boolean flags can take the form of --noflag, with no value. + noflag = flag_dict.get(name[2:]) + if noflag is not None and noflag.boolean: + if value is not None: + raise ValueError(arg + ' does not take an argument') + flag = noflag + value = 'false' + + if retired_flag_func and flag is None: + is_retired, is_bool = retired_flag_func(name) + + # If we didn't recognize that flag, but it starts with + # "no" then maybe it was a boolean flag specified in the + # --nofoo form. + if not is_retired and name.startswith('no'): + is_retired, is_bool = retired_flag_func(name[2:]) + is_retired = is_retired and is_bool + + if is_retired: + if not is_bool and value is None: + # This happens when a non-bool retired flag is specified + # in format of "--flag value". + get_value() + logging.error( + 'Flag "%s" is retired and should no longer be specified. 
See ' + 'https://abseil.io/tips/90.', + name, + ) + continue + + if flag is not None: + # LINT.IfChange + flag.parse(value) + flag.using_default_value = False + # LINT.ThenChange(../testing/flagsaver.py:flag_override_parsing) + else: + unparsed_names_and_args.append((name, arg)) + + unknown_flags = [] + unparsed_args = [] + for name, arg in unparsed_names_and_args: + if name is None: + # Positional arguments. + unparsed_args.append(arg) + elif name in undefok: + # Remove undefok flags. + continue + else: + # This is an unknown flag. + if known_only: + unparsed_args.append(arg) + else: + unknown_flags.append((name, arg)) + + unparsed_args.extend(list(args)) + return unknown_flags, unparsed_args + + def is_parsed(self) -> bool: + """Returns whether flags were parsed.""" + return self.__dict__['__flags_parsed'] + + def mark_as_parsed(self) -> None: + """Explicitly marks flags as parsed. + + Use this when the caller knows that this FlagValues has been parsed as if + a ``__call__()`` invocation has happened. This is only a public method for + use by things like appcommands which do additional command like parsing. + """ + self.__dict__['__flags_parsed'] = True + + def unparse_flags(self) -> None: + """Unparses all flags to the point before any FLAGS(argv) was called.""" + for f in self._flags().values(): + f.unparse() + # We log this message before marking flags as unparsed to avoid a + # problem when the logging library causes flags access. + logging.info('unparse_flags() called; flags access will now raise errors.') + self.__dict__['__flags_parsed'] = False + self.__dict__['__unparse_flags_called'] = True + + def flag_values_dict(self) -> Dict[Text, Any]: + """Returns a dictionary that maps flag names to flag values.""" + return {name: flag.value for name, flag in self._flags().items()} + + def __str__(self): + """Returns a help string for all known flags.""" + return self.get_help() + + def get_help( + self, prefix: Text = '', include_special_flags: bool = True + ) -> Text: + """Returns a help string for all known flags. + + Args: + prefix: str, per-line output prefix. + include_special_flags: bool, whether to include description of + SPECIAL_FLAGS, i.e. --flagfile and --undefok. + + Returns: + str, formatted help message. + """ + flags_by_module = self.flags_by_module_dict() + if flags_by_module: + modules = sorted(flags_by_module) + # Print the help for the main module first, if possible. + main_module = sys.argv[0] + if main_module in modules: + modules.remove(main_module) + modules = [main_module] + modules + return self._get_help_for_modules(modules, prefix, include_special_flags) + else: + output_lines = [] + # Just print one long list of flags. + values = self._flags().values() + if include_special_flags: + values = itertools.chain( + values, _helpers.SPECIAL_FLAGS._flags().values() # pylint: disable=protected-access # pytype: disable=attribute-error + ) + self._render_flag_list(values, output_lines, prefix) + return '\n'.join(output_lines) + + def _get_help_for_modules(self, modules, prefix, include_special_flags): + """Returns the help string for a list of modules. + + Private to absl.flags package. + + Args: + modules: List[str], a list of modules to get the help string for. + prefix: str, a string that is prepended to each generated help line. + include_special_flags: bool, whether to include description of + SPECIAL_FLAGS, i.e. --flagfile and --undefok. 
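+
+    Returns:
+      str, the rendered help text for the given modules.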
+ """ + output_lines = [] + for module in modules: + self._render_our_module_flags(module, output_lines, prefix) + if include_special_flags: + self._render_module_flags( + 'absl.flags', + _helpers.SPECIAL_FLAGS._flags().values(), # pylint: disable=protected-access # pytype: disable=attribute-error + output_lines, + prefix, + ) + return '\n'.join(output_lines) + + def _render_module_flags(self, module, flags, output_lines, prefix=''): + """Returns a help string for a given module.""" + if not isinstance(module, str): + module = module.__name__ + output_lines.append('\n%s%s:' % (prefix, module)) + self._render_flag_list(flags, output_lines, prefix + ' ') + + def _render_our_module_flags(self, module, output_lines, prefix=''): + """Returns a help string for a given module.""" + flags = self.get_flags_for_module(module) + if flags: + self._render_module_flags(module, flags, output_lines, prefix) + + def _render_our_module_key_flags(self, module, output_lines, prefix=''): + """Returns a help string for the key flags of a given module. + + Args: + module: module|str, the module to render key flags for. + output_lines: [str], a list of strings. The generated help message lines + will be appended to this list. + prefix: str, a string that is prepended to each generated help line. + """ + key_flags = self.get_key_flags_for_module(module) + if key_flags: + self._render_module_flags(module, key_flags, output_lines, prefix) + + def module_help(self, module: Any) -> Text: + """Describes the key flags of a module. + + Args: + module: module|str, the module to describe the key flags for. + + Returns: + str, describing the key flags of a module. + """ + helplist = [] + self._render_our_module_key_flags(module, helplist) + return '\n'.join(helplist) + + def main_module_help(self) -> Text: + """Describes the key flags of the main module. + + Returns: + str, describing the key flags of the main module. + """ + return self.module_help(sys.argv[0]) + + def _render_flag_list(self, flaglist, output_lines, prefix=' '): + fl = self._flags() + special_fl = _helpers.SPECIAL_FLAGS._flags() # pylint: disable=protected-access # pytype: disable=attribute-error + flaglist = [(flag.name, flag) for flag in flaglist] + flaglist.sort() + flagset = {} + for (name, flag) in flaglist: + # It's possible this flag got deleted or overridden since being + # registered in the per-module flaglist. Check now against the + # canonical source of current flag information, the _flags. + if fl.get(name, None) != flag and special_fl.get(name, None) != flag: + # a different flag is using this name now + continue + # only print help once + if flag in flagset: + continue + flagset[flag] = 1 + flaghelp = '' + if flag.short_name: + flaghelp += '-%s,' % flag.short_name + if flag.boolean: + flaghelp += '--[no]%s:' % flag.name + else: + flaghelp += '--%s:' % flag.name + flaghelp += ' ' + if flag.help: + flaghelp += flag.help + flaghelp = _helpers.text_wrap( + flaghelp, indent=prefix + ' ', firstline_indent=prefix) + if flag.default_as_str: + flaghelp += '\n' + flaghelp += _helpers.text_wrap( + '(default: %s)' % flag.default_as_str, indent=prefix + ' ') + if flag.parser.syntactic_help: + flaghelp += '\n' + flaghelp += _helpers.text_wrap( + '(%s)' % flag.parser.syntactic_help, indent=prefix + ' ') + output_lines.append(flaghelp) + + def get_flag_value(self, name: Text, default: Any) -> Any: # pylint: disable=invalid-name + """Returns the value of a flag (if not None) or a default value. + + Args: + name: str, the name of a flag. 
+ default: Default value to use if the flag value is None. + + Returns: + Requested flag value or default. + """ + + value = self.__getattr__(name) + if value is not None: # Can't do if not value, b/c value might be '0' or "" + return value + else: + return default + + def _is_flag_file_directive(self, flag_string): + """Checks whether flag_string contain a --flagfile= directive.""" + if isinstance(flag_string, str): + if flag_string.startswith('--flagfile='): + return 1 + elif flag_string == '--flagfile': + return 1 + elif flag_string.startswith('-flagfile='): + return 1 + elif flag_string == '-flagfile': + return 1 + else: + return 0 + return 0 + + def _extract_filename(self, flagfile_str): + """Returns filename from a flagfile_str of form -[-]flagfile=filename. + + The cases of --flagfile foo and -flagfile foo shouldn't be hitting + this function, as they are dealt with in the level above this + function. + + Args: + flagfile_str: str, the flagfile string. + + Returns: + str, the filename from a flagfile_str of form -[-]flagfile=filename. + + Raises: + Error: Raised when illegal --flagfile is provided. + """ + if flagfile_str.startswith('--flagfile='): + return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip()) + elif flagfile_str.startswith('-flagfile='): + return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip()) + else: + raise _exceptions.Error('Hit illegal --flagfile type: %s' % flagfile_str) + + def _get_flag_file_lines(self, filename, parsed_file_stack=None): + """Returns the useful (!=comments, etc) lines from a file with flags. + + Args: + filename: str, the name of the flag file. + parsed_file_stack: [str], a list of the names of the files that we have + recursively encountered at the current depth. MUTATED BY THIS FUNCTION + (but the original value is preserved upon successfully returning from + function call). + + Returns: + List of strings. See the note below. + + NOTE(springer): This function checks for a nested --flagfile= + tag and handles the lower file recursively. It returns a list of + all the lines that _could_ contain command flags. This is + EVERYTHING except whitespace lines and comments (lines starting + with '#' or '//'). + """ + # For consistency with the cpp version, ignore empty values. + if not filename: + return [] + if parsed_file_stack is None: + parsed_file_stack = [] + # We do a little safety check for reparsing a file we've already encountered + # at a previous depth. + if filename in parsed_file_stack: + sys.stderr.write('Warning: Hit circular flagfile dependency. Ignoring' + ' flagfile: %s\n' % (filename,)) + return [] + else: + parsed_file_stack.append(filename) + + line_list = [] # All line from flagfile. + flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags. + try: + file_obj = open(filename, 'r') + except IOError as e_msg: + raise _exceptions.CantOpenFlagFileError( + 'ERROR:: Unable to open flagfile: %s' % e_msg) + + with file_obj: + line_list = file_obj.readlines() + + # This is where we check each line in the file we just read. + for line in line_list: + if line.isspace(): + pass + # Checks for comment (a line that starts with '#'). + elif line.startswith('#') or line.startswith('//'): + pass + # Checks for a nested "--flagfile=" flag in the current file. + # If we find one, recursively parse down into that file. 
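+      # (Circular includes are caught by parsed_file_stack, which tracks the
+      # chain of flagfiles currently being expanded.)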
+ elif self._is_flag_file_directive(line): + sub_filename = self._extract_filename(line) + included_flags = self._get_flag_file_lines( + sub_filename, parsed_file_stack=parsed_file_stack) + flag_line_list.extend(included_flags) + else: + # Any line that's not a comment or a nested flagfile should get + # copied into 2nd position. This leaves earlier arguments + # further back in the list, thus giving them higher priority. + flag_line_list.append(line.strip()) + + parsed_file_stack.pop() + return flag_line_list + + def read_flags_from_files( + self, argv: Sequence[Text], force_gnu: bool = True + ) -> List[Text]: + """Processes command line args, but also allow args to be read from file. + + Args: + argv: [str], a list of strings, usually sys.argv[1:], which may contain + one or more flagfile directives of the form --flagfile="./filename". + Note that the name of the program (sys.argv[0]) should be omitted. + force_gnu: bool, if False, --flagfile parsing obeys the + FLAGS.is_gnu_getopt() value. If True, ignore the value and always follow + gnu_getopt semantics. + + Returns: + A new list which has the original list combined with what we read + from any flagfile(s). + + Raises: + IllegalFlagValueError: Raised when --flagfile is provided with no + argument. + + This function is called by FLAGS(argv). + It scans the input list for a flag that looks like: + --flagfile=. Then it opens , reads all valid key + and value pairs and inserts them into the input list in exactly the + place where the --flagfile arg is found. + + Note that your application's flags are still defined the usual way + using absl.flags DEFINE_flag() type functions. + + Notes (assuming we're getting a commandline of some sort as our input): + + * For duplicate flags, the last one we hit should "win". + * Since flags that appear later win, a flagfile's settings can be "weak" + if the --flagfile comes at the beginning of the argument sequence, + and it can be "strong" if the --flagfile comes at the end. + * A further "--flagfile=" CAN be nested in a flagfile. + It will be expanded in exactly the spot where it is found. + * In a flagfile, a line beginning with # or // is a comment. + * Entirely blank lines _should_ be ignored. + """ + rest_of_args = argv + new_argv = [] + while rest_of_args: + current_arg = rest_of_args[0] + rest_of_args = rest_of_args[1:] + if self._is_flag_file_directive(current_arg): + # This handles the case of -(-)flagfile foo. In this case the + # next arg really is part of this one. + if current_arg == '--flagfile' or current_arg == '-flagfile': + if not rest_of_args: + raise _exceptions.IllegalFlagValueError( + '--flagfile with no argument') + flag_filename = os.path.expanduser(rest_of_args[0]) + rest_of_args = rest_of_args[1:] + else: + # This handles the case of (-)-flagfile=foo. + flag_filename = self._extract_filename(current_arg) + new_argv.extend(self._get_flag_file_lines(flag_filename)) + else: + new_argv.append(current_arg) + # Stop parsing after '--', like getopt and gnu_getopt. + if current_arg == '--': + break + # Stop parsing after a non-flag, like getopt. + if not current_arg.startswith('-'): + if not force_gnu and not self.__dict__['__use_gnu_getopt']: + break + else: + if ('=' not in current_arg and rest_of_args and + not rest_of_args[0].startswith('-')): + # If this is an occurrence of a legitimate --x y, skip the value + # so that it won't be mistaken for a standalone arg. 
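+            # Illustration: for "--output_dir /tmp/out" where output_dir is a
+            # defined non-boolean flag, "/tmp/out" is consumed here as the
+            # flag's value rather than being treated as a standalone
+            # positional argument on the next iteration (which would stop
+            # non-GNU parsing).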
+ fl = self._flags() + name = current_arg.lstrip('-') + if name in fl and not fl[name].boolean: + current_arg = rest_of_args[0] + rest_of_args = rest_of_args[1:] + new_argv.append(current_arg) + + if rest_of_args: + new_argv.extend(rest_of_args) + + return new_argv + + def flags_into_string(self) -> Text: + """Returns a string with the flags assignments from this FlagValues object. + + This function ignores flags whose value is None. Each flag + assignment is separated by a newline. + + NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString + from https://github.com/gflags/gflags. + + Returns: + str, the string with the flags assignments from this FlagValues object. + The flags are ordered by (module_name, flag_name). + """ + module_flags = sorted(self.flags_by_module_dict().items()) + s = '' + for unused_module_name, flags in module_flags: + flags = sorted(flags, key=lambda f: f.name) + for flag in flags: + if flag.value is not None: + s += flag.serialize() + '\n' + return s + + def append_flags_into_file(self, filename: Text) -> None: + """Appends all flags assignments from this FlagInfo object to a file. + + Output will be in the format of a flagfile. + + NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile + from https://github.com/gflags/gflags. + + Args: + filename: str, name of the file. + """ + with open(filename, 'a') as out_file: + out_file.write(self.flags_into_string()) + + def write_help_in_xml_format(self, outfile: Optional[TextIO] = None) -> None: + """Outputs flag documentation in XML format. + + NOTE: We use element names that are consistent with those used by + the C++ command-line flag library, from + https://github.com/gflags/gflags. + We also use a few new elements (e.g., ), but we do not + interfere / overlap with existing XML elements used by the C++ + library. Please maintain this consistency. + + Args: + outfile: File object we write to. Default None means sys.stdout. + """ + doc = minidom.Document() + all_flag = doc.createElement('AllFlags') + doc.appendChild(all_flag) + + all_flag.appendChild( + _helpers.create_xml_dom_element(doc, 'program', + os.path.basename(sys.argv[0]))) + + usage_doc = sys.modules['__main__'].__doc__ + if not usage_doc: + usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] + else: + usage_doc = usage_doc.replace('%s', sys.argv[0]) + all_flag.appendChild( + _helpers.create_xml_dom_element(doc, 'usage', usage_doc)) + + # Get list of key flags for the main module. + key_flags = self.get_key_flags_for_module(sys.argv[0]) + + # Sort flags by declaring module name and next by flag name. 
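+    # Each flag is then emitted as a child element of <AllFlags>, annotated
+    # with the module that declared it and with whether it is a key flag of
+    # the main module.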
+ flags_by_module = self.flags_by_module_dict() + all_module_names = list(flags_by_module.keys()) + all_module_names.sort() + for module_name in all_module_names: + flag_list = [(f.name, f) for f in flags_by_module[module_name]] + flag_list.sort() + for unused_flag_name, flag in flag_list: + is_key = flag in key_flags + all_flag.appendChild( + flag._create_xml_dom_element( # pylint: disable=protected-access + doc, + module_name, + is_key=is_key)) + + outfile = outfile or sys.stdout + outfile.write( + doc.toprettyxml(indent=' ', encoding='utf-8').decode('utf-8')) + outfile.flush() + + def _check_method_name_conflicts(self, name: str, flag: Flag): + if flag.allow_using_method_names: + return + short_name = flag.short_name + flag_names = {name} if short_name is None else {name, short_name} + for flag_name in flag_names: + if flag_name in self.__dict__['__banned_flag_names']: + raise _exceptions.FlagNameConflictsWithMethodError( + 'Cannot define a flag named "{name}". It conflicts with a method ' + 'on class "{class_name}". To allow defining it, use ' + 'allow_using_method_names and access the flag value with ' + "FLAGS['{name}'].value. FLAGS.{name} returns the method, " + 'not the flag value.'.format( + name=flag_name, class_name=type(self).__name__)) + + +FLAGS = FlagValues() + + +class FlagHolder(Generic[_T]): + """Holds a defined flag. + + This facilitates a cleaner api around global state. Instead of:: + + flags.DEFINE_integer('foo', ...) + flags.DEFINE_integer('bar', ...) + + def method(): + # prints parsed value of 'bar' flag + print(flags.FLAGS.foo) + # runtime error due to typo or possibly bad coding style. + print(flags.FLAGS.baz) + + it encourages code like:: + + _FOO_FLAG = flags.DEFINE_integer('foo', ...) + _BAR_FLAG = flags.DEFINE_integer('bar', ...) + + def method(): + print(_FOO_FLAG.value) + print(_BAR_FLAG.value) + + since the name of the flag appears only once in the source code. + """ + + value: _T + + def __init__( + self, + flag_values: FlagValues, + flag: Flag[_T], + ensure_non_none_value: bool = False, + ): + """Constructs a FlagHolder instance providing typesafe access to flag. + + Args: + flag_values: The container the flag is registered to. + flag: The flag object for this flag. + ensure_non_none_value: Is the value of the flag allowed to be None. + """ + self._flagvalues = flag_values + # We take the entire flag object, but only keep the name. Why? + # - We want FlagHolder[T] to be generic container + # - flag_values contains all flags, so has no reference to T. + # - typecheckers don't like to see a generic class where none of the ctor + # arguments refer to the generic type. + self._name = flag.name + # We intentionally do NOT check if the default value is None. + # This allows future use of this for "required flags with None default" + self._ensure_non_none_value = ensure_non_none_value + + def __eq__(self, other): + raise TypeError( + "unsupported operand type(s) for ==: '{0}' and '{1}' " + "(did you mean to use '{0}.value' instead?)".format( + type(self).__name__, type(other).__name__)) + + def __bool__(self): + raise TypeError( + "bool() not supported for instances of type '{0}' " + "(did you mean to use '{0}.value' instead?)".format( + type(self).__name__)) + + __nonzero__ = __bool__ + + @property + def name(self) -> Text: + return self._name + + @property + def value(self) -> _T: + """Returns the value of the flag. + + If ``_ensure_non_none_value`` is ``True``, then return value is not + ``None``. 
+ + Raises: + UnparsedFlagAccessError: if flag parsing has not finished. + IllegalFlagValueError: if value is None unexpectedly. + """ + val = getattr(self._flagvalues, self._name) + if self._ensure_non_none_value and val is None: + raise _exceptions.IllegalFlagValueError( + 'Unexpected None value for flag %s' % self._name) + return val + + @property + def default(self) -> _T: + """Returns the default value of the flag.""" + return self._flagvalues[self._name].default + + @property + def present(self) -> bool: + """Returns True if the flag was parsed from command-line flags.""" + return bool(self._flagvalues[self._name].present) + + def serialize(self) -> Text: + """Returns a serialized representation of the flag.""" + return self._flagvalues[self._name].serialize() + + +def resolve_flag_ref( + flag_ref: Union[str, FlagHolder], flag_values: FlagValues +) -> Tuple[str, FlagValues]: + """Helper to validate and resolve a flag reference argument.""" + if isinstance(flag_ref, FlagHolder): + new_flag_values = flag_ref._flagvalues # pylint: disable=protected-access + if flag_values != FLAGS and flag_values != new_flag_values: + raise ValueError( + 'flag_values must not be customized when operating on a FlagHolder') + return flag_ref.name, new_flag_values + return flag_ref, flag_values + + +def resolve_flag_refs( + flag_refs: Sequence[Union[str, FlagHolder]], flag_values: FlagValues +) -> Tuple[List[str], FlagValues]: + """Helper to validate and resolve flag reference list arguments.""" + fv = None + names = [] + for ref in flag_refs: + if isinstance(ref, FlagHolder): + newfv = ref._flagvalues # pylint: disable=protected-access + name = ref.name + else: + newfv = flag_values + name = ref + if fv and fv != newfv: + raise ValueError( + 'multiple FlagValues instances used in invocation. ' + 'FlagHolders must be registered to the same FlagValues instance as ' + 'do flag names, if provided.') + fv = newfv + names.append(name) + return names, fv diff --git a/MLPY/Lib/site-packages/absl/flags/_helpers.py b/MLPY/Lib/site-packages/absl/flags/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1ad559c0487b41d24f406c46e0da4d3240a1947f --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_helpers.py @@ -0,0 +1,421 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helper functions for Abseil Python flags library.""" + +import os +import re +import struct +import sys +import textwrap +import types +from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Set +from xml.dom import minidom +# pylint: disable=g-import-not-at-top +try: + import fcntl +except ImportError: + fcntl = None +try: + # Importing termios will fail on non-unix platforms. + import termios +except ImportError: + termios = None +# pylint: enable=g-import-not-at-top + + +_DEFAULT_HELP_WIDTH = 80 # Default width of help output. +# Minimal "sane" width of help output. We assume that any value below 40 is +# unreasonable. 
+_MIN_HELP_WIDTH = 40 + +# Define the allowed error rate in an input string to get suggestions. +# +# We lean towards a high threshold because we tend to be matching a phrase, +# and the simple algorithm used here is geared towards correcting word +# spellings. +# +# For manual testing, consider " --list" which produced a large number +# of spurious suggestions when we used "least_errors > 0.5" instead of +# "least_erros >= 0.5". +_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50 + +# Characters that cannot appear or are highly discouraged in an XML 1.0 +# document. (See http://www.w3.org/TR/REC-xml/#charsets or +# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0) +_ILLEGAL_XML_CHARS_REGEX = re.compile( + u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]') + +# This is a set of module ids for the modules that disclaim key flags. +# This module is explicitly added to this set so that we never consider it to +# define key flag. +disclaim_module_ids: Set[int] = set([id(sys.modules[__name__])]) + + +# Define special flags here so that help may be generated for them. +# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module. +# Initialized inside flagvalues.py. +# NOTE: This cannot be annotated as its actual FlagValues type since this would +# create a circular dependency. +SPECIAL_FLAGS: Any = None + + +# This points to the flags module, initialized in flags/__init__.py. +# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into +# account. +FLAGS_MODULE: types.ModuleType = None + + +class _ModuleObjectAndName(NamedTuple): + """Module object and name. + + Fields: + - module: object, module object. + - module_name: str, module name. + """ + module: types.ModuleType + module_name: str + + +def get_module_object_and_name( + globals_dict: Dict[str, Any] +) -> _ModuleObjectAndName: + """Returns the module that defines a global environment, and its name. + + Args: + globals_dict: A dictionary that should correspond to an environment + providing the values of the globals. + + Returns: + _ModuleObjectAndName - pair of module object & module name. + Returns (None, None) if the module could not be identified. + """ + name = globals_dict.get('__name__', None) + module = sys.modules.get(name, None) + # Pick a more informative name for the main module. + return _ModuleObjectAndName(module, + (sys.argv[0] if name == '__main__' else name)) + + +def get_calling_module_object_and_name() -> _ModuleObjectAndName: + """Returns the module that's calling into this module. + + We generally use this function to get the name of the module calling a + DEFINE_foo... function. + + Returns: + The module object that called into this one. + + Raises: + AssertionError: Raised when no calling module could be identified. + """ + for depth in range(1, sys.getrecursionlimit()): + # sys._getframe is the right thing to use here, as it's the best + # way to walk up the call stack. 
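+    # Walk outward one frame at a time, skipping frames that belong to
+    # modules in disclaim_module_ids, until a genuine caller is found.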
+ globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access + module, module_name = get_module_object_and_name(globals_for_frame) + if id(module) not in disclaim_module_ids and module_name is not None: + return _ModuleObjectAndName(module, module_name) + raise AssertionError('No module was found') + + +def get_calling_module() -> str: + """Returns the name of the module that's calling into this module.""" + return get_calling_module_object_and_name().module_name + + +def create_xml_dom_element( + doc: minidom.Document, name: str, value: Any +) -> minidom.Element: + """Returns an XML DOM element with name and text value. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + name: str, the tag of XML element. + value: object, whose string representation will be used + as the value of the XML element. Illegal or highly discouraged xml 1.0 + characters are stripped. + + Returns: + An instance of minidom.Element. + """ + s = str(value) + if isinstance(value, bool): + # Display boolean values as the C++ flag library does: no caps. + s = s.lower() + # Remove illegal xml characters. + s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s) + + e = doc.createElement(name) + e.appendChild(doc.createTextNode(s)) + return e + + +def get_help_width() -> int: + """Returns the integer width of help lines that is used in TextWrap.""" + if not sys.stdout.isatty() or termios is None or fcntl is None: + return _DEFAULT_HELP_WIDTH + try: + data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, b'1234') + columns = struct.unpack('hh', data)[1] + # Emacs mode returns 0. + # Here we assume that any value below 40 is unreasonable. + if columns >= _MIN_HELP_WIDTH: + return columns + # Returning an int as default is fine, int(int) just return the int. + return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH)) + + except (TypeError, IOError, struct.error): + return _DEFAULT_HELP_WIDTH + + +def get_flag_suggestions( + attempt: Optional[str], longopt_list: Sequence[str] +) -> List[str]: + """Returns helpful similar matches for an invalid flag.""" + # Don't suggest on very short strings, or if no longopts are specified. + if len(attempt) <= 2 or not longopt_list: + return [] + + option_names = [v.split('=')[0] for v in longopt_list] + + # Find close approximations in flag prefixes. + # This also handles the case where the flag is spelled right but ambiguous. + distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option) + for option in option_names] + # t[0] is distance, and sorting by t[1] allows us to have stable output. + distances.sort() + + least_errors, _ = distances[0] + # Don't suggest excessively bad matches. + if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt): + return [] + + suggestions = [] + for errors, name in distances: + if errors == least_errors: + suggestions.append(name) + else: + break + return suggestions + + +def _damerau_levenshtein(a, b): + """Returns Damerau-Levenshtein edit distance from a to b.""" + memo = {} + + def distance(x, y): + """Recursively defined string distance with memoization.""" + if (x, y) in memo: + return memo[x, y] + if not x: + d = len(y) + elif not y: + d = len(x) + else: + d = min( + distance(x[1:], y) + 1, # correct an insertion error + distance(x, y[1:]) + 1, # correct a deletion error + distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character + if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]: + # Correct a transposition. 
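+        # e.g. for "eh" vs. "he" this branch yields a distance of 1 (one
+        # swap of adjacent characters) rather than 2 substitutions.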
+ t = distance(x[2:], y[2:]) + 1 + if d > t: + d = t + + memo[x, y] = d + return d + return distance(a, b) + + +def text_wrap( + text: str, + length: Optional[int] = None, + indent: str = '', + firstline_indent: Optional[str] = None, +) -> str: + """Wraps a given text to a maximum line length and returns it. + + It turns lines that only contain whitespace into empty lines, keeps new lines, + and expands tabs using 4 spaces. + + Args: + text: str, text to wrap. + length: int, maximum length of a line, includes indentation. + If this is None then use get_help_width() + indent: str, indent for all but first line. + firstline_indent: str, indent for first line; if None, fall back to indent. + + Returns: + str, the wrapped text. + + Raises: + ValueError: Raised if indent or firstline_indent not shorter than length. + """ + # Get defaults where callee used None + if length is None: + length = get_help_width() + if indent is None: + indent = '' + if firstline_indent is None: + firstline_indent = indent + + if len(indent) >= length: + raise ValueError('Length of indent exceeds length') + if len(firstline_indent) >= length: + raise ValueError('Length of first line indent exceeds length') + + text = text.expandtabs(4) + + result = [] + # Create one wrapper for the first paragraph and one for subsequent + # paragraphs that does not have the initial wrapping. + wrapper = textwrap.TextWrapper( + width=length, initial_indent=firstline_indent, subsequent_indent=indent) + subsequent_wrapper = textwrap.TextWrapper( + width=length, initial_indent=indent, subsequent_indent=indent) + + # textwrap does not have any special treatment for newlines. From the docs: + # "...newlines may appear in the middle of a line and cause strange output. + # For this reason, text should be split into paragraphs (using + # str.splitlines() or similar) which are wrapped separately." + for paragraph in (p.strip() for p in text.splitlines()): + if paragraph: + result.extend(wrapper.wrap(paragraph)) + else: + result.append('') # Keep empty lines. + # Replace initial wrapper with wrapper for subsequent paragraphs. + wrapper = subsequent_wrapper + + return '\n'.join(result) + + +def flag_dict_to_args( + flag_map: Dict[str, Any], multi_flags: Optional[Set[str]] = None +) -> Iterable[str]: + """Convert a dict of values into process call parameters. + + This method is used to convert a dictionary into a sequence of parameters + for a binary that parses arguments using this module. + + Args: + flag_map: dict, a mapping where the keys are flag names (strings). + values are treated according to their type: + + * If value is ``None``, then only the name is emitted. + * If value is ``True``, then only the name is emitted. + * If value is ``False``, then only the name prepended with 'no' is + emitted. + * If value is a string then ``--name=value`` is emitted. + * If value is a collection, this will emit + ``--name=value1,value2,value3``, unless the flag name is in + ``multi_flags``, in which case this will emit + ``--name=value1 --name=value2 --name=value3``. + * Everything else is converted to string an passed as such. + + multi_flags: set, names (strings) of flags that should be treated as + multi-flags. + Yields: + sequence of string suitable for a subprocess execution. 
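+
+  Example (illustrative)::
+
+    list(flag_dict_to_args({'name': 'bob', 'debug': True, 'cache': False}))
+    # --> ['--name=bob', '--debug', '--nocache']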
+ """ + for key, value in flag_map.items(): + if value is None: + yield '--%s' % key + elif isinstance(value, bool): + if value: + yield '--%s' % key + else: + yield '--no%s' % key + elif isinstance(value, (bytes, type(u''))): + # We don't want strings to be handled like python collections. + yield '--%s=%s' % (key, value) + else: + # Now we attempt to deal with collections. + try: + if multi_flags and key in multi_flags: + for item in value: + yield '--%s=%s' % (key, str(item)) + else: + yield '--%s=%s' % (key, ','.join(str(item) for item in value)) + except TypeError: + # Default case. + yield '--%s=%s' % (key, value) + + +def trim_docstring(docstring: str) -> str: + """Removes indentation from triple-quoted strings. + + This is the function specified in PEP 257 to handle docstrings: + https://www.python.org/dev/peps/pep-0257/. + + Args: + docstring: str, a python docstring. + + Returns: + str, docstring with indentation removed. + """ + if not docstring: + return '' + + # If you've got a line longer than this you have other problems... + max_indent = 1 << 29 + + # Convert tabs to spaces (following the normal Python rules) + # and split into a list of lines: + lines = docstring.expandtabs().splitlines() + + # Determine minimum indentation (first line doesn't count): + indent = max_indent + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < max_indent: + for line in lines[1:]: + trimmed.append(line[indent:].rstrip()) + # Strip off trailing and leading blank lines: + while trimmed and not trimmed[-1]: + trimmed.pop() + while trimmed and not trimmed[0]: + trimmed.pop(0) + # Return a single string: + return '\n'.join(trimmed) + + +def doc_to_help(doc: str) -> str: + """Takes a __doc__ string and reformats it as help.""" + + # Get rid of starting and ending white space. Using lstrip() or even + # strip() could drop more than maximum of first line and right space + # of last line. + doc = doc.strip() + + # Get rid of all empty lines. + whitespace_only_line = re.compile('^[ \t]+$', re.M) + doc = whitespace_only_line.sub('', doc) + + # Cut out common space at line beginnings. + doc = trim_docstring(doc) + + # Just like this module's comment, comments tend to be aligned somehow. + # In other words they all start with the same amount of white space. + # 1) keep double new lines; + # 2) keep ws after new lines if not empty line; + # 3) all other new lines shall be changed to a space; + # Solution: Match new lines between non white space and replace with space. + doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M) + + return doc diff --git a/MLPY/Lib/site-packages/absl/flags/_validators.py b/MLPY/Lib/site-packages/absl/flags/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..2161284a8e284fdfbe42d0f0128e7068cb1ba85f --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_validators.py @@ -0,0 +1,352 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module to enforce different constraints on flags. + +Flags validators can be registered using following functions / decorators:: + + flags.register_validator + @flags.validator + flags.register_multi_flags_validator + @flags.multi_flags_validator + +Three convenience functions are also provided for common flag constraints:: + + flags.mark_flag_as_required + flags.mark_flags_as_required + flags.mark_flags_as_mutual_exclusive + flags.mark_bool_flags_as_mutual_exclusive + +See their docstring in this module for a usage manual. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import warnings + +from absl.flags import _exceptions +from absl.flags import _flagvalues +from absl.flags import _validators_classes + + +def register_validator(flag_name, + checker, + message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """Adds a constraint, which will be enforced during program execution. + + The constraint is validated when flags are initially parsed, and after each + change of the corresponding flag's value. + + Args: + flag_name: str | FlagHolder, name or holder of the flag to be checked. + Positional-only parameter. + checker: callable, a function to validate the flag. + + * input - A single positional argument: The value of the corresponding + flag (string, boolean, etc. This value will be passed to checker + by the library). + * output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either ``return False`` or + ``raise flags.ValidationError(desired_error_message)``. + + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. + ValueError: Raised when flag_values is non-default and does not match the + FlagValues of the provided FlagHolder instance. + """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + v = _validators_classes.SingleFlagValidator(flag_name, checker, message) + _add_validator(flag_values, v) + + +def validator(flag_name, message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """A function decorator for defining a flag validator. + + Registers the decorated function as a validator for flag_name, e.g.:: + + @flags.validator('foo') + def _CheckFoo(foo): + ... + + See :func:`register_validator` for the specification of checker function. + + Args: + flag_name: str | FlagHolder, name or holder of the flag to be checked. + Positional-only parameter. + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + Returns: + A function decorator that registers its function argument as a validator. + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. 
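+
+  A fuller sketch (the flag name and bounds are illustrative)::
+
+    @flags.validator('port', message='--port must be in [1, 65535].')
+    def _check_port(value):
+      return value is None or 1 <= value <= 65535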
+ """ + + def decorate(function): + register_validator(flag_name, function, + message=message, + flag_values=flag_values) + return function + return decorate + + +def register_multi_flags_validator(flag_names, + multi_flags_checker, + message='Flags validation failed', + flag_values=_flagvalues.FLAGS): + """Adds a constraint to multiple flags. + + The constraint is validated when flags are initially parsed, and after each + change of the corresponding flag's value. + + Args: + flag_names: [str | FlagHolder], a list of the flag names or holders to be + checked. Positional-only parameter. + multi_flags_checker: callable, a function to validate the flag. + + * input - dict, with keys() being flag_names, and value for each key + being the value of the corresponding flag (string, boolean, etc). + * output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError. + + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Raises: + AttributeError: Raised when a flag is not registered as a valid flag name. + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + v = _validators_classes.MultiFlagsValidator( + flag_names, multi_flags_checker, message) + _add_validator(flag_values, v) + + +def multi_flags_validator(flag_names, + message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """A function decorator for defining a multi-flag validator. + + Registers the decorated function as a validator for flag_names, e.g.:: + + @flags.multi_flags_validator(['foo', 'bar']) + def _CheckFooBar(flags_dict): + ... + + See :func:`register_multi_flags_validator` for the specification of checker + function. + + Args: + flag_names: [str | FlagHolder], a list of the flag names or holders to be + checked. Positional-only parameter. + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Returns: + A function decorator that registers its function argument as a validator. + + Raises: + AttributeError: Raised when a flag is not registered as a valid flag name. + """ + + def decorate(function): + register_multi_flags_validator(flag_names, + function, + message=message, + flag_values=flag_values) + return function + + return decorate + + +def mark_flag_as_required(flag_name, flag_values=_flagvalues.FLAGS): + """Ensures that flag is not None during program execution. + + Registers a flag validator, which will follow usual validator rules. + Important note: validator will pass for any non-``None`` value, such as + ``False``, ``0`` (zero), ``''`` (empty string) and so on. 
+ + If your module might be imported by others, and you only wish to make the flag + required when the module is directly executed, call this method like this:: + + if __name__ == '__main__': + flags.mark_flag_as_required('your_flag_name') + app.run() + + Args: + flag_name: str | FlagHolder, name or holder of the flag. + Positional-only parameter. + flag_values: flags.FlagValues, optional :class:`~absl.flags.FlagValues` + instance where the flag is defined. + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. + ValueError: Raised when flag_values is non-default and does not match the + FlagValues of the provided FlagHolder instance. + """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + if flag_values[flag_name].default is not None: + warnings.warn( + 'Flag --%s has a non-None default value; therefore, ' + 'mark_flag_as_required will pass even if flag is not specified in the ' + 'command line!' % flag_name, + stacklevel=2) + register_validator( + flag_name, + lambda value: value is not None, + message='Flag --{} must have a value other than None.'.format(flag_name), + flag_values=flag_values) + + +def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS): + """Ensures that flags are not None during program execution. + + If your module might be imported by others, and you only wish to make the flag + required when the module is directly executed, call this method like this:: + + if __name__ == '__main__': + flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) + app.run() + + Args: + flag_names: Sequence[str | FlagHolder], names or holders of the flags. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + Raises: + AttributeError: If any of flag name has not already been defined as a flag. + """ + for flag_name in flag_names: + mark_flag_as_required(flag_name, flag_values) + + +def mark_flags_as_mutual_exclusive(flag_names, required=False, + flag_values=_flagvalues.FLAGS): + """Ensures that only one flag among flag_names is not None. + + Important note: This validator checks if flag values are ``None``, and it does + not distinguish between default and explicit values. Therefore, this validator + does not make sense when applied to flags with default values other than None, + including other false values (e.g. ``False``, ``0``, ``''``, ``[]``). That + includes multi flags with a default value of ``[]`` instead of None. + + Args: + flag_names: [str | FlagHolder], names or holders of flags. + Positional-only parameter. + required: bool. If true, exactly one of the flags must have a value other + than None. Otherwise, at most one of the flags can have a value other + than None, and it is valid for all of the flags to be None. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + + Raises: + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + for flag_name in flag_names: + if flag_values[flag_name].default is not None: + warnings.warn( + 'Flag --{} has a non-None default value. 
That does not make sense ' + 'with mark_flags_as_mutual_exclusive, which checks whether the ' + 'listed flags have a value other than None.'.format(flag_name), + stacklevel=2) + + def validate_mutual_exclusion(flags_dict): + flag_count = sum(1 for val in flags_dict.values() if val is not None) + if flag_count == 1 or (not required and flag_count == 0): + return True + raise _exceptions.ValidationError( + '{} one of ({}) must have a value other than None.'.format( + 'Exactly' if required else 'At most', ', '.join(flag_names))) + + register_multi_flags_validator( + flag_names, validate_mutual_exclusion, flag_values=flag_values) + + +def mark_bool_flags_as_mutual_exclusive(flag_names, required=False, + flag_values=_flagvalues.FLAGS): + """Ensures that only one flag among flag_names is True. + + Args: + flag_names: [str | FlagHolder], names or holders of flags. + Positional-only parameter. + required: bool. If true, exactly one flag must be True. Otherwise, at most + one flag can be True, and it is valid for all flags to be False. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + + Raises: + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + for flag_name in flag_names: + if not flag_values[flag_name].boolean: + raise _exceptions.ValidationError( + 'Flag --{} is not Boolean, which is required for flags used in ' + 'mark_bool_flags_as_mutual_exclusive.'.format(flag_name)) + + def validate_boolean_mutual_exclusion(flags_dict): + flag_count = sum(bool(val) for val in flags_dict.values()) + if flag_count == 1 or (not required and flag_count == 0): + return True + raise _exceptions.ValidationError( + '{} one of ({}) must be True.'.format( + 'Exactly' if required else 'At most', ', '.join(flag_names))) + + register_multi_flags_validator( + flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values) + + +def _add_validator(fv, validator_instance): + """Register new flags validator to be checked. + + Args: + fv: flags.FlagValues, the FlagValues instance to add the validator. + validator_instance: validators.Validator, the validator to add. + Raises: + KeyError: Raised when validators work with a non-existing flag. + """ + for flag_name in validator_instance.get_flags_names(): + fv[flag_name].validators.append(validator_instance) diff --git a/MLPY/Lib/site-packages/absl/flags/_validators_classes.py b/MLPY/Lib/site-packages/absl/flags/_validators_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..59100c8e6dc9528634c1752a8de18983eba26676 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/_validators_classes.py @@ -0,0 +1,172 @@ +# Copyright 2021 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Defines *private* classes used for flag validators. + +Do NOT import this module. DO NOT use anything from this module. They are +private APIs. +""" + +from absl.flags import _exceptions + + +class Validator(object): + """Base class for flags validators. + + Users should NOT overload these classes, and use flags.Register... + methods instead. + """ + + # Used to assign each validator an unique insertion_index + validators_count = 0 + + def __init__(self, checker, message): + """Constructor to create all validators. + + Args: + checker: function to verify the constraint. + Input of this method varies, see SingleFlagValidator and + multi_flags_validator for a detailed description. + message: str, error message to be shown to the user. + """ + self.checker = checker + self.message = message + Validator.validators_count += 1 + # Used to assert validators in the order they were registered. + self.insertion_index = Validator.validators_count + + def verify(self, flag_values): + """Verifies that constraint is satisfied. + + flags library calls this method to verify Validator's constraint. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Raises: + Error: Raised if constraint is not satisfied. + """ + param = self._get_input_to_checker_function(flag_values) + if not self.checker(param): + raise _exceptions.ValidationError(self.message) + + def get_flags_names(self): + """Returns the names of the flags checked by this validator. + + Returns: + [string], names of the flags. + """ + raise NotImplementedError('This method should be overloaded') + + def print_flags_with_values(self, flag_values): + raise NotImplementedError('This method should be overloaded') + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, containing all flags. + Returns: + The input to be given to checker. The return type depends on the specific + validator. + """ + raise NotImplementedError('This method should be overloaded') + + +class SingleFlagValidator(Validator): + """Validator behind register_validator() method. + + Validates that a single flag passes its checker function. The checker function + takes the flag value and returns True (if value looks fine) or, if flag value + is not valid, either returns False or raises an Exception. + """ + + def __init__(self, flag_name, checker, message): + """Constructor. + + Args: + flag_name: string, name of the flag. + checker: function to verify the validator. + input - value of the corresponding flag (string, boolean, etc). + output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError(desired_error_message). + message: str, error message to be shown to the user if validator's + condition is not satisfied. + """ + super(SingleFlagValidator, self).__init__(checker, message) + self.flag_name = flag_name + + def get_flags_names(self): + return [self.flag_name] + + def print_flags_with_values(self, flag_values): + return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value) + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Returns: + object, the input to be given to checker. 
+ """ + return flag_values[self.flag_name].value + + +class MultiFlagsValidator(Validator): + """Validator behind register_multi_flags_validator method. + + Validates that flag values pass their common checker function. The checker + function takes flag values and returns True (if values look fine) or, + if values are not valid, either returns False or raises an Exception. + """ + + def __init__(self, flag_names, checker, message): + """Constructor. + + Args: + flag_names: [str], containing names of the flags used by checker. + checker: function to verify the validator. + input - dict, with keys() being flag_names, and value for each + key being the value of the corresponding flag (string, boolean, + etc). + output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError(desired_error_message). + message: str, error message to be shown to the user if validator's + condition is not satisfied + """ + super(MultiFlagsValidator, self).__init__(checker, message) + self.flag_names = flag_names + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Returns: + dict, with keys() being self.flag_names, and value for each key + being the value of the corresponding flag (string, boolean, etc). + """ + return dict([key, flag_values[key].value] for key in self.flag_names) + + def print_flags_with_values(self, flag_values): + prefix = 'flags ' + flags_with_values = [] + for key in self.flag_names: + flags_with_values.append('%s=%s' % (key, flag_values[key].value)) + return prefix + ', '.join(flags_with_values) + + def get_flags_names(self): + return self.flag_names diff --git a/MLPY/Lib/site-packages/absl/flags/argparse_flags.py b/MLPY/Lib/site-packages/absl/flags/argparse_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..f05c7943ec53b5364817675ef4fc94bbd9976df4 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/flags/argparse_flags.py @@ -0,0 +1,388 @@ +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides argparse integration with absl.flags. + +``argparse_flags.ArgumentParser`` is a drop-in replacement for +:class:`argparse.ArgumentParser`. It takes care of collecting and defining absl +flags in :mod:`argparse`. + +Here is a simple example:: + + # Assume the following absl.flags is defined in another module: + # + # from absl import flags + # flags.DEFINE_string('echo', None, 'The echo message.') + # + parser = argparse_flags.ArgumentParser( + description='A demo of absl.flags and argparse integration.') + parser.add_argument('--header', help='Header message to print.') + + # The parser will also accept the absl flag `--echo`. + # The `header` value is available as `args.header` just like a regular + # argparse flag. 
The absl flag `--echo` continues to be available via + # `absl.flags.FLAGS` if you want to access it. + args = parser.parse_args() + + # Example usages: + # ./program --echo='A message.' --header='A header' + # ./program --header 'A header' --echo 'A message.' + + +Here is another example demonstrates subparsers:: + + parser = argparse_flags.ArgumentParser(description='A subcommands demo.') + parser.add_argument('--header', help='The header message to print.') + + subparsers = parser.add_subparsers(help='The command to execute.') + + roll_dice_parser = subparsers.add_parser( + 'roll_dice', help='Roll a dice.', + # By default, absl flags can also be specified after the sub-command. + # To only allow them before sub-command, pass + # `inherited_absl_flags=None`. + inherited_absl_flags=None) + roll_dice_parser.add_argument('--num_faces', type=int, default=6) + roll_dice_parser.set_defaults(command=roll_dice) + + shuffle_parser = subparsers.add_parser('shuffle', help='Shuffle inputs.') + shuffle_parser.add_argument( + 'inputs', metavar='I', nargs='+', help='Inputs to shuffle.') + shuffle_parser.set_defaults(command=shuffle) + + args = parser.parse_args(argv[1:]) + args.command(args) + + # Example usages: + # ./program --echo='A message.' roll_dice --num_faces=6 + # ./program shuffle --echo='A message.' 1 2 3 4 + + +There are several differences between :mod:`absl.flags` and +:mod:`~absl.flags.argparse_flags`: + +1. Flags defined with absl.flags are parsed differently when using the + argparse parser. Notably: + + 1) absl.flags allows both single-dash and double-dash for any flag, and + doesn't distinguish them; argparse_flags only allows double-dash for + flag's regular name, and single-dash for flag's ``short_name``. + 2) Boolean flags in absl.flags can be specified with ``--bool``, + ``--nobool``, as well as ``--bool=true/false`` (though not recommended); + in argparse_flags, it only allows ``--bool``, ``--nobool``. + +2. Help related flag differences: + + 1) absl.flags does not define help flags, absl.app does that; argparse_flags + defines help flags unless passed with ``add_help=False``. + 2) absl.app supports ``--helpxml``; argparse_flags does not. + 3) argparse_flags supports ``-h``; absl.app does not. +""" + +import argparse +import sys + +from absl import flags + + +_BUILT_IN_FLAGS = frozenset({ + 'help', + 'helpshort', + 'helpfull', + 'helpxml', + 'flagfile', + 'undefok', +}) + + +class ArgumentParser(argparse.ArgumentParser): + """Custom ArgumentParser class to support special absl flags.""" + + def __init__(self, **kwargs): + """Initializes ArgumentParser. + + Args: + **kwargs: same as argparse.ArgumentParser, except: + 1. It also accepts `inherited_absl_flags`: the absl flags to inherit. + The default is the global absl.flags.FLAGS instance. Pass None to + ignore absl flags. + 2. The `prefix_chars` argument must be the default value '-'. + + Raises: + ValueError: Raised when prefix_chars is not '-'. + """ + prefix_chars = kwargs.get('prefix_chars', '-') + if prefix_chars != '-': + raise ValueError( + 'argparse_flags.ArgumentParser only supports "-" as the prefix ' + 'character, found "{}".'.format(prefix_chars)) + + # Remove inherited_absl_flags before calling super. + self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS) + # Now call super to initialize argparse.ArgumentParser before calling + # add_argument in _define_absl_flags. + super(ArgumentParser, self).__init__(**kwargs) + + if self.add_help: + # -h and --help are defined in super. 
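+      # (argparse itself defines them whenever add_help is True.)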
+ # Also add the --helpshort and --helpfull flags. + self.add_argument( + # Action 'help' defines a similar flag to -h/--help. + '--helpshort', action='help', + default=argparse.SUPPRESS, help=argparse.SUPPRESS) + self.add_argument( + '--helpfull', action=_HelpFullAction, + default=argparse.SUPPRESS, help='show full help message and exit') + + if self._inherited_absl_flags is not None: + self.add_argument( + '--undefok', default=argparse.SUPPRESS, help=argparse.SUPPRESS) + self._define_absl_flags(self._inherited_absl_flags) + + def parse_known_args(self, args=None, namespace=None): + if args is None: + args = sys.argv[1:] + if self._inherited_absl_flags is not None: + # Handle --flagfile. + # Explicitly specify force_gnu=True, since argparse behaves like + # gnu_getopt: flags can be specified after positional arguments. + args = self._inherited_absl_flags.read_flags_from_files( + args, force_gnu=True) + + undefok_missing = object() + undefok = getattr(namespace, 'undefok', undefok_missing) + + namespace, args = super(ArgumentParser, self).parse_known_args( + args, namespace) + + # For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where + # sub-parsers don't preserve existing namespace attributes. + # Restore the undefok attribute if a sub-parser dropped it. + if undefok is not undefok_missing: + namespace.undefok = undefok + + if self._inherited_absl_flags is not None: + # Handle --undefok. At this point, `args` only contains unknown flags, + # so it won't strip defined flags that are also specified with --undefok. + # For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where + # sub-parsers don't preserve existing namespace attributes. The undefok + # attribute might not exist because a subparser dropped it. + if hasattr(namespace, 'undefok'): + args = _strip_undefok_args(namespace.undefok, args) + # absl flags are not exposed in the Namespace object. See Namespace: + # https://docs.python.org/3/library/argparse.html#argparse.Namespace. + del namespace.undefok + self._inherited_absl_flags.mark_as_parsed() + try: + self._inherited_absl_flags.validate_all_flags() + except flags.IllegalFlagValueError as e: + self.error(str(e)) + + return namespace, args + + def _define_absl_flags(self, absl_flags): + """Defines flags from absl_flags.""" + key_flags = set(absl_flags.get_key_flags_for_module(sys.argv[0])) + for name in absl_flags: + if name in _BUILT_IN_FLAGS: + # Do not inherit built-in flags. + continue + flag_instance = absl_flags[name] + # Each flags with short_name appears in FLAGS twice, so only define + # when the dictionary key is equal to the regular name. + if name == flag_instance.name: + # Suppress the flag in the help short message if it's not a main + # module's key flag. + suppress = flag_instance not in key_flags + self._define_absl_flag(flag_instance, suppress) + + def _define_absl_flag(self, flag_instance, suppress): + """Defines a flag from the flag_instance.""" + flag_name = flag_instance.name + short_name = flag_instance.short_name + argument_names = ['--' + flag_name] + if short_name: + argument_names.insert(0, '-' + short_name) + if suppress: + helptext = argparse.SUPPRESS + else: + # argparse help string uses %-formatting. Escape the literal %'s. + helptext = flag_instance.help.replace('%', '%%') + if flag_instance.boolean: + # Only add the `no` form to the long name. 
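+      # e.g. a boolean flag "verbose" with short name "v" is exposed as
+      # -v/--verbose/--noverbose; no "-nov" variant is created.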
+ argument_names.append('--no' + flag_name) + self.add_argument( + *argument_names, action=_BooleanFlagAction, help=helptext, + metavar=flag_instance.name.upper(), + flag_instance=flag_instance) + else: + self.add_argument( + *argument_names, action=_FlagAction, help=helptext, + metavar=flag_instance.name.upper(), + flag_instance=flag_instance) + + +class _FlagAction(argparse.Action): + """Action class for Abseil non-boolean flags.""" + + def __init__( + self, + option_strings, + dest, + help, # pylint: disable=redefined-builtin + metavar, + flag_instance, + default=argparse.SUPPRESS): + """Initializes _FlagAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + help: See argparse.Action. + metavar: See argparse.Action. + flag_instance: absl.flags.Flag, the absl flag instance. + default: Ignored. The flag always uses dest=argparse.SUPPRESS so it + doesn't affect the parsing result. + """ + del dest + self._flag_instance = flag_instance + super(_FlagAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + self._flag_instance.parse(values) + self._flag_instance.using_default_value = False + + +class _BooleanFlagAction(argparse.Action): + """Action class for Abseil boolean flags.""" + + def __init__( + self, + option_strings, + dest, + help, # pylint: disable=redefined-builtin + metavar, + flag_instance, + default=argparse.SUPPRESS): + """Initializes _BooleanFlagAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + help: See argparse.Action. + metavar: See argparse.Action. + flag_instance: absl.flags.Flag, the absl flag instance. + default: Ignored. The flag always uses dest=argparse.SUPPRESS so it + doesn't affect the parsing result. + """ + del dest, default + self._flag_instance = flag_instance + flag_names = [self._flag_instance.name] + if self._flag_instance.short_name: + flag_names.append(self._flag_instance.short_name) + self._flag_names = frozenset(flag_names) + super(_BooleanFlagAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + nargs=0, # Does not accept values, only `--bool` or `--nobool`. + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + if not isinstance(values, list) or values: + raise ValueError('values must be an empty list.') + if option_string.startswith('--'): + option = option_string[2:] + else: + option = option_string[1:] + if option in self._flag_names: + self._flag_instance.parse('true') + else: + if not option.startswith('no') or option[2:] not in self._flag_names: + raise ValueError('invalid option_string: ' + option_string) + self._flag_instance.parse('false') + self._flag_instance.using_default_value = False + + +class _HelpFullAction(argparse.Action): + """Action class for --helpfull flag.""" + + def __init__(self, option_strings, dest, default, help): # pylint: disable=redefined-builtin + """Initializes _HelpFullAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + default: Ignored. + help: See argparse.Action. 
+ """ + del dest, default + super(_HelpFullAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + default=argparse.SUPPRESS, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + # This only prints flags when help is not argparse.SUPPRESS. + # It includes user defined argparse flags, as well as main module's + # key absl flags. Other absl flags use argparse.SUPPRESS, so they aren't + # printed here. + parser.print_help() + + absl_flags = parser._inherited_absl_flags # pylint: disable=protected-access + if absl_flags is not None: + modules = sorted(absl_flags.flags_by_module_dict()) + main_module = sys.argv[0] + if main_module in modules: + # The main module flags are already printed in parser.print_help(). + modules.remove(main_module) + print(absl_flags._get_help_for_modules( # pylint: disable=protected-access + modules, prefix='', include_special_flags=True)) + parser.exit() + + +def _strip_undefok_args(undefok, args): + """Returns a new list of args after removing flags in --undefok.""" + if undefok: + undefok_names = set(name.strip() for name in undefok.split(',')) + undefok_names |= set('no' + name for name in undefok_names) + # Remove undefok flags. + args = [arg for arg in args if not _is_undefok(arg, undefok_names)] + return args + + +def _is_undefok(arg, undefok_names): + """Returns whether we can ignore arg based on a set of undefok flag names.""" + if not arg.startswith('-'): + return False + if arg.startswith('--'): + arg_without_dash = arg[2:] + else: + arg_without_dash = arg[1:] + if '=' in arg_without_dash: + name, _ = arg_without_dash.split('=', 1) + else: + name = arg_without_dash + if name in undefok_names: + return True + return False diff --git a/MLPY/Lib/site-packages/absl/logging/__init__.py b/MLPY/Lib/site-packages/absl/logging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..42166cd892f99c34f637dd661576897196a357f4 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/logging/__init__.py @@ -0,0 +1,1281 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abseil Python logging module implemented on top of standard logging. + +Simple usage:: + + from absl import logging + + logging.info('Interesting Stuff') + logging.info('Interesting Stuff with Arguments: %d', 42) + + logging.set_verbosity(logging.INFO) + logging.log(logging.DEBUG, 'This will *not* be printed') + logging.set_verbosity(logging.DEBUG) + logging.log(logging.DEBUG, 'This will be printed') + + logging.warning('Worrying Stuff') + logging.error('Alarming Stuff') + logging.fatal('AAAAHHHHH!!!!') # Process exits. + +Usage note: Do not pre-format the strings in your program code. +Instead, let the logging module perform argument interpolation. +This saves cycles because strings that don't need to be printed +are never formatted. 
Note that this module does not attempt to +interpolate arguments when no arguments are given. In other words:: + + logging.info('Interesting Stuff: %s') + +does not raise an exception because logging.info() has only one +argument, the message string. + +"Lazy" evaluation for debugging +------------------------------- + +If you do something like this:: + + logging.debug('Thing: %s', thing.ExpensiveOp()) + +then the ExpensiveOp will be evaluated even if nothing +is printed to the log. To avoid this, use the level_debug() function:: + + if logging.level_debug(): + logging.debug('Thing: %s', thing.ExpensiveOp()) + +Per file level logging is supported by logging.vlog() and +logging.vlog_is_on(). For example:: + + if logging.vlog_is_on(2): + logging.vlog(2, very_expensive_debug_message()) + +Notes on Unicode +---------------- + +The log output is encoded as UTF-8. Don't pass data in other encodings in +bytes() instances -- instead pass unicode string instances when you need to +(for both the format string and arguments). + +Note on critical and fatal: +Standard logging module defines fatal as an alias to critical, but it's not +documented, and it does NOT actually terminate the program. +This module only defines fatal but not critical, and it DOES terminate the +program. + +The differences in behavior are historical and unfortunate. +""" + +import collections +from collections import abc +import getpass +import io +import itertools +import logging +import os +import socket +import struct +import sys +import tempfile +import threading +import time +import timeit +import traceback +import types +import warnings + +from absl import flags +from absl.logging import converter + +# pylint: disable=g-import-not-at-top +try: + from typing import NoReturn +except ImportError: + pass + +# pylint: enable=g-import-not-at-top + +FLAGS = flags.FLAGS + + +# Logging levels. +FATAL = converter.ABSL_FATAL +ERROR = converter.ABSL_ERROR +WARNING = converter.ABSL_WARNING +WARN = converter.ABSL_WARNING # Deprecated name. +INFO = converter.ABSL_INFO +DEBUG = converter.ABSL_DEBUG + +# Regex to match/parse log line prefixes. +ABSL_LOGGING_PREFIX_REGEX = ( + r'^(?P<severity>[IWEF])' + r'(?P<month>\d\d)(?P<day>\d\d) ' + r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)' + r'\.(?P<microsecond>\d\d\d\d\d\d) +' + r'(?P<thread_id>-?\d+) ' + r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)') + + +# Mask to convert integer thread ids to unsigned quantities for logging purposes +_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1 + +# Extra property set on the LogRecord created by ABSLLogger when its level is +# CRITICAL/FATAL. +_ABSL_LOG_FATAL = '_absl_log_fatal' +# Extra prefix added to the log message when a non-absl logger logs a +# CRITICAL/FATAL message. +_CRITICAL_PREFIX = 'CRITICAL - ' + +# Used by findCaller to skip callers from */logging/__init__.py. +_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.') + +# The ABSL logger instance, initialized in _initialize(). +_absl_logger = None +# The ABSL handler instance, initialized in _initialize(). +_absl_handler = None + + +_CPP_NAME_TO_LEVELS = { + 'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
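Each line emitted by the absl handler starts with the prefix this regex describes: a one-letter severity, month/day, a timestamp with microseconds, the thread id, and the ``filename:line`` of the call site. A small illustrative check (the log line literal is made up)::

  import re
  from absl import logging

  line = 'I0214 10:05:31.123456 1234 my_module.py:42] starting up'
  match = re.match(logging.ABSL_LOGGING_PREFIX_REGEX, line)
  print(match.group('severity'), match.group('filename'), match.group('line'))
  # I my_module.py 42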
+ 'info': '0', + 'warning': '1', + 'warn': '1', + 'error': '2', + 'fatal': '3' +} + +_CPP_LEVEL_TO_NAMES = { + '0': 'info', + '1': 'warning', + '2': 'error', + '3': 'fatal', +} + + +class _VerbosityFlag(flags.Flag): + """Flag class for -v/--verbosity.""" + + def __init__(self, *args, **kwargs): + super(_VerbosityFlag, self).__init__( + flags.IntegerParser(), + flags.ArgumentSerializer(), + *args, **kwargs) + + @property + def value(self): + return self._value + + @value.setter + def value(self, v): + self._value = v + self._update_logging_levels() + + def _update_logging_levels(self): + """Updates absl logging levels to the current verbosity. + + Visibility: module-private + """ + if not _absl_logger: + return + + if self._value <= converter.ABSL_DEBUG: + standard_verbosity = converter.absl_to_standard(self._value) + else: + # --verbosity is set to higher than 1 for vlog. + standard_verbosity = logging.DEBUG - (self._value - 1) + + # Also update root level when absl_handler is used. + if _absl_handler in logging.root.handlers: + # Make absl logger inherit from the root logger. absl logger might have + # a non-NOTSET value if logging.set_verbosity() is called at import time. + _absl_logger.setLevel(logging.NOTSET) + logging.root.setLevel(standard_verbosity) + else: + _absl_logger.setLevel(standard_verbosity) + + +class _LoggerLevelsFlag(flags.Flag): + """Flag class for --logger_levels.""" + + def __init__(self, *args, **kwargs): + super(_LoggerLevelsFlag, self).__init__( + _LoggerLevelsParser(), + _LoggerLevelsSerializer(), + *args, **kwargs) + + @property + def value(self): + # For lack of an immutable type, be defensive and return a copy. + # Modifications to the dict aren't supported and won't have any affect. + # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so + # just return a copy. + return self._value.copy() + + @value.setter + def value(self, v): + self._value = {} if v is None else v + self._update_logger_levels() + + def _update_logger_levels(self): + # Visibility: module-private. + # This is called by absl.app.run() during initialization. + for name, level in self._value.items(): + logging.getLogger(name).setLevel(level) + + +class _LoggerLevelsParser(flags.ArgumentParser): + """Parser for --logger_levels flag.""" + + def parse(self, value): + if isinstance(value, abc.Mapping): + return value + + pairs = [pair.strip() for pair in value.split(',') if pair.strip()] + + # Preserve the order so that serialization is deterministic. + levels = collections.OrderedDict() + for name_level in pairs: + name, level = name_level.split(':', 1) + name = name.strip() + level = level.strip() + levels[name] = level + return levels + + +class _LoggerLevelsSerializer(object): + """Serializer for --logger_levels flag.""" + + def serialize(self, value): + if isinstance(value, str): + return value + return ','.join( + '{}:{}'.format(name, level) for name, level in value.items()) + + +class _StderrthresholdFlag(flags.Flag): + """Flag class for --stderrthreshold.""" + + def __init__(self, *args, **kwargs): + super(_StderrthresholdFlag, self).__init__( + flags.ArgumentParser(), + flags.ArgumentSerializer(), + *args, **kwargs) + + @property + def value(self): + return self._value + + @value.setter + def value(self, v): + if v in _CPP_LEVEL_TO_NAMES: + # --stderrthreshold also accepts numeric strings whose values are + # Abseil C++ log levels. + cpp_value = int(v) + v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings. 
+ elif v.lower() in _CPP_NAME_TO_LEVELS: + v = v.lower() + if v == 'warn': + v = 'warning' # Use 'warning' as the canonical name. + cpp_value = int(_CPP_NAME_TO_LEVELS[v]) + else: + raise ValueError( + '--stderrthreshold must be one of (case-insensitive) ' + "'debug', 'info', 'warning', 'error', 'fatal', " + "or '0', '1', '2', '3', not '%s'" % v) + + self._value = v + + +LOGTOSTDERR = flags.DEFINE_boolean( + 'logtostderr', + False, + 'Should only log to stderr?', + allow_override_cpp=True, +) +ALSOLOGTOSTDERR = flags.DEFINE_boolean( + 'alsologtostderr', + False, + 'also log to stderr?', + allow_override_cpp=True, +) +LOG_DIR = flags.DEFINE_string( + 'log_dir', + os.getenv('TEST_TMPDIR', ''), + 'directory to write logfiles into', + allow_override_cpp=True, +) +VERBOSITY = flags.DEFINE_flag( + _VerbosityFlag( + 'verbosity', + -1, + ( + 'Logging verbosity level. Messages logged at this level or lower' + ' will be included. Set to 1 for debug logging. If the flag was not' + ' set or supplied, the value will be changed from the default of -1' + ' (warning) to 0 (info) after flags are parsed.' + ), + short_name='v', + allow_hide_cpp=True, + ) +) +LOGGER_LEVELS = flags.DEFINE_flag( + _LoggerLevelsFlag( + 'logger_levels', + {}, + ( + 'Specify log level of loggers. The format is a CSV list of ' + '`name:level`. Where `name` is the logger name used with ' + '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, ' + 'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`' + ), + ) +) +STDERRTHRESHOLD = flags.DEFINE_flag( + _StderrthresholdFlag( + 'stderrthreshold', + 'fatal', + ( + 'log messages at this level, or more severe, to stderr in ' + 'addition to the logfile. Possible values are ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'Obsoletes --alsologtostderr. Using --alsologtostderr ' + 'cancels the effect of this flag. Please also note that ' + 'this flag is subject to --verbosity and requires logfile ' + 'not be stderr.' + ), + allow_hide_cpp=True, + ) +) +SHOWPREFIXFORINFO = flags.DEFINE_boolean( + 'showprefixforinfo', + True, + ( + 'If False, do not prepend prefix to info messages ' + "when it's logged to stderr, " + '--verbosity is set to INFO level, ' + 'and python logging is used.' + ), +) + + +def get_verbosity(): + """Returns the logging verbosity.""" + return FLAGS['verbosity'].value + + +def set_verbosity(v): + """Sets the logging verbosity. + + Causes all messages of level <= v to be logged, + and all messages of level > v to be silently discarded. + + Args: + v: int|str, the verbosity level as an integer or string. Legal string values + are those that can be coerced to an integer as well as case-insensitive + 'debug', 'info', 'warning', 'error', and 'fatal'. + """ + try: + new_level = int(v) + except ValueError: + new_level = converter.ABSL_NAMES[v.upper()] + FLAGS.verbosity = new_level + + +def set_stderrthreshold(s): + """Sets the stderr threshold to the value passed in. + + Args: + s: str|int, valid strings values are case-insensitive 'debug', + 'info', 'warning', 'error', and 'fatal'; valid integer values are + logging.DEBUG|INFO|WARNING|ERROR|FATAL. + + Raises: + ValueError: Raised when s is an invalid value. 
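These module-level flags are ordinary absl flags, so a program whose flags are parsed by ``app.run()`` can set them on its command line, e.g. ``--verbosity=1 --logger_levels=myapp.db:DEBUG,urllib3:ERROR --stderrthreshold=warning --log_dir=/tmp/logs``, or adjust them from code once flags are parsed (values here are illustrative)::

  from absl import logging

  logging.set_verbosity(logging.DEBUG)    # same effect as passing --verbosity=1
  logging.set_stderrthreshold('warning')  # same effect as --stderrthreshold=warning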
+ """ + if s in converter.ABSL_LEVELS: + FLAGS.stderrthreshold = converter.ABSL_LEVELS[s] + elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES: + FLAGS.stderrthreshold = s + else: + raise ValueError( + 'set_stderrthreshold only accepts integer absl logging level ' + 'from -3 to 1, or case-insensitive string values ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'But found "{}" ({}).'.format(s, type(s))) + + +def fatal(msg, *args, **kwargs): + # type: (Any, Any, Any) -> NoReturn + """Logs a fatal message.""" + log(FATAL, msg, *args, **kwargs) + + +def error(msg, *args, **kwargs): + """Logs an error message.""" + log(ERROR, msg, *args, **kwargs) + + +def warning(msg, *args, **kwargs): + """Logs a warning message.""" + log(WARNING, msg, *args, **kwargs) + + +def warn(msg, *args, **kwargs): + """Deprecated, use 'warning' instead.""" + warnings.warn("The 'warn' function is deprecated, use 'warning' instead", + DeprecationWarning, 2) + log(WARNING, msg, *args, **kwargs) + + +def info(msg, *args, **kwargs): + """Logs an info message.""" + log(INFO, msg, *args, **kwargs) + + +def debug(msg, *args, **kwargs): + """Logs a debug message.""" + log(DEBUG, msg, *args, **kwargs) + + +def exception(msg, *args, exc_info=True, **kwargs): + """Logs an exception, with traceback and message.""" + error(msg, *args, exc_info=exc_info, **kwargs) + + +# Counter to keep track of number of log entries per token. +_log_counter_per_token = {} + + +def _get_next_log_count_per_token(token): + """Wrapper for _log_counter_per_token. Thread-safe. + + Args: + token: The token for which to look up the count. + + Returns: + The number of times this function has been called with + *token* as an argument (starting at 0). + """ + # Can't use a defaultdict because defaultdict isn't atomic, whereas + # setdefault is. + return next(_log_counter_per_token.setdefault(token, itertools.count())) + + +def log_every_n(level, msg, n, *args): + """Logs ``msg % args`` at level 'level' once per 'n' times. + + Logs the 1st call, (N+1)st call, (2N+1)st call, etc. + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n: int, the number of times this should be called before it is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, not (count % n), *args) + + +# Keeps track of the last log time of the given token. +# Note: must be a dict since set/get is atomic in CPython. +# Note: entries are never released as their number is expected to be low. +_log_timer_per_token = {} + + +def _seconds_have_elapsed(token, num_seconds): + """Tests if 'num_seconds' have passed since 'token' was requested. + + Not strictly thread-safe - may log with the wrong frequency if called + concurrently from multiple threads. Accuracy depends on resolution of + 'timeit.default_timer()'. + + Always returns True on the first call for a given 'token'. + + Args: + token: The token for which to look up the count. + num_seconds: The number of seconds to test for. + + Returns: + Whether it has been >= 'num_seconds' since 'token' was last requested. 
+ """ + now = timeit.default_timer() + then = _log_timer_per_token.get(token, None) + if then is None or (now - then) >= num_seconds: + _log_timer_per_token[token] = now + return True + else: + return False + + +def log_every_n_seconds(level, msg, n_seconds, *args): + """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call. + + Logs the first call, logs subsequent calls if 'n' seconds have elapsed since + the last logging call from the same call site (file + line). Not thread-safe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n_seconds: float or int, seconds which should elapse before logging again. + *args: The args to be substituted into the msg. + """ + should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds) + log_if(level, msg, should_log, *args) + + +def log_first_n(level, msg, n, *args): + """Logs ``msg % args`` at level ``level`` only first ``n`` times. + + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n: int, the maximal number of times the message is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, count < n, *args) + + +def log_if(level, msg, condition, *args): + """Logs ``msg % args`` at level ``level`` only if condition is fulfilled.""" + if condition: + log(level, msg, *args) + + +def log(level, msg, *args, **kwargs): + """Logs ``msg % args`` at absl logging level ``level``. + + If no args are given just print msg, ignoring any interpolation specifiers. + + Args: + level: int, the absl logging level at which to log the message + (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging + level constants are also supported, callers should prefer explicit + logging.vlog() calls for such purpose. + + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. + standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + + # Match standard logging's behavior. Before use_absl_handler() and + # logging is configured, there is no handler attached on _absl_logger nor + # logging.root. So logs go no where. + if not logging.root.handlers: + logging.basicConfig() + + _absl_logger.log(standard_level, msg, *args, **kwargs) + + +def vlog(level, msg, *args, **kwargs): + """Log ``msg % args`` at C++ vlog level ``level``. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... While absl level constants are also supported, + callers should prefer logging.log|debug|info|... calls for such purpose. + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + log(level, msg, *args, **kwargs) + + +def vlog_is_on(level): + """Checks if vlog is enabled for the given level in caller's source file. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... 
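For messages inside hot loops, the ``log_every_n``, ``log_every_n_seconds`` and ``log_first_n`` helpers above throttle output per call site rather than logging on every iteration. A short sketch::

  from absl import logging

  for i in range(1000):
    logging.log_every_n(logging.INFO, 'processed %d records', 100, i)
    logging.log_every_n_seconds(logging.INFO, 'still alive', 10)
    logging.log_first_n(logging.WARNING, 'slow path taken', 1)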
While absl level constants are also supported, + callers should prefer level_debug|level_info|... calls for + checking those. + + Returns: + True if logging is turned on for that level. + """ + + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. + standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + return _absl_logger.isEnabledFor(standard_level) + + +def flush(): + """Flushes all log files.""" + get_absl_handler().flush() + + +def level_debug(): + """Returns True if debug logging is turned on.""" + return get_verbosity() >= DEBUG + + +def level_info(): + """Returns True if info logging is turned on.""" + return get_verbosity() >= INFO + + +def level_warning(): + """Returns True if warning logging is turned on.""" + return get_verbosity() >= WARNING + + +level_warn = level_warning # Deprecated function. + + +def level_error(): + """Returns True if error logging is turned on.""" + return get_verbosity() >= ERROR + + +def get_log_file_name(level=INFO): + """Returns the name of the log file. + + For Python logging, only one file is used and level is ignored. And it returns + empty string if it logs to stderr/stdout or the log stream has no `name` + attribute. + + Args: + level: int, the absl.logging level. + + Raises: + ValueError: Raised when `level` has an invalid value. + """ + if level not in converter.ABSL_LEVELS: + raise ValueError('Invalid absl.logging level {}'.format(level)) + stream = get_absl_handler().python_handler.stream + if (stream == sys.stderr or stream == sys.stdout or + not hasattr(stream, 'name')): + return '' + else: + return stream.name + + +def find_log_dir_and_names(program_name=None, log_dir=None): + """Computes the directory and filename prefix for log file. + + Args: + program_name: str|None, the filename part of the path to the program that + is running without its extension. e.g: if your program is called + ``usr/bin/foobar.py`` this method should probably be called with + ``program_name='foobar`` However, this is just a convention, you can + pass in any string you want, and it will be used as part of the + log filename. If you don't pass in anything, the default behavior + is as described in the example. In python standard logging mode, + the program_name will be prepended with ``py_`` if it is the + ``program_name`` argument is omitted. + log_dir: str|None, the desired log directory. + + Returns: + (log_dir, file_prefix, symlink_prefix) + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + if not program_name: + # Strip the extension (foobar.par becomes foobar, and + # fubar.py becomes fubar). We do this so that the log + # file names are similar to C++ log file names. + program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] + + # Prepend py_ to files so that python code gets a unique file, and + # so that C++ libraries do not try to write to the same log files as us. + program_name = 'py_%s' % program_name + + actual_log_dir = find_log_dir(log_dir=log_dir) + + try: + username = getpass.getuser() + except KeyError: + # This can happen, e.g. when running under docker w/o passwd file. 
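The arithmetic above means verbosity values greater than 1 act like C++ vlog levels: with ``--verbosity=2``, ``vlog(1)`` and ``vlog(2)`` are enabled while ``vlog(3)`` stays off. For example::

  from absl import logging

  logging.set_verbosity(2)
  print(logging.vlog_is_on(2))  # True
  print(logging.vlog_is_on(3))  # False
  if logging.vlog_is_on(2):
    logging.vlog(2, 'detailed state: %r', {'step': 7})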
+ if hasattr(os, 'getuid'): + # Windows doesn't have os.getuid + username = str(os.getuid()) + else: + username = 'unknown' + hostname = socket.gethostname() + file_prefix = '%s.%s.%s.log' % (program_name, hostname, username) + + return actual_log_dir, file_prefix, program_name + + +def find_log_dir(log_dir=None): + """Returns the most suitable directory to put log files into. + + Args: + log_dir: str|None, if specified, the logfile(s) will be created in that + directory. Otherwise if the --log_dir command-line flag is provided, + the logfile will be created in that directory. Otherwise the logfile + will be created in a standard location. + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + # Get a list of possible log dirs (will try to use them in order). + # NOTE: Google's internal implementation has a special handling for Google + # machines, which uses a list of directories. Hence the following uses `dirs` + # instead of a single directory. + if log_dir: + # log_dir was explicitly specified as an arg, so use it and it alone. + dirs = [log_dir] + elif FLAGS['log_dir'].value: + # log_dir flag was provided, so use it and it alone (this mimics the + # behavior of the same flag in logging.cc). + dirs = [FLAGS['log_dir'].value] + else: + dirs = [tempfile.gettempdir()] + + # Find the first usable log dir. + for d in dirs: + if os.path.isdir(d) and os.access(d, os.W_OK): + return d + raise FileNotFoundError( + "Can't find a writable directory for logs, tried %s" % dirs) + + +def get_absl_log_prefix(record): + """Returns the absl log prefix for the log record. + + Args: + record: logging.LogRecord, the record to get prefix for. + """ + created_tuple = time.localtime(record.created) + created_microsecond = int(record.created % 1.0 * 1e6) + + critical_prefix = '' + level = record.levelno + if _is_non_absl_fatal_record(record): + # When the level is FATAL, but not logged from absl, lower the level so + # it's treated as ERROR. + level = logging.ERROR + critical_prefix = _CRITICAL_PREFIX + severity = converter.get_initial_for_level(level) + + return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % ( + severity, + created_tuple.tm_mon, + created_tuple.tm_mday, + created_tuple.tm_hour, + created_tuple.tm_min, + created_tuple.tm_sec, + created_microsecond, + _get_thread_id(), + record.filename, + record.lineno, + critical_prefix) + + +def skip_log_prefix(func): + """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`. + + This is a convenience wrapper function / decorator for + :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`. + + If a callable function is provided, only that function will be skipped. + If a function name is provided, all functions with the same name in the + file that this is called in will be skipped. + + This can be used as a decorator of the intended function to be skipped. + + Args: + func: Callable function or its name as a string. + + Returns: + func (the input, unchanged). + + Raises: + ValueError: The input is callable but does not have a function code object. + TypeError: The input is neither callable nor a string. 
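Putting the pieces above together, the file eventually opened for logging is named after the (usually ``py_``-prefixed) program, the host, and the user. A rough sketch of the helper's return value, assuming ``/tmp/logs`` exists and is writable, with host and user names invented for illustration::

  from absl import logging

  log_dir, file_prefix, symlink_prefix = logging.find_log_dir_and_names(
      program_name='trainer', log_dir='/tmp/logs')
  # e.g. ('/tmp/logs', 'py_trainer.myhost.alice.log', 'py_trainer');
  # the handler defined below then appends '.INFO.<timestamp>.<pid>' and
  # maintains a 'py_trainer.INFO' symlink when it starts logging to a file.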
+ """ + if callable(func): + func_code = getattr(func, '__code__', None) + if func_code is None: + raise ValueError('Input callable does not have a function code object.') + file_name = func_code.co_filename + func_name = func_code.co_name + func_lineno = func_code.co_firstlineno + elif isinstance(func, str): + file_name = get_absl_logger().findCaller()[0] + func_name = func + func_lineno = None + else: + raise TypeError('Input is neither callable nor a string.') + ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno) + return func + + +def _is_non_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + not log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +def _is_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +# Indicates if we still need to warn about pre-init logs going to stderr. +_warn_preinit_stderr = True + + +class PythonHandler(logging.StreamHandler): + """The handler class used by Abseil Python logging implementation.""" + + def __init__(self, stream=None, formatter=None): + super(PythonHandler, self).__init__(stream) + self.setFormatter(formatter or PythonFormatter()) + + def start_logging_to_file(self, program_name=None, log_dir=None): + """Starts logging messages to files instead of standard error.""" + FLAGS.logtostderr = False + + actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names( + program_name=program_name, log_dir=log_dir) + + basename = '%s.INFO.%s.%d' % ( + file_prefix, + time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())), + os.getpid()) + filename = os.path.join(actual_log_dir, basename) + + self.stream = open(filename, 'a', encoding='utf-8') + + # os.symlink is not available on Windows Python 2. + if getattr(os, 'symlink', None): + # Create a symlink to the log file with a canonical name. + symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO') + try: + if os.path.islink(symlink): + os.unlink(symlink) + os.symlink(os.path.basename(filename), symlink) + except EnvironmentError: + # If it fails, we're sad but it's no error. Commonly, this + # fails because the symlink was created by another user and so + # we can't modify it + pass + + def use_absl_log_file(self, program_name=None, log_dir=None): + """Conditionally logs to files, based on --logtostderr.""" + if FLAGS['logtostderr'].value: + self.stream = sys.stderr + else: + self.start_logging_to_file(program_name=program_name, log_dir=log_dir) + + def flush(self): + """Flushes all log files.""" + self.acquire() + try: + if self.stream and hasattr(self.stream, 'flush'): + self.stream.flush() + except (EnvironmentError, ValueError): + # A ValueError is thrown if we try to flush a closed file. + pass + finally: + self.release() + + def _log_to_stderr(self, record): + """Emits the record to stderr. + + This temporarily sets the handler stream to stderr, calls + StreamHandler.emit, then reverts the stream back. + + Args: + record: logging.LogRecord, the record to log. + """ + # emit() is protected by a lock in logging.Handler, so we don't need to + # protect here again. + old_stream = self.stream + self.stream = sys.stderr + try: + super(PythonHandler, self).emit(record) + finally: + self.stream = old_stream + + def emit(self, record): + """Prints a record out to some streams. + + 1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY. + 2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``. + 3. 
If ``FLAGS.logtostderr`` is not set, it will log to the stream + associated with the current thread. + + Args: + record: :class:`logging.LogRecord`, the record to emit. + """ + # People occasionally call logging functions at import time before + # our flags may have even been defined yet, let alone even parsed, as we + # rely on the C++ side to define some flags for us and app init to + # deal with parsing. Match the C++ library behavior of notify and emit + # such messages to stderr. It encourages people to clean-up and does + # not hide the message. + level = record.levelno + if not FLAGS.is_parsed(): # Also implies "before flag has been defined". + global _warn_preinit_stderr + if _warn_preinit_stderr: + sys.stderr.write( + 'WARNING: Logging before flag parsing goes to stderr.\n') + _warn_preinit_stderr = False + self._log_to_stderr(record) + elif FLAGS['logtostderr'].value: + self._log_to_stderr(record) + else: + super(PythonHandler, self).emit(record) + stderr_threshold = converter.string_to_standard( + FLAGS['stderrthreshold'].value) + if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and + self.stream != sys.stderr): + self._log_to_stderr(record) + # Die when the record is created from ABSLLogger and level is FATAL. + if _is_absl_fatal_record(record): + self.flush() # Flush the log before dying. + + # In threaded python, sys.exit() from a non-main thread only + # exits the thread in question. + os.abort() + + def close(self): + """Closes the stream to which we are writing.""" + self.acquire() + try: + self.flush() + try: + # Do not close the stream if it's sys.stderr|stdout. They may be + # redirected or overridden to files, which should be managed by users + # explicitly. + user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__ + if self.stream not in user_managed and ( + not hasattr(self.stream, 'isatty') or not self.stream.isatty()): + self.stream.close() + except ValueError: + # A ValueError is thrown if we try to run isatty() on a closed file. 
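In short, once the absl handler is active a program can switch from stderr to file logging by pointing the handler at a directory; messages at or above ``--stderrthreshold`` (and everything when ``--alsologtostderr`` is set) still go to stderr. A hedged sketch, with the program name and directory purely illustrative::

  from absl import app, logging

  def main(argv):
    # Assumes /tmp/logs exists and is writable.
    logging.get_absl_handler().use_absl_log_file('myapp', '/tmp/logs')
    logging.info('written to the log file unless --logtostderr is given')

  if __name__ == '__main__':
    app.run(main)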
+ pass + super(PythonHandler, self).close() + finally: + self.release() + + +class ABSLHandler(logging.Handler): + """Abseil Python logging module's log handler.""" + + def __init__(self, python_logging_formatter): + super(ABSLHandler, self).__init__() + + self._python_handler = PythonHandler(formatter=python_logging_formatter) + self.activate_python_handler() + + def format(self, record): + return self._current_handler.format(record) + + def setFormatter(self, fmt): + self._current_handler.setFormatter(fmt) + + def emit(self, record): + self._current_handler.emit(record) + + def flush(self): + self._current_handler.flush() + + def close(self): + super(ABSLHandler, self).close() + self._current_handler.close() + + def handle(self, record): + rv = self.filter(record) + if rv: + return self._current_handler.handle(record) + return rv + + @property + def python_handler(self): + return self._python_handler + + def activate_python_handler(self): + """Uses the Python logging handler as the current logging handler.""" + self._current_handler = self._python_handler + + def use_absl_log_file(self, program_name=None, log_dir=None): + self._current_handler.use_absl_log_file(program_name, log_dir) + + def start_logging_to_file(self, program_name=None, log_dir=None): + self._current_handler.start_logging_to_file(program_name, log_dir) + + +class PythonFormatter(logging.Formatter): + """Formatter class used by :class:`~absl.logging.PythonHandler`.""" + + def format(self, record): + """Appends the message from the record to the results of the prefix. + + Args: + record: logging.LogRecord, the record to be formatted. + + Returns: + The formatted string representing the record. + """ + if (not FLAGS['showprefixforinfo'].value and + FLAGS['verbosity'].value == converter.ABSL_INFO and + record.levelno == logging.INFO and + _absl_handler.python_handler.stream == sys.stderr): + prefix = '' + else: + prefix = get_absl_log_prefix(record) + return prefix + super(PythonFormatter, self).format(record) + + +class ABSLLogger(logging.getLoggerClass()): + """A logger that will create LogRecords while skipping some stack frames. + + This class maintains an internal list of filenames and method names + for use when determining who called the currently executing stack + frame. Any method names from specific source files are skipped when + walking backwards through the stack. + + Client code should use the register_frame_to_skip method to let the + ABSLLogger know which method from which file should be + excluded from the walk backwards through the stack. + """ + _frames_to_skip = set() + + def findCaller(self, stack_info=False, stacklevel=1): + """Finds the frame of the calling method on the stack. + + This method skips any frames registered with the + ABSLLogger and any methods from this file, and whatever + method is currently being used to generate the prefix for the log + line. Then it returns the file name, line number, and method name + of the calling method. An optional fourth item may be returned, + callers who only need things from the first three are advised to + always slice or index the result rather than using direct unpacking + assignment. + + Args: + stack_info: bool, when True, include the stack trace as a fourth item + returned. On Python 3 there are always four items returned - the + fourth will be None when this is False. On Python 2 the stdlib + base class API only returns three items. We do the same when this + new parameter is unspecified or False for compatibility. 
+ + Returns: + (filename, lineno, methodname[, sinfo]) of the calling method. + """ + f_to_skip = ABSLLogger._frames_to_skip + # Use sys._getframe(2) instead of logging.currentframe(), it's slightly + # faster because there is one less frame to traverse. + frame = sys._getframe(2) # pylint: disable=protected-access + + while frame: + code = frame.f_code + if (_LOGGING_FILE_PREFIX not in code.co_filename and + (code.co_filename, code.co_name, + code.co_firstlineno) not in f_to_skip and + (code.co_filename, code.co_name) not in f_to_skip): + sinfo = None + if stack_info: + out = io.StringIO() + out.write(u'Stack (most recent call last):\n') + traceback.print_stack(frame, file=out) + sinfo = out.getvalue().rstrip(u'\n') + return (code.co_filename, frame.f_lineno, code.co_name, sinfo) + frame = frame.f_back + + def critical(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``CRITICAL``.""" + self.log(logging.CRITICAL, msg, *args, **kwargs) + + def fatal(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``FATAL``.""" + self.log(logging.FATAL, msg, *args, **kwargs) + + def error(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``ERROR``.""" + self.log(logging.ERROR, msg, *args, **kwargs) + + def warn(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARN``.""" + warnings.warn("The 'warn' method is deprecated, use 'warning' instead", + DeprecationWarning, 2) + self.log(logging.WARN, msg, *args, **kwargs) + + def warning(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARNING``.""" + self.log(logging.WARNING, msg, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``INFO``.""" + self.log(logging.INFO, msg, *args, **kwargs) + + def debug(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``DEBUG``.""" + self.log(logging.DEBUG, msg, *args, **kwargs) + + def log(self, level, msg, *args, **kwargs): + """Logs a message at a cetain level substituting in the supplied arguments. + + This method behaves differently in python and c++ modes. + + Args: + level: int, the standard logging level at which to log the message. + msg: str, the text of the message to log. + *args: The arguments to substitute in the message. + **kwargs: The keyword arguments to substitute in the message. + """ + if level >= logging.FATAL: + # Add property to the LogRecord created by this logger. + # This will be used by the ABSLHandler to determine whether it should + # treat CRITICAL/FATAL logs as really FATAL. + extra = kwargs.setdefault('extra', {}) + extra[_ABSL_LOG_FATAL] = True + super(ABSLLogger, self).log(level, msg, *args, **kwargs) + + def handle(self, record): + """Calls handlers without checking ``Logger.disabled``. + + Non-root loggers are set to disabled after setup with :func:`logging.config` + if it's not explicitly specified. Historically, absl logging will not be + disabled by that. To maintaining this behavior, this function skips + checking the ``Logger.disabled`` bit. + + This logger can still be disabled by adding a filter that filters out + everything. + + Args: + record: logging.LogRecord, the record to handle. + """ + if self.filter(record): + self.callHandlers(record) + + @classmethod + def register_frame_to_skip(cls, file_name, function_name, line_number=None): + """Registers a function name to skip when walking the stack. 
+ + The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the + stack to make the log messages meaningful in their appropriate context. + This method registers a function from a particular file as one + which should be skipped. + + Args: + file_name: str, the name of the file that contains the function. + function_name: str, the name of the function to skip. + line_number: int, if provided, only the function with this starting line + number will be skipped. Otherwise, all functions with the same name + in the file will be skipped. + """ + if line_number is not None: + cls._frames_to_skip.add((file_name, function_name, line_number)) + else: + cls._frames_to_skip.add((file_name, function_name)) + + +def _get_thread_id(): + """Gets id of current thread, suitable for logging as an unsigned quantity. + + If pywrapbase is linked, returns GetTID() for the thread ID to be + consistent with C++ logging. Otherwise, returns the numeric thread id. + The quantities are made unsigned by masking with 2*sys.maxint + 1. + + Returns: + Thread ID unique to this process (unsigned) + """ + thread_id = threading.get_ident() + return thread_id & _THREAD_ID_MASK + + +def get_absl_logger(): + """Returns the absl logger instance.""" + assert _absl_logger is not None + return _absl_logger + + +def get_absl_handler(): + """Returns the absl handler instance.""" + assert _absl_handler is not None + return _absl_handler + + +def use_python_logging(quiet=False): + """Uses the python implementation of the logging code. + + Args: + quiet: No logging message about switching logging type. + """ + get_absl_handler().activate_python_handler() + if not quiet: + info('Restoring pure python logging') + + +_attempted_to_remove_stderr_stream_handlers = False + + +def use_absl_handler(): + """Uses the ABSL logging handler for logging. + + This method is called in :func:`app.run()` so the absl handler + is used in absl apps. + """ + global _attempted_to_remove_stderr_stream_handlers + if not _attempted_to_remove_stderr_stream_handlers: + # The absl handler logs to stderr by default. To prevent double logging to + # stderr, the following code tries its best to remove other handlers that + # emit to stderr. Those handlers are most commonly added when + # logging.info/debug is called before calling use_absl_handler(). 
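Frame skipping is what keeps thin logging wrappers from being reported as the source of every message: a registered function is stepped over when ``findCaller`` walks the stack, so the log line points at the wrapper's caller instead. A small sketch of the decorator form (the helper name is made up)::

  from absl import logging

  @logging.skip_log_prefix
  def log_status(msg, *args):
    # Messages logged here are attributed to log_status's caller.
    logging.info(msg, *args)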
+ handlers = [ + h for h in logging.root.handlers + if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr] + for h in handlers: + logging.root.removeHandler(h) + _attempted_to_remove_stderr_stream_handlers = True + + absl_handler = get_absl_handler() + if absl_handler not in logging.root.handlers: + logging.root.addHandler(absl_handler) + FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access + FLAGS['logger_levels']._update_logger_levels() # pylint: disable=protected-access + + +def _initialize(): + """Initializes loggers and handlers.""" + global _absl_logger, _absl_handler + + if _absl_logger: + return + + original_logger_class = logging.getLoggerClass() + logging.setLoggerClass(ABSLLogger) + _absl_logger = logging.getLogger('absl') + logging.setLoggerClass(original_logger_class) + + python_logging_formatter = PythonFormatter() + _absl_handler = ABSLHandler(python_logging_formatter) + + +_initialize() diff --git a/MLPY/Lib/site-packages/absl/logging/__init__.pyi b/MLPY/Lib/site-packages/absl/logging/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d5bb69d59d8e0c8fdaafaab7b975361031be898 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/logging/__init__.pyi @@ -0,0 +1,290 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, TypeVar, Union + +from absl import flags + +# Logging levels. +FATAL: int +ERROR: int +WARNING: int +WARN: int # Deprecated name. +INFO: int +DEBUG: int + +ABSL_LOGGING_PREFIX_REGEX: str + +LOGTOSTDERR: flags.FlagHolder[bool] +ALSOLOGTOSTDERR: flags.FlagHolder[bool] +LOG_DIR: flags.FlagHolder[str] +VERBOSITY: flags.FlagHolder[int] +LOGGER_LEVELS: flags.FlagHolder[Dict[str, str]] +STDERRTHRESHOLD: flags.FlagHolder[str] +SHOWPREFIXFORINFO: flags.FlagHolder[bool] + + +def get_verbosity() -> int: + ... + + +def set_verbosity(v: Union[int, str]) -> None: + ... + + +def set_stderrthreshold(s: Union[int, str]) -> None: + ... + + +# TODO(b/277607978): Provide actual args+kwargs shadowing stdlib's logging functions. +def fatal(msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + +def error(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warning(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warn(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def info(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def debug(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def exception(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def log_every_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_every_n_seconds( + level: int, msg: Any, n_seconds: float, *args: Any +) -> None: + ... + + +def log_first_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_if(level: int, msg: Any, condition: Any, *args: Any) -> None: + ... + + +def log(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... 
+ + +def vlog(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def vlog_is_on(level: int) -> bool: + ... + + +def flush() -> None: + ... + + +def level_debug() -> bool: + ... + + +def level_info() -> bool: + ... + + +def level_warning() -> bool: + ... + + +level_warn = level_warning # Deprecated function. + + +def level_error() -> bool: + ... + + +def get_log_file_name(level: int = ...) -> str: + ... + + +def find_log_dir_and_names( + program_name: Optional[str] = ..., log_dir: Optional[str] = ... +) -> Tuple[str, str, str]: + ... + + +def find_log_dir(log_dir: Optional[str] = ...) -> str: + ... + + +def get_absl_log_prefix(record: logging.LogRecord) -> str: + ... + + +_SkipLogT = TypeVar('_SkipLogT', str, Callable[..., Any]) + +def skip_log_prefix(func: _SkipLogT) -> _SkipLogT: + ... + + +_StreamT = TypeVar("_StreamT") + + +class PythonHandler(logging.StreamHandler[_StreamT]): + + def __init__( + self, + stream: Optional[_StreamT] = ..., + formatter: Optional[logging.Formatter] = ..., + ) -> None: + ... + + def start_logging_to_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def flush(self) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def close(self) -> None: + ... + + +class ABSLHandler(logging.Handler): + + def __init__(self, python_logging_formatter: PythonFormatter) -> None: + ... + + def format(self, record: logging.LogRecord) -> str: + ... + + def setFormatter(self, fmt) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def flush(self) -> None: + ... + + def close(self) -> None: + ... + + def handle(self, record: logging.LogRecord) -> bool: + ... + + @property + def python_handler(self) -> PythonHandler: + ... + + def activate_python_handler(self) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def start_logging_to_file(self, program_name=None, log_dir=None) -> None: + ... + + +class PythonFormatter(logging.Formatter): + + def format(self, record: logging.LogRecord) -> str: + ... + + +class ABSLLogger(logging.Logger): + + def findCaller( + self, stack_info: bool = ..., stacklevel: int = ... + ) -> Tuple[str, int, str, Optional[str]]: + ... + + def critical(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def fatal(self, msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + def error(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warn(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def info(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def log(self, level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def handle(self, record: logging.LogRecord) -> None: + ... + + @classmethod + def register_frame_to_skip( + cls, file_name: str, function_name: str, line_number: Optional[int] = ... + ) -> None: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. +def get_absl_logger() -> ABSLLogger: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. +def get_absl_handler() -> ABSLHandler: + ... + + +def use_python_logging(quiet: bool = ...) -> None: + ... 
+ + +def use_absl_handler() -> None: + ... diff --git a/MLPY/Lib/site-packages/absl/logging/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/absl/logging/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adc6f82dd6179e991019af8945dd94181188ac57 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/logging/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/logging/__pycache__/converter.cpython-39.pyc b/MLPY/Lib/site-packages/absl/logging/__pycache__/converter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9685713b2f3992227776b4f5bd88896c8884ff89 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/logging/__pycache__/converter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/logging/converter.py b/MLPY/Lib/site-packages/absl/logging/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..0239ab4556458b995f9cbca796281cc44acaf476 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/logging/converter.py @@ -0,0 +1,214 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module to convert log levels between Abseil Python, C++, and Python standard. + +This converter has to convert (best effort) between three different +logging level schemes: + + * **cpp**: The C++ logging level scheme used in Abseil C++. + * **absl**: The absl.logging level scheme used in Abseil Python. + * **standard**: The python standard library logging level scheme. + +Here is a handy ascii chart for easy mental mapping:: + + LEVEL | cpp | absl | standard | + ---------+-----+--------+----------+ + DEBUG | 0 | 1 | 10 | + INFO | 0 | 0 | 20 | + WARNING | 1 | -1 | 30 | + ERROR | 2 | -2 | 40 | + CRITICAL | 3 | -3 | 50 | + FATAL | 3 | -3 | 50 | + +Note: standard logging ``CRITICAL`` is mapped to absl/cpp ``FATAL``. +However, only ``CRITICAL`` logs from the absl logger (or absl.logging.fatal) +will terminate the program. ``CRITICAL`` logs from non-absl loggers are treated +as error logs with a message prefix ``"CRITICAL - "``. + +Converting from standard to absl or cpp is a lossy conversion. +Converting back to standard will lose granularity. For this reason, +users should always try to convert to standard, the richest +representation, before manipulating the levels, and then only to cpp +or absl if those level schemes are absolutely necessary. +""" + +import logging + +STANDARD_CRITICAL = logging.CRITICAL +STANDARD_ERROR = logging.ERROR +STANDARD_WARNING = logging.WARNING +STANDARD_INFO = logging.INFO +STANDARD_DEBUG = logging.DEBUG + +# These levels are also used to define the constants +# FATAL, ERROR, WARNING, INFO, and DEBUG in the +# absl.logging module. +ABSL_FATAL = -3 +ABSL_ERROR = -2 +ABSL_WARNING = -1 +ABSL_WARN = -1 # Deprecated name. 
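The chart above can be exercised directly with the conversion helpers defined below; the values in the comments follow the chart (``std_logging`` is just the standard library module imported under another name)::

  import logging as std_logging
  from absl.logging import converter

  print(converter.absl_to_standard(converter.ABSL_WARNING))  # 30 == std_logging.WARNING
  print(converter.standard_to_absl(std_logging.DEBUG))       # 1 == ABSL_DEBUG
  print(converter.absl_to_cpp(converter.ABSL_ERROR))         # 2
  print(converter.get_initial_for_level(std_logging.ERROR))  # 'E'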
+ABSL_INFO = 0 +ABSL_DEBUG = 1 + +ABSL_LEVELS = {ABSL_FATAL: 'FATAL', + ABSL_ERROR: 'ERROR', + ABSL_WARNING: 'WARNING', + ABSL_INFO: 'INFO', + ABSL_DEBUG: 'DEBUG'} + +# Inverts the ABSL_LEVELS dictionary +ABSL_NAMES = {'FATAL': ABSL_FATAL, + 'ERROR': ABSL_ERROR, + 'WARNING': ABSL_WARNING, + 'WARN': ABSL_WARNING, # Deprecated name. + 'INFO': ABSL_INFO, + 'DEBUG': ABSL_DEBUG} + +ABSL_TO_STANDARD = {ABSL_FATAL: STANDARD_CRITICAL, + ABSL_ERROR: STANDARD_ERROR, + ABSL_WARNING: STANDARD_WARNING, + ABSL_INFO: STANDARD_INFO, + ABSL_DEBUG: STANDARD_DEBUG} + +# Inverts the ABSL_TO_STANDARD +STANDARD_TO_ABSL = dict((v, k) for (k, v) in ABSL_TO_STANDARD.items()) + + +def get_initial_for_level(level): + """Gets the initial that should start the log line for the given level. + + It returns: + + * ``'I'`` when: ``level < STANDARD_WARNING``. + * ``'W'`` when: ``STANDARD_WARNING <= level < STANDARD_ERROR``. + * ``'E'`` when: ``STANDARD_ERROR <= level < STANDARD_CRITICAL``. + * ``'F'`` when: ``level >= STANDARD_CRITICAL``. + + Args: + level: int, a Python standard logging level. + + Returns: + The first initial as it would be logged by the C++ logging module. + """ + if level < STANDARD_WARNING: + return 'I' + elif level < STANDARD_ERROR: + return 'W' + elif level < STANDARD_CRITICAL: + return 'E' + else: + return 'F' + + +def absl_to_cpp(level): + """Converts an absl log level to a cpp log level. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in Abseil C++. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level >= 0: + # C++ log levels must be >= 0 + return 0 + else: + return -level + + +def absl_to_standard(level): + """Converts an integer level from the absl value to the standard value. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in standard logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < ABSL_FATAL: + level = ABSL_FATAL + if level <= ABSL_DEBUG: + return ABSL_TO_STANDARD[level] + # Maps to vlog levels. + return STANDARD_DEBUG - level + 1 + + +def string_to_standard(level): + """Converts a string level to standard logging level value. + + Args: + level: str, case-insensitive ``'debug'``, ``'info'``, ``'warning'``, + ``'error'``, ``'fatal'``. + + Returns: + The corresponding integer level for use in standard logging. + """ + return absl_to_standard(ABSL_NAMES.get(level.upper())) + + +def standard_to_absl(level): + """Converts an integer level from the standard value to the absl value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in absl logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < 0: + level = 0 + if level < STANDARD_DEBUG: + # Maps to vlog levels. 
+ return STANDARD_DEBUG - level + 1 + elif level < STANDARD_INFO: + return ABSL_DEBUG + elif level < STANDARD_WARNING: + return ABSL_INFO + elif level < STANDARD_ERROR: + return ABSL_WARNING + elif level < STANDARD_CRITICAL: + return ABSL_ERROR + else: + return ABSL_FATAL + + +def standard_to_cpp(level): + """Converts an integer level from the standard value to the cpp value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in cpp logging. + """ + return absl_to_cpp(standard_to_absl(level)) diff --git a/MLPY/Lib/site-packages/absl/testing/__init__.py b/MLPY/Lib/site-packages/absl/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bd1cd51810385ca0e5e9fed3fb9a804febf27e --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea218de97789639991fc87dbfdb06da95064f540 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76abdd2175fe392045ffae100c947d024d9fa2a3 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cb471cfb93a0401b7482206c1935c83dd58ba1a Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/absltest.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/absltest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db59cf97444fe758c9bfd9133cb0781b21e5109 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/absltest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/flagsaver.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/flagsaver.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658ae6950beebe7c87a06a2d62a7a8a2d1a659f7 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/flagsaver.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/absl/testing/__pycache__/parameterized.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/parameterized.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50617f140e63642c4968477985946e7fcf9eada4 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/parameterized.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/__pycache__/xml_reporter.cpython-39.pyc b/MLPY/Lib/site-packages/absl/testing/__pycache__/xml_reporter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c4bb57378a29a64b33c43f14693da1bf94d3383 Binary files /dev/null and b/MLPY/Lib/site-packages/absl/testing/__pycache__/xml_reporter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/absl/testing/_bazelize_command.py b/MLPY/Lib/site-packages/absl/testing/_bazelize_command.py new file mode 100644 index 0000000000000000000000000000000000000000..9380d27427859371e323e12e0759ff83157223bc --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/_bazelize_command.py @@ -0,0 +1,68 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helper for running tests on Windows Bazel.""" + +import os + +from absl import flags + +FLAGS = flags.FLAGS + + +def get_executable_path(py_binary_name): + """Returns the executable path of a py_binary. + + This returns the executable path of a py_binary that is in another Bazel + target's data dependencies. + + On Linux/macOS, the path and __file__ have the same root directory. + On Windows, bazel builds an .exe file and we need to use the MANIFEST file + to locate the actual binary. + + Args: + py_binary_name: string, the name of a py_binary that is in another Bazel + target's data dependencies. + + Raises: + RuntimeError: Raised when it cannot locate the executable path. + """ + + if os.name == 'nt': + py_binary_name += '.exe' + manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST') + workspace_name = os.environ['TEST_WORKSPACE'] + manifest_entry = '{}/{}'.format(workspace_name, py_binary_name) + with open(manifest_file, 'r') as manifest_fd: + for line in manifest_fd: + tokens = line.strip().split(' ') + if len(tokens) != 2: + continue + if manifest_entry == tokens[0]: + return tokens[1] + raise RuntimeError( + 'Cannot locate executable path for {}, MANIFEST file: {}.'.format( + py_binary_name, manifest_file)) + else: + # NOTE: __file__ may be .py or .pyc, depending on how the module was + # loaded and executed. + path = __file__ + + # Use the package name to find the root directory: every dot is + # a directory, plus one for ourselves.
+ for _ in range(__name__.count('.') + 1): + path = os.path.dirname(path) + + root_directory = path + return os.path.join(root_directory, py_binary_name) diff --git a/MLPY/Lib/site-packages/absl/testing/_pretty_print_reporter.py b/MLPY/Lib/site-packages/absl/testing/_pretty_print_reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..b0dde07e4f5a98abedf1b3c5f6ac72fe256fa716 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/_pretty_print_reporter.py @@ -0,0 +1,91 @@ +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TestResult implementing default output for test execution status.""" + +import unittest + + +class TextTestResult(unittest.TextTestResult): + """TestResult class that provides the default text result formatting.""" + + def __init__(self, stream, descriptions, verbosity): + # Disable the verbose per-test output from the superclass, since it would + # conflict with our customized output. + super(TextTestResult, self).__init__(stream, descriptions, 0) + self._per_test_output = verbosity > 0 + + def _print_status(self, tag, test): + if self._per_test_output: + test_id = test.id() + if test_id.startswith('__main__.'): + test_id = test_id[len('__main__.'):] + print('[%s] %s' % (tag, test_id), file=self.stream) + self.stream.flush() + + def startTest(self, test): + super(TextTestResult, self).startTest(test) + self._print_status(' RUN ', test) + + def addSuccess(self, test): + super(TextTestResult, self).addSuccess(test) + self._print_status(' OK ', test) + + def addError(self, test, err): + super(TextTestResult, self).addError(test, err) + self._print_status(' FAILED ', test) + + def addFailure(self, test, err): + super(TextTestResult, self).addFailure(test, err) + self._print_status(' FAILED ', test) + + def addSkip(self, test, reason): + super(TextTestResult, self).addSkip(test, reason) + self._print_status(' SKIPPED ', test) + + def addExpectedFailure(self, test, err): + super(TextTestResult, self).addExpectedFailure(test, err) + self._print_status(' OK ', test) + + def addUnexpectedSuccess(self, test): + super(TextTestResult, self).addUnexpectedSuccess(test) + self._print_status(' FAILED ', test) + + +class TextTestRunner(unittest.TextTestRunner): + """A test runner that produces formatted text results.""" + + _TEST_RESULT_CLASS = TextTestResult + + # Set this to true at the class or instance level to run tests using a + # debug-friendly method (e.g, one that doesn't catch exceptions and interacts + # better with debuggers). + # Usually this is set using --pdb_post_mortem. + run_for_debugging = False + + def run(self, test): + # type: (TestCase) -> TestResult + if self.run_for_debugging: + return self._run_debug(test) + else: + return super(TextTestRunner, self).run(test) + + def _run_debug(self, test): + # type: (TestCase) -> TestResult + test.debug() + # Return an empty result to indicate success. 
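The reporter classes being added here produce the per-test "[ RUN ]" / "[ OK ]" status lines. A hedged sketch of driving the runner directly with a plain unittest suite; the module is private and is normally wired up by absltest itself, so this is illustration only and the demo test class is hypothetical:

    import unittest

    from absl.testing import _pretty_print_reporter

    class _DemoTest(unittest.TestCase):

      def test_addition(self):
        self.assertEqual(1 + 1, 2)

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(_DemoTest)
    # Any verbosity above zero enables the per-test status lines.
    _pretty_print_reporter.TextTestRunner(verbosity=1).run(suite)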
+ return self._makeResult() + + def _makeResult(self): + return TextTestResult(self.stream, self.descriptions, self.verbosity) diff --git a/MLPY/Lib/site-packages/absl/testing/absltest.py b/MLPY/Lib/site-packages/absl/testing/absltest.py new file mode 100644 index 0000000000000000000000000000000000000000..e43cb820654f80ca93eb1d71ee2c5f4b1748795d --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/absltest.py @@ -0,0 +1,2713 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base functionality for Abseil Python tests. + +This module contains base classes and high-level functions for Abseil-style +tests. +""" + +from collections import abc +import contextlib +import dataclasses +import difflib +import enum +import errno +import faulthandler +import getpass +import inspect +import io +import itertools +import json +import os +import random +import re +import shlex +import shutil +import signal +import stat +import subprocess +import sys +import tempfile +import textwrap +import typing +from typing import Any, AnyStr, BinaryIO, Callable, ContextManager, IO, Iterator, List, Mapping, MutableMapping, MutableSequence, NoReturn, Optional, Sequence, Text, TextIO, Tuple, Type, Union +import unittest +from unittest import mock # pylint: disable=unused-import Allow absltest.mock. +from urllib import parse + +from absl import app # pylint: disable=g-import-not-at-top +from absl import flags +from absl import logging +from absl.testing import _pretty_print_reporter +from absl.testing import xml_reporter + +# Use an if-type-checking block to prevent leakage of type-checking only +# symbols. We don't want people relying on these at runtime. +if typing.TYPE_CHECKING: + # Unbounded TypeVar for general usage + _T = typing.TypeVar('_T') + + import unittest.case # pylint: disable=g-import-not-at-top,g-bad-import-order + + _OutcomeType = unittest.case._Outcome # pytype: disable=module-attr + + +# pylint: enable=g-import-not-at-top + +# Re-export a bunch of unittest functions we support so that people don't +# have to import unittest to get them +# pylint: disable=invalid-name +skip = unittest.skip +skipIf = unittest.skipIf +skipUnless = unittest.skipUnless +SkipTest = unittest.SkipTest +expectedFailure = unittest.expectedFailure +# pylint: enable=invalid-name + +# End unittest re-exports + +FLAGS = flags.FLAGS + +_TEXT_OR_BINARY_TYPES = (str, bytes) + +# Suppress surplus entries in AssertionError stack traces. +__unittest = True # pylint: disable=invalid-name + + +def expectedFailureIf(condition, reason): # pylint: disable=invalid-name + """Expects the test to fail if the run condition is True. + + Example usage:: + + @expectedFailureIf(sys.version.major == 2, "Not yet working in py2") + def test_foo(self): + ... + + Args: + condition: bool, whether to expect failure or not. + reason: Text, the reason to expect failure. 
+ Returns: + Decorator function + """ + del reason # Unused + if condition: + return unittest.expectedFailure + else: + return lambda f: f + + +class TempFileCleanup(enum.Enum): + # Always cleanup temp files when the test completes. + ALWAYS = 'always' + # Only cleanup temp file if the test passes. This allows easier inspection + # of tempfile contents on test failure. absltest.TEST_TMPDIR.value determines + # where tempfiles are created. + SUCCESS = 'success' + # Never cleanup temp files. + OFF = 'never' + + +# Many of the methods in this module have names like assertSameElements. +# This kind of name does not comply with PEP8 style, +# but it is consistent with the naming of methods in unittest.py. +# pylint: disable=invalid-name + + +def _get_default_test_random_seed(): + # type: () -> int + random_seed = 301 + value = os.environ.get('TEST_RANDOM_SEED', '') + try: + random_seed = int(value) + except ValueError: + pass + return random_seed + + +def get_default_test_srcdir(): + # type: () -> Text + """Returns default test source dir.""" + return os.environ.get('TEST_SRCDIR', '') + + +def get_default_test_tmpdir(): + # type: () -> Text + """Returns default test temp dir.""" + tmpdir = os.environ.get('TEST_TMPDIR', '') + if not tmpdir: + tmpdir = os.path.join(tempfile.gettempdir(), 'absl_testing') + + return tmpdir + + +def _get_default_randomize_ordering_seed(): + # type: () -> int + """Returns default seed to use for randomizing test order. + + This function first checks the --test_randomize_ordering_seed flag, and then + the TEST_RANDOMIZE_ORDERING_SEED environment variable. If the first value + we find is: + * (not set): disable test randomization + * 0: disable test randomization + * 'random': choose a random seed in [1, 4294967295] for test order + randomization + * positive integer: use this seed for test order randomization + + (The values used are patterned after + https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED). + + In principle, it would be simpler to return None if no override is provided; + however, the python random module has no `get_seed()`, only `getstate()`, + which returns far more data than we want to pass via an environment variable + or flag. + + Returns: + A default value for test case randomization (int). 0 means do not randomize. + + Raises: + ValueError: Raised when the flag or env value is not one of the options + above. + """ + if FLAGS['test_randomize_ordering_seed'].present: + randomize = FLAGS.test_randomize_ordering_seed + elif 'TEST_RANDOMIZE_ORDERING_SEED' in os.environ: + randomize = os.environ['TEST_RANDOMIZE_ORDERING_SEED'] + else: + randomize = '' + if not randomize: + return 0 + if randomize == 'random': + return random.Random().randint(1, 4294967295) + if randomize == '0': + return 0 + try: + seed = int(randomize) + if seed > 0: + return seed + except ValueError: + pass + raise ValueError( + 'Unknown test randomization seed value: {}'.format(randomize)) + + +TEST_SRCDIR = flags.DEFINE_string( + 'test_srcdir', + get_default_test_srcdir(), + 'Root of directory tree where source files live', + allow_override_cpp=True) +TEST_TMPDIR = flags.DEFINE_string( + 'test_tmpdir', + get_default_test_tmpdir(), + 'Directory for temporary testing files', + allow_override_cpp=True) + +flags.DEFINE_integer( + 'test_random_seed', + _get_default_test_random_seed(), + 'Random seed for testing. 
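Because expectedFailureIf is defined at module level, it is available as absltest.expectedFailureIf. A brief sketch with a hypothetical test; note that version checks are safer against sys.version_info than the sys.version string used in the docstring example:

    import sys

    from absl.testing import absltest

    class CompatTest(absltest.TestCase):

      @absltest.expectedFailureIf(sys.version_info < (3, 8), 'needs Python 3.8+')
      def test_feature_requiring_new_runtime(self):
        ...

    if __name__ == '__main__':
      absltest.main()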
Some test frameworks may ' + 'change the default value of this flag between runs, so ' + 'it is not appropriate for seeding probabilistic tests.', + allow_override_cpp=True) +flags.DEFINE_string( + 'test_randomize_ordering_seed', + '', + 'If positive, use this as a seed to randomize the ' + 'execution order for test cases. If "random", pick a ' + 'random seed to use. If 0 or not set, do not randomize ' + 'test case execution order. This flag also overrides ' + 'the TEST_RANDOMIZE_ORDERING_SEED environment variable.', + allow_override_cpp=True) +flags.DEFINE_string('xml_output_file', '', 'File to store XML test results') + + +# We might need to monkey-patch TestResult so that it stops considering an +# unexpected pass as a as a "successful result". For details, see +# http://bugs.python.org/issue20165 +def _monkey_patch_test_result_for_unexpected_passes(): + # type: () -> None + """Workaround for .""" + + def wasSuccessful(self): + # type: () -> bool + """Tells whether or not this result was a success. + + Any unexpected pass is to be counted as a non-success. + + Args: + self: The TestResult instance. + + Returns: + Whether or not this result was a success. + """ + return (len(self.failures) == len(self.errors) == + len(self.unexpectedSuccesses) == 0) + + test_result = unittest.TestResult() + test_result.addUnexpectedSuccess(unittest.FunctionTestCase(lambda: None)) + if test_result.wasSuccessful(): # The bug is present. + unittest.TestResult.wasSuccessful = wasSuccessful + if test_result.wasSuccessful(): # Warn the user if our hot-fix failed. + sys.stderr.write('unittest.result.TestResult monkey patch to report' + ' unexpected passes as failures did not work.\n') + + +_monkey_patch_test_result_for_unexpected_passes() + + +def _open(filepath, mode, _open_func=open): + # type: (Text, Text, Callable[..., IO]) -> IO + """Opens a file. + + Like open(), but ensure that we can open real files even if tests stub out + open(). + + Args: + filepath: A filepath. + mode: A mode. + _open_func: A built-in open() function. + + Returns: + The opened file object. + """ + return _open_func(filepath, mode, encoding='utf-8') + + +class _TempDir(object): + """Represents a temporary directory for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`. + """ + + def __init__(self, path): + # type: (Text) -> None + """Module-private: do not instantiate outside module.""" + self._path = path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the directory. + + TIP: Instead of e.g. `os.path.join(temp_dir.full_path)`, you can simply + do `os.path.join(temp_dir)` because `__fspath__()` is implemented. + """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def create_file(self, file_path=None, content=None, mode='w', encoding='utf8', + errors='strict'): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text) -> _TempFile + """Create a file in the directory. + + NOTE: If the file already exists, it will be made writable and overwritten. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. 
NOTE: This path + is the path that will be cleaned up, including any directories in the + path, e.g., 'foo/bar/baz.txt' will `rm -r foo` + content: Optional string or bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + + Returns: + A _TempFile representing the created file. + """ + tf, _ = _TempFile._create(self._path, file_path, content, mode, encoding, + errors) + return tf + + def mkdir(self, dir_path=None): + # type: (Optional[Text]) -> _TempDir + """Create a directory in the directory. + + Args: + dir_path: Optional path to the directory to create. If not given, + a unique name will be generated and used. + + Returns: + A _TempDir representing the created directory. + """ + if dir_path: + path = os.path.join(self._path, dir_path) + else: + path = tempfile.mkdtemp(dir=self._path) + + # Note: there's no need to clear the directory since the containing + # dir was cleared by the tempdir() function. + os.makedirs(path, exist_ok=True) + return _TempDir(path) + + +class _TempFile(object): + """Represents a tempfile for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`. + """ + + def __init__(self, path): + # type: (Text) -> None + """Private: use _create instead.""" + self._path = path + + # pylint: disable=line-too-long + @classmethod + def _create(cls, base_path, file_path, content, mode, encoding, errors): + # type: (Text, Optional[Text], AnyStr, Text, Text, Text) -> Tuple[_TempFile, Text] + # pylint: enable=line-too-long + """Module-private: create a tempfile instance.""" + if file_path: + cleanup_path = os.path.join(base_path, _get_first_part(file_path)) + path = os.path.join(base_path, file_path) + os.makedirs(os.path.dirname(path), exist_ok=True) + # The file may already exist, in which case, ensure it's writable so that + # it can be truncated. + if os.path.exists(path) and not os.access(path, os.W_OK): + stat_info = os.stat(path) + os.chmod(path, stat_info.st_mode | stat.S_IWUSR) + else: + os.makedirs(base_path, exist_ok=True) + fd, path = tempfile.mkstemp(dir=str(base_path)) + os.close(fd) + cleanup_path = path + + tf = cls(path) + + if content: + if isinstance(content, str): + tf.write_text(content, mode=mode, encoding=encoding, errors=errors) + else: + tf.write_bytes(content, mode) + + else: + tf.write_bytes(b'') + + return tf, cleanup_path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the file. + + TIP: Instead of e.g. `os.path.join(temp_file.full_path)`, you can simply + do `os.path.join(temp_file)` because `__fspath__()` is implemented. 
+ """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def read_text(self, encoding='utf8', errors='strict'): + # type: (Text, Text) -> Text + """Return the contents of the file as text.""" + with self.open_text(encoding=encoding, errors=errors) as fp: + return fp.read() + + def read_bytes(self): + # type: () -> bytes + """Return the content of the file as bytes.""" + with self.open_bytes() as fp: + return fp.read() + + def write_text(self, text, mode='w', encoding='utf8', errors='strict'): + # type: (Text, Text, Text, Text) -> None + """Write text to the file. + + Args: + text: Text to write. In Python 2, it can be bytes, which will be + decoded using the `encoding` arg (this is as an aid for code that + is 2 and 3 compatible). + mode: The mode to open the file for writing. + encoding: The encoding to use when writing the text to the file. + errors: The error handling strategy to use when converting text to bytes. + """ + with self.open_text(mode, encoding=encoding, errors=errors) as fp: + fp.write(text) + + def write_bytes(self, data, mode='wb'): + # type: (bytes, Text) -> None + """Write bytes to the file. + + Args: + data: bytes to write. + mode: Mode to open the file for writing. The "b" flag is implicit if + not already present. It must not have the "t" flag. + """ + with self.open_bytes(mode) as fp: + fp.write(data) + + def open_text(self, mode='rt', encoding='utf8', errors='strict'): + # type: (Text, Text, Text) -> ContextManager[TextIO] + """Return a context manager for opening the file in text mode. + + Args: + mode: The mode to open the file in. The "t" flag is implicit if not + already present. It must not have the "b" flag. + encoding: The encoding to use when opening the file. + errors: How to handle decoding errors. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. + """ + if 'b' in mode: + raise ValueError('Invalid mode {!r}: "b" flag not allowed when opening ' + 'file in text mode'.format(mode)) + if 't' not in mode: + mode += 't' + cm = self._open(mode, encoding, errors) + return cm + + def open_bytes(self, mode='rb'): + # type: (Text) -> ContextManager[BinaryIO] + """Return a context manager for opening the file in binary mode. + + Args: + mode: The mode to open the file in. The "b" mode is implicit if not + already present. It must not have the "t" flag. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. + """ + if 't' in mode: + raise ValueError('Invalid mode {!r}: "t" flag not allowed when opening ' + 'file in binary mode'.format(mode)) + if 'b' not in mode: + mode += 'b' + cm = self._open(mode, encoding=None, errors=None) + return cm + + # TODO(b/123775699): Once pytype supports typing.Literal, use overload and + # Literal to express more precise return types. The contained type is + # currently `Any` to avoid [bad-return-type] errors in the open_* methods. + @contextlib.contextmanager + def _open( + self, + mode: str, + encoding: Optional[str] = 'utf8', + errors: Optional[str] = 'strict', + ) -> Iterator[Any]: + with io.open( + self.full_path, mode=mode, encoding=encoding, errors=errors) as fp: + yield fp + + +class _method(object): + """A decorator that supports both instance and classmethod invocations. + + Using similar semantics to the @property builtin, this decorator can augment + an instance method to support conditional logic when invoked on a class + object. 
This breaks support for invoking an instance method via the class + (e.g. Cls.method(self, ...)) but is still situationally useful. + """ + + def __init__(self, finstancemethod): + # type: (Callable[..., Any]) -> None + self._finstancemethod = finstancemethod + self._fclassmethod = None + + def classmethod(self, fclassmethod): + # type: (Callable[..., Any]) -> _method + self._fclassmethod = classmethod(fclassmethod) + return self + + def __doc__(self): + # type: () -> str + if getattr(self._finstancemethod, '__doc__'): + return self._finstancemethod.__doc__ + elif getattr(self._fclassmethod, '__doc__'): + return self._fclassmethod.__doc__ + return '' + + def __get__(self, obj, type_): + # type: (Optional[Any], Optional[Type[Any]]) -> Callable[..., Any] + func = self._fclassmethod if obj is None else self._finstancemethod + return func.__get__(obj, type_) # pytype: disable=attribute-error + + +class TestCase(unittest.TestCase): + """Extension of unittest.TestCase providing more power.""" + + # When to cleanup files/directories created by our `create_tempfile()` and + # `create_tempdir()` methods after each test case completes. This does *not* + # affect e.g., files created outside of those methods, e.g., using the stdlib + # tempfile module. This can be overridden at the class level, instance level, + # or with the `cleanup` arg of `create_tempfile()` and `create_tempdir()`. See + # `TempFileCleanup` for details on the different values. + # TODO(b/70517332): Remove the type comment and the disable once pytype has + # better support for enums. + tempfile_cleanup = TempFileCleanup.ALWAYS # type: TempFileCleanup # pytype: disable=annotation-type-mismatch + + maxDiff = 80 * 20 + longMessage = True + + # Exit stacks for per-test and per-class scopes. + if sys.version_info < (3, 11): + _exit_stack = None + _cls_exit_stack = None + + def __init__(self, *args, **kwargs): + super(TestCase, self).__init__(*args, **kwargs) + # This is to work around missing type stubs in unittest.pyi + self._outcome = getattr(self, '_outcome') # type: Optional[_OutcomeType] + + def setUp(self): + super(TestCase, self).setUp() + # NOTE: Only Python 3 contextlib has ExitStack and + # Python 3.11+ already has enterContext. + if hasattr(contextlib, 'ExitStack') and sys.version_info < (3, 11): + self._exit_stack = contextlib.ExitStack() + self.addCleanup(self._exit_stack.close) + + @classmethod + def setUpClass(cls): + super(TestCase, cls).setUpClass() + # NOTE: Only Python 3 contextlib has ExitStack, only Python 3.8+ has + # addClassCleanup and Python 3.11+ already has enterClassContext. + if ( + hasattr(contextlib, 'ExitStack') + and hasattr(cls, 'addClassCleanup') + and sys.version_info < (3, 11) + ): + cls._cls_exit_stack = contextlib.ExitStack() + cls.addClassCleanup(cls._cls_exit_stack.close) + + def create_tempdir(self, name=None, cleanup=None): + # type: (Optional[Text], Optional[TempFileCleanup]) -> _TempDir + """Create a temporary directory specific to the test. + + NOTE: The directory and its contents will be recursively cleared before + creation. This ensures that there is no pre-existing state. + + This creates a named directory on disk that is isolated to this test, and + will be properly cleaned up by the test. This avoids several pitfalls of + creating temporary directories for test purposes, as well as makes it easier + to setup directories and verify their contents. 
For example:: + + def test_foo(self): + out_dir = self.create_tempdir() + out_log = out_dir.create_file('output.log') + expected_outputs = [ + os.path.join(out_dir, 'data-0.txt'), + os.path.join(out_dir, 'data-1.txt'), + ] + code_under_test(out_dir) + self.assertTrue(os.path.exists(expected_paths[0])) + self.assertTrue(os.path.exists(expected_paths[1])) + self.assertEqual('foo', out_log.read_text()) + + See also: :meth:`create_tempfile` for creating temporary files. + + Args: + name: Optional name of the directory. If not given, a unique + name will be generated and used. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempDir representing the created directory; see _TempDir class docs + for usage. + """ + test_path = self._get_tempdir_path_test() + + if name: + path = os.path.join(test_path, name) + cleanup_path = os.path.join(test_path, _get_first_part(name)) + else: + os.makedirs(test_path, exist_ok=True) + path = tempfile.mkdtemp(dir=test_path) + cleanup_path = path + + _rmtree_ignore_errors(cleanup_path) + os.makedirs(path, exist_ok=True) + + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + + return _TempDir(path) + + # pylint: disable=line-too-long + def create_tempfile(self, file_path=None, content=None, mode='w', + encoding='utf8', errors='strict', cleanup=None): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text, Optional[TempFileCleanup]) -> _TempFile + # pylint: enable=line-too-long + """Create a temporary file specific to the test. + + This creates a named file on disk that is isolated to this test, and will + be properly cleaned up by the test. This avoids several pitfalls of + creating temporary files for test purposes, as well as makes it easier + to setup files, their data, read them back, and inspect them when + a test fails. For example:: + + def test_foo(self): + output = self.create_tempfile() + code_under_test(output) + self.assertGreater(os.path.getsize(output), 0) + self.assertEqual('foo', output.read_text()) + + NOTE: This will zero-out the file. This ensures there is no pre-existing + state. + NOTE: If the file already exists, it will be made writable and overwritten. + + See also: :meth:`create_tempdir` for creating temporary directories, and + ``_TempDir.create_file`` for creating files within a temporary directory. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. NOTE: This path is + the path that will be cleaned up, including any directories in the path, + e.g., ``'foo/bar/baz.txt'`` will ``rm -r foo``. + content: Optional string or + bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempFile representing the created file; see _TempFile class docs for + usage. 
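Combining create_tempdir with the _TempDir helpers defined earlier gives each test an isolated, self-cleaning workspace. A minimal sketch with hypothetical file names; cleanup follows tempfile_cleanup unless a cleanup argument is passed:

    import os

    from absl.testing import absltest

    class OutputLayoutTest(absltest.TestCase):

      def test_layout(self):
        root = self.create_tempdir()
        cfg = root.create_file('conf/app.ini', content='[core]\n')
        logs = root.mkdir('logs')
        self.assertTrue(os.path.isfile(cfg.full_path))
        self.assertTrue(os.path.isdir(logs.full_path))
        # _TempDir implements os.PathLike, so it can be joined directly.
        self.assertTrue(os.path.join(root, 'logs').endswith('logs'))

    if __name__ == '__main__':
      absltest.main()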
+ """ + test_path = self._get_tempdir_path_test() + tf, cleanup_path = _TempFile._create(test_path, file_path, content=content, + mode=mode, encoding=encoding, + errors=errors) + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + return tf + + @_method + def enter_context(self, manager): + # type: (ContextManager[_T]) -> _T + """Returns the CM's value after registering it with the exit stack. + + Entering a context pushes it onto a stack of contexts. When `enter_context` + is called on the test instance (e.g. `self.enter_context`), the context is + exited after the test case's tearDown call. When called on the test class + (e.g. `TestCase.enter_context`), the context is exited after the test + class's tearDownClass call. + + Contexts are exited in the reverse order of entering. They will always + be exited, regardless of test failure/success. + + This is useful to eliminate per-test boilerplate when context managers + are used. For example, instead of decorating every test with `@mock.patch`, + simply do `self.foo = self.enter_context(mock.patch(...))' in `setUp()`. + + NOTE: The context managers will always be exited without any error + information. This is an unfortunate implementation detail due to some + internals of how unittest runs tests. + + Args: + manager: The context manager to enter. + """ + if sys.version_info >= (3, 11): + return self.enterContext(manager) + + if not self._exit_stack: + raise AssertionError( + 'self._exit_stack is not set: enter_context is Py3-only; also make ' + 'sure that AbslTest.setUp() is called.') + return self._exit_stack.enter_context(manager) + + @enter_context.classmethod + def enter_context(cls, manager): # pylint: disable=no-self-argument + # type: (ContextManager[_T]) -> _T + if sys.version_info >= (3, 11): + return cls.enterClassContext(manager) + + if not cls._cls_exit_stack: + raise AssertionError( + 'cls._cls_exit_stack is not set: cls.enter_context requires ' + 'Python 3.8+; also make sure that AbslTest.setUpClass() is called.') + return cls._cls_exit_stack.enter_context(manager) + + @classmethod + def _get_tempdir_path_cls(cls): + # type: () -> Text + return os.path.join(TEST_TMPDIR.value, + cls.__qualname__.replace('__main__.', '')) + + def _get_tempdir_path_test(self): + # type: () -> Text + return os.path.join(self._get_tempdir_path_cls(), self._testMethodName) + + def _get_tempfile_cleanup(self, override): + # type: (Optional[TempFileCleanup]) -> TempFileCleanup + if override is not None: + return override + return self.tempfile_cleanup + + def _maybe_add_temp_path_cleanup(self, path, cleanup): + # type: (Text, Optional[TempFileCleanup]) -> None + cleanup = self._get_tempfile_cleanup(cleanup) + if cleanup == TempFileCleanup.OFF: + return + elif cleanup == TempFileCleanup.ALWAYS: + self.addCleanup(_rmtree_ignore_errors, path) + elif cleanup == TempFileCleanup.SUCCESS: + self._internal_add_cleanup_on_success(_rmtree_ignore_errors, path) + else: + raise AssertionError('Unexpected cleanup value: {}'.format(cleanup)) + + def _internal_add_cleanup_on_success( + self, + function: Callable[..., Any], + *args: Any, + **kwargs: Any, + ) -> None: + """Adds `function` as cleanup when the test case succeeds.""" + outcome = self._outcome + assert outcome is not None + previous_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + def _call_cleaner_on_success(*args, **kwargs): + if not self._internal_ran_and_passed_when_called_during_cleanup( + previous_failure_count): 
+ return + function(*args, **kwargs) + self.addCleanup(_call_cleaner_on_success, *args, **kwargs) + + def _internal_ran_and_passed_when_called_during_cleanup( + self, + previous_failure_count: int, + ) -> bool: + """Returns whether test is passed. Expected to be called during cleanup.""" + outcome = self._outcome + if sys.version_info[:2] >= (3, 11): + assert outcome is not None + current_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + return current_failure_count == previous_failure_count + else: + # Before Python 3.11 https://github.com/python/cpython/pull/28180, errors + # were bufferred in _Outcome before calling cleanup. + result = self.defaultTestResult() + self._feedErrorsToResult(result, outcome.errors) # pytype: disable=attribute-error + return result.wasSuccessful() + + def shortDescription(self): + # type: () -> Text + """Formats both the test method name and the first line of its docstring. + + If no docstring is given, only returns the method name. + + This method overrides unittest.TestCase.shortDescription(), which + only returns the first line of the docstring, obscuring the name + of the test upon failure. + + Returns: + desc: A short description of a test method. + """ + desc = self.id() + + # Omit the main name so that test name can be directly copy/pasted to + # the command line. + if desc.startswith('__main__.'): + desc = desc[len('__main__.'):] + + # NOTE: super() is used here instead of directly invoking + # unittest.TestCase.shortDescription(self), because of the + # following line that occurs later on: + # unittest.TestCase = TestCase + # Because of this, direct invocation of what we think is the + # superclass will actually cause infinite recursion. + doc_first_line = super(TestCase, self).shortDescription() + if doc_first_line is not None: + desc = '\n'.join((desc, doc_first_line)) + return desc + + def assertStartsWith(self, actual, expected_start, msg=None): + """Asserts that actual.startswith(expected_start) is True. + + Args: + actual: str + expected_start: str + msg: Optional message to report on failure. + """ + if not actual.startswith(expected_start): + self.fail('%r does not start with %r' % (actual, expected_start), msg) + + def assertNotStartsWith(self, actual, unexpected_start, msg=None): + """Asserts that actual.startswith(unexpected_start) is False. + + Args: + actual: str + unexpected_start: str + msg: Optional message to report on failure. + """ + if actual.startswith(unexpected_start): + self.fail('%r does start with %r' % (actual, unexpected_start), msg) + + def assertEndsWith(self, actual, expected_end, msg=None): + """Asserts that actual.endswith(expected_end) is True. + + Args: + actual: str + expected_end: str + msg: Optional message to report on failure. + """ + if not actual.endswith(expected_end): + self.fail('%r does not end with %r' % (actual, expected_end), msg) + + def assertNotEndsWith(self, actual, unexpected_end, msg=None): + """Asserts that actual.endswith(unexpected_end) is False. + + Args: + actual: str + unexpected_end: str + msg: Optional message to report on failure. + """ + if actual.endswith(unexpected_end): + self.fail('%r does end with %r' % (actual, unexpected_end), msg) + + def assertSequenceStartsWith(self, prefix, whole, msg=None): + """An equality assertion for the beginning of ordered sequences. + + If prefix is an empty sequence, it will raise an error unless whole is also + an empty sequence. 
+ + If prefix is not a sequence, it will raise an error if the first element of + whole does not match. + + Args: + prefix: A sequence expected at the beginning of the whole parameter. + whole: The sequence in which to look for prefix. + msg: Optional message to report on failure. + """ + try: + prefix_len = len(prefix) + except (TypeError, NotImplementedError): + prefix = [prefix] + prefix_len = 1 + + if isinstance(whole, abc.Mapping) or isinstance(whole, abc.Set): + self.fail( + 'For whole: Mapping or Set objects are not supported, found type: %s' + % type(whole), + msg, + ) + try: + whole_len = len(whole) + except (TypeError, NotImplementedError): + self.fail('For whole: len(%s) is not supported, it appears to be type: ' + '%s' % (whole, type(whole)), msg) + + assert prefix_len <= whole_len, self._formatMessage( + msg, + 'Prefix length (%d) is longer than whole length (%d).' % + (prefix_len, whole_len) + ) + + if not prefix_len and whole_len: + self.fail('Prefix length is 0 but whole length is %d: %s' % + (len(whole), whole), msg) + + try: + self.assertSequenceEqual(prefix, whole[:prefix_len], msg) + except AssertionError: + self.fail('prefix: %s not found at start of whole: %s.' % + (prefix, whole), msg) + + def assertEmpty(self, container, msg=None): + """Asserts that an object has zero length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of {}.'.format(container, len(container)), msg) + + def assertNotEmpty(self, container, msg=None): + """Asserts that an object has non-zero length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if not len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of 0.'.format(container), msg) + + def assertLen(self, container, expected_len, msg=None): + """Asserts that an object has the expected length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + expected_len: The expected length of the container. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + if len(container) != expected_len: + container_repr = unittest.util.safe_repr(container) # pytype: disable=module-attr + self.fail('{} has length of {}, expected {}.'.format( + container_repr, len(container), expected_len), msg) + + def assertSequenceAlmostEqual(self, expected_seq, actual_seq, places=None, + msg=None, delta=None): + """An approximate equality assertion for ordered sequences. 
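A compact sketch exercising the string-prefix, suffix, and size assertions defined above; the sample values are arbitrary:

    from absl.testing import absltest

    class ShapeAssertions(absltest.TestCase):

      def test_prefix_suffix_and_length(self):
        self.assertStartsWith('absl-py', 'absl')
        self.assertEndsWith('results.xml', '.xml')
        self.assertSequenceStartsWith([1, 2], [1, 2, 3, 4])
        self.assertLen({'a': 1, 'b': 2}, 2)
        self.assertEmpty([])
        self.assertNotEmpty({'key'})

    if __name__ == '__main__':
      absltest.main()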
+ + Fail if the two sequences are unequal as determined by their value + differences rounded to the given number of decimal places (default 7) and + comparing to zero, or by comparing that the difference between each value + in the two sequences is more than the given delta. + + Note that decimal places (from zero) are usually not the same as significant + digits (measured from the most significant digit). + + If the two sequences compare equal then they will automatically compare + almost equal. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + places: The number of decimal places to compare. + msg: The message to be printed if the test fails. + delta: The OK difference between compared values. + """ + if len(expected_seq) != len(actual_seq): + self.fail('Sequence size mismatch: {} vs {}'.format( + len(expected_seq), len(actual_seq)), msg) + + err_list = [] + for idx, (exp_elem, act_elem) in enumerate(zip(expected_seq, actual_seq)): + try: + # assertAlmostEqual should be called with at most one of `places` and + # `delta`. However, it's okay for assertSequenceAlmostEqual to pass + # both because we want the latter to fail if the former does. + # pytype: disable=wrong-keyword-args + self.assertAlmostEqual(exp_elem, act_elem, places=places, msg=msg, + delta=delta) + # pytype: enable=wrong-keyword-args + except self.failureException as err: + err_list.append('At index {}: {}'.format(idx, err)) + + if err_list: + if len(err_list) > 30: + err_list = err_list[:30] + ['...'] + msg = self._formatMessage(msg, '\n'.join(err_list)) + self.fail(msg) + + def assertContainsSubset(self, expected_subset, actual_set, msg=None): + """Checks whether actual iterable is a superset of expected iterable.""" + missing = set(expected_subset) - set(actual_set) + if not missing: + return + + self.fail('Missing elements %s\nExpected: %s\nActual: %s' % ( + missing, expected_subset, actual_set), msg) + + def assertNoCommonElements(self, expected_seq, actual_seq, msg=None): + """Checks whether actual iterable and expected iterable are disjoint.""" + common = set(expected_seq) & set(actual_seq) + if not common: + return + + self.fail('Common elements %s\nExpected: %s\nActual: %s' % ( + common, expected_seq, actual_seq), msg) + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + """Deprecated, please use assertCountEqual instead. + + This is equivalent to assertCountEqual. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + super().assertCountEqual(expected_seq, actual_seq, msg) + + def assertSameElements(self, expected_seq, actual_seq, msg=None): + """Asserts that two sequences have the same elements (in any order). + + This method, unlike assertCountEqual, doesn't care about any + duplicates in the expected and actual sequences:: + + # Doesn't raise an AssertionError + assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) + + If possible, you should use assertCountEqual instead of + assertSameElements. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + # `unittest2.TestCase` used to have assertSameElements, but it was + # removed in favor of assertItemsEqual. As there's a unit test + # that explicitly checks this behavior, I am leaving this method + # alone. 
+ # Fail on strings: empirically, passing strings to this test method + # is almost always a bug. If comparing the character sets of two strings + # is desired, cast the inputs to sets or lists explicitly. + if (isinstance(expected_seq, _TEXT_OR_BINARY_TYPES) or + isinstance(actual_seq, _TEXT_OR_BINARY_TYPES)): + self.fail('Passing string/bytes to assertSameElements is usually a bug. ' + 'Did you mean to use assertEqual?\n' + 'Expected: %s\nActual: %s' % (expected_seq, actual_seq)) + try: + expected = dict([(element, None) for element in expected_seq]) + actual = dict([(element, None) for element in actual_seq]) + missing = [element for element in expected if element not in actual] + unexpected = [element for element in actual if element not in expected] + missing.sort() + unexpected.sort() + except TypeError: + # Fall back to slower list-compare if any of the objects are + # not hashable. + expected = list(expected_seq) + actual = list(actual_seq) + expected.sort() + actual.sort() + missing, unexpected = _sorted_list_difference(expected, actual) + errors = [] + if msg: + errors.extend((msg, ':\n')) + if missing: + errors.append('Expected, but missing:\n %r\n' % missing) + if unexpected: + errors.append('Unexpected, but present:\n %r\n' % unexpected) + if missing or unexpected: + self.fail(''.join(errors)) + + # unittest.TestCase.assertMultiLineEqual works very similarly, but it + # has a different error format. However, I find this slightly more readable. + def assertMultiLineEqual(self, first, second, msg=None, **kwargs): + """Asserts that two multi-line strings are equal.""" + assert isinstance(first, + str), ('First argument is not a string: %r' % (first,)) + assert isinstance(second, + str), ('Second argument is not a string: %r' % (second,)) + line_limit = kwargs.pop('line_limit', 0) + if kwargs: + raise TypeError('Unexpected keyword args {}'.format(tuple(kwargs))) + + if first == second: + return + if msg: + failure_message = [msg + ':\n'] + else: + failure_message = ['\n'] + if line_limit: + line_limit += len(failure_message) + for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)): + failure_message.append(line) + if not line.endswith('\n'): + failure_message.append('\n') + if line_limit and len(failure_message) > line_limit: + n_omitted = len(failure_message) - line_limit + failure_message = failure_message[:line_limit] + failure_message.append( + '(... and {} more delta lines omitted for brevity.)\n'.format( + n_omitted)) + + raise self.failureException(''.join(failure_message)) + + def assertBetween(self, value, minv, maxv, msg=None): + """Asserts that value is between minv and maxv (inclusive).""" + msg = self._formatMessage(msg, + '"%r" unexpectedly not between "%r" and "%r"' % + (value, minv, maxv)) + self.assertTrue(minv <= value, msg) + self.assertTrue(maxv >= value, msg) + + def assertRegexMatch(self, actual_str, regexes, message=None): + r"""Asserts that at least one regex in regexes matches str. + + If possible you should use `assertRegex`, which is a simpler + version of this method. `assertRegex` takes a single regular + expression (a string or re compiled object) instead of a list. + + Notes: + + 1. This function uses substring matching, i.e. the matching + succeeds if *any* substring of the error message matches *any* + regex in the list. This is more convenient for the user than + full-string matching. + + 2. If regexes is the empty list, the matching will always fail. + + 3. Use regexes=[''] for a regex that will always pass. + + 4. '.' 
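A short sketch of the element-set and range assertions above, again with arbitrary sample data:

    from absl.testing import absltest

    class ElementAssertions(absltest.TestCase):

      def test_membership_and_ranges(self):
        self.assertContainsSubset({'a', 'b'}, {'a', 'b', 'c'})
        self.assertNoCommonElements([1, 2], [3, 4])
        # Duplicates are ignored, unlike assertCountEqual.
        self.assertSameElements([1, 1, 0, 0], [0, 1])
        self.assertSequenceAlmostEqual(
            [0.1, 0.2], [0.1000001, 0.2000004], places=5)
        self.assertBetween(3.5, 3, 4)

    if __name__ == '__main__':
      absltest.main()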
matches any single character *except* the newline. To + match any character, use '(.|\n)'. + + 5. '^' matches the beginning of each line, not just the beginning + of the string. Similarly, '$' matches the end of each line. + + 6. An exception will be thrown if regexes contains an invalid + regex. + + Args: + actual_str: The string we try to match with the items in regexes. + regexes: The regular expressions we want to match against str. + See "Notes" above for detailed notes on how this is interpreted. + message: The message to be printed if the test fails. + """ + if isinstance(regexes, _TEXT_OR_BINARY_TYPES): + self.fail('regexes is string or bytes; use assertRegex instead.', + message) + if not regexes: + self.fail('No regexes specified.', message) + + regex_type = type(regexes[0]) + for regex in regexes[1:]: + if type(regex) is not regex_type: # pylint: disable=unidiomatic-typecheck + self.fail('regexes list must all be the same type.', message) + + if regex_type is bytes and isinstance(actual_str, str): + regexes = [regex.decode('utf-8') for regex in regexes] + regex_type = str + elif regex_type is str and isinstance(actual_str, bytes): + regexes = [regex.encode('utf-8') for regex in regexes] + regex_type = bytes + + if regex_type is str: + regex = u'(?:%s)' % u')|(?:'.join(regexes) + elif regex_type is bytes: + regex = b'(?:' + (b')|(?:'.join(regexes)) + b')' + else: + self.fail('Only know how to deal with unicode str or bytes regexes.', + message) + + if not re.search(regex, actual_str, re.MULTILINE): + self.fail('"%s" does not contain any of these regexes: %s.' % + (actual_str, regexes), message) + + def assertCommandSucceeds(self, command, regexes=(b'',), env=None, + close_fds=True, msg=None): + """Asserts that a shell command succeeds (i.e. exits with code 0). + + Args: + command: List or string representing the command to run. + regexes: List of regular expression byte strings that match success. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. + if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertEqual( + ret_code, 0, + self._formatMessage(msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s' % (_quote_long_string(command_string), + ret_code, + _quote_long_string(err))) + ) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + def assertCommandFails(self, command, regexes, env=None, close_fds=True, + msg=None): + """Asserts a shell command fails and the error matches a regex in a list. + + Args: + command: List or string representing the command to run. + regexes: the list of regular expression strings. + env: Dictionary of environment variable settings. 
If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. + if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertNotEqual( + ret_code, 0, + self._formatMessage(msg, 'The following command succeeded ' + 'while expected to fail:\n%s' % + _quote_long_string(command_string))) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + class _AssertRaisesContext(object): + + def __init__(self, expected_exception, test_case, test_func, msg=None): + self.expected_exception = expected_exception + self.test_case = test_case + self.test_func = test_func + self.msg = msg + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + self.test_case.fail(self.expected_exception.__name__ + ' not raised', + self.msg) + if not issubclass(exc_type, self.expected_exception): + return False + self.test_func(exc_value) + if exc_value: + self.exception = exc_value.with_traceback(None) + return True + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate, callable_obj: Callable[..., Any], + *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + def assertRaisesWithPredicateMatch(self, expected_exception, predicate, + callable_obj=None, *args, **kwargs): + """Asserts that exception is thrown and predicate(exception) is true. + + Args: + expected_exception: Exception class expected to be raised. + predicate: Function of one argument that inspects the passed-in exception + and returns True (success) or False (please fail the test). + callable_obj: Function to be called. + *args: Extra args. + **kwargs: Extra keyword args. + + Returns: + A context manager if callable_obj is None. Otherwise, None. + + Raises: + self.failureException if callable_obj does not raise a matching exception. 
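A hedged sketch of the command assertions above. Because they run the child with no inherited environment unless one is supplied, the current environment is forwarded explicitly here, and the interpreter itself serves as a portable command; exact subprocess behavior still varies by platform:

    import os
    import sys

    from absl.testing import absltest

    class CommandAssertions(absltest.TestCase):

      def test_exit_codes(self):
        env = dict(os.environ)
        self.assertCommandSucceeds(
            [sys.executable, '-c', 'print("ok")'], env=env)
        self.assertCommandFails(
            [sys.executable, '-c', 'import sys; sys.exit(3)'], ['.*'], env=env)

    if __name__ == '__main__':
      absltest.main()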
+ """ + def Check(err): + self.assertTrue(predicate(err), + '%r does not match predicate %r' % (err, predicate)) + + context = self._AssertRaisesContext(expected_exception, self, Check) + if callable_obj is None: + return context + with context: + callable_obj(*args, **kwargs) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message + ) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message, + callable_obj: Callable[..., Any], *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + def assertRaisesWithLiteralMatch(self, expected_exception, + expected_exception_message, + callable_obj=None, *args, **kwargs): + """Asserts that the message in a raised exception equals the given string. + + Unlike assertRaisesRegex, this method takes a literal string, not + a regular expression. + + with self.assertRaisesWithLiteralMatch(ExType, 'message'): + DoSomething() + + Args: + expected_exception: Exception class expected to be raised. + expected_exception_message: String message expected in the raised + exception. For a raise exception e, expected_exception_message must + equal str(e). + callable_obj: Function to be called, or None to return a context. + *args: Extra args. + **kwargs: Extra kwargs. + + Returns: + A context manager if callable_obj is None. Otherwise, None. + + Raises: + self.failureException if callable_obj does not raise a matching exception. + """ + def Check(err): + actual_exception_message = str(err) + self.assertTrue(expected_exception_message == actual_exception_message, + 'Exception message does not match.\n' + 'Expected: %r\n' + 'Actual: %r' % (expected_exception_message, + actual_exception_message)) + + context = self._AssertRaisesContext(expected_exception, self, Check) + if callable_obj is None: + return context + with context: + callable_obj(*args, **kwargs) + + def assertContainsInOrder(self, strings, target, msg=None): + """Asserts that the strings provided are found in the target in order. + + This may be useful for checking HTML output. + + Args: + strings: A list of strings, such as [ 'fox', 'dog' ] + target: A target string in which to look for the strings, such as + 'The quick brown fox jumped over the lazy dog'. + msg: Optional message to report on failure. + """ + if isinstance(strings, (bytes, unicode if str is bytes else str)): + strings = (strings,) + + current_index = 0 + last_string = None + for string in strings: + index = target.find(str(string), current_index) + if index == -1 and current_index == 0: + self.fail("Did not find '%s' in '%s'" % + (string, target), msg) + elif index == -1: + self.fail("Did not find '%s' after '%s' in '%s'" % + (string, last_string, target), msg) + last_string = string + current_index = index + + def assertContainsSubsequence(self, container, subsequence, msg=None): + """Asserts that "container" contains "subsequence" as a subsequence. + + Asserts that "container" contains all the elements of "subsequence", in + order, but possibly with other elements interspersed. 
For example, [1, 2, 3] + is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0]. + + Args: + container: the list we're testing for subsequence inclusion. + subsequence: the list we hope will be a subsequence of container. + msg: Optional message to report on failure. + """ + first_nonmatching = None + reversed_container = list(reversed(container)) + subsequence = list(subsequence) + + for e in subsequence: + if e not in reversed_container: + first_nonmatching = e + break + while e != reversed_container.pop(): + pass + + if first_nonmatching is not None: + self.fail('%s not a subsequence of %s. First non-matching element: %s' % + (subsequence, container, first_nonmatching), msg) + + def assertContainsExactSubsequence(self, container, subsequence, msg=None): + """Asserts that "container" contains "subsequence" as an exact subsequence. + + Asserts that "container" contains all the elements of "subsequence", in + order, and without other elements interspersed. For example, [1, 2, 3] is an + exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0]. + + Args: + container: the list we're testing for subsequence inclusion. + subsequence: the list we hope will be an exact subsequence of container. + msg: Optional message to report on failure. + """ + container = list(container) + subsequence = list(subsequence) + longest_match = 0 + + for start in range(1 + len(container) - len(subsequence)): + if longest_match == len(subsequence): + break + index = 0 + while (index < len(subsequence) and + subsequence[index] == container[start + index]): + index += 1 + longest_match = max(longest_match, index) + + if longest_match < len(subsequence): + self.fail('%s not an exact subsequence of %s. ' + 'Longest matching prefix: %s' % + (subsequence, container, subsequence[:longest_match]), msg) + + def assertTotallyOrdered(self, *groups, **kwargs): + """Asserts that total ordering has been implemented correctly. + + For example, say you have a class A that compares only on its attribute x. + Comparators other than ``__lt__`` are omitted for brevity:: + + class A(object): + def __init__(self, x, y): + self.x = x + self.y = y + + def __hash__(self): + return hash(self.x) + + def __lt__(self, other): + try: + return self.x < other.x + except AttributeError: + return NotImplemented + + assertTotallyOrdered will check that instances can be ordered correctly. + For example:: + + self.assertTotallyOrdered( + [None], # None should come before everything else. + [1], # Integers sort earlier. + [A(1, 'a')], + [A(2, 'b')], # 2 is after 1. + [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant. + [A(4, 'z')], + ['foo']) # Strings sort last. + + Args: + *groups: A list of groups of elements. Each group of elements is a list + of objects that are equal. The elements in each group must be less + than the elements in the group after it. For example, these groups are + totally ordered: ``[None]``, ``[1]``, ``[2, 2]``, ``[3]``. + **kwargs: optional msg keyword argument can be passed. 
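+
+    A hypothetical call showing the optional message keyword (the values are
+    illustrative only)::
+
+      self.assertTotallyOrdered([1], [2, 2], [3], msg='ints should be ordered')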
+ """ + + def CheckOrder(small, big): + """Ensures small is ordered before big.""" + self.assertFalse(small == big, + self._formatMessage(msg, '%r unexpectedly equals %r' % + (small, big))) + self.assertTrue(small != big, + self._formatMessage(msg, '%r unexpectedly equals %r' % + (small, big))) + self.assertLess(small, big, msg) + self.assertFalse(big < small, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (big, small))) + self.assertLessEqual(small, big, msg) + self.assertFalse(big <= small, self._formatMessage( + '%r unexpectedly less than or equal to %r' % (big, small), msg + )) + self.assertGreater(big, small, msg) + self.assertFalse(small > big, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (small, big))) + self.assertGreaterEqual(big, small) + self.assertFalse(small >= big, self._formatMessage( + msg, + '%r unexpectedly greater than or equal to %r' % (small, big))) + + def CheckEqual(a, b): + """Ensures that a and b are equal.""" + self.assertEqual(a, b, msg) + self.assertFalse(a != b, + self._formatMessage(msg, '%r unexpectedly unequals %r' % + (a, b))) + + # Objects that compare equal must hash to the same value, but this only + # applies if both objects are hashable. + if (isinstance(a, abc.Hashable) and + isinstance(b, abc.Hashable)): + self.assertEqual( + hash(a), hash(b), + self._formatMessage( + msg, 'hash %d of %r unexpectedly not equal to hash %d of %r' % + (hash(a), a, hash(b), b))) + + self.assertFalse(a < b, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (a, b))) + self.assertFalse(b < a, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (b, a))) + self.assertLessEqual(a, b, msg) + self.assertLessEqual(b, a, msg) # pylint: disable=arguments-out-of-order + self.assertFalse(a > b, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (a, b))) + self.assertFalse(b > a, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (b, a))) + self.assertGreaterEqual(a, b, msg) + self.assertGreaterEqual(b, a, msg) # pylint: disable=arguments-out-of-order + + msg = kwargs.get('msg') + + # For every combination of elements, check the order of every pair of + # elements. + for elements in itertools.product(*groups): + elements = list(elements) + for index, small in enumerate(elements[:-1]): + for big in elements[index + 1:]: + CheckOrder(small, big) + + # Check that every element in each group is equal. + for group in groups: + for a in group: + CheckEqual(a, a) + for a, b in itertools.product(group, group): + CheckEqual(a, b) + + def assertDictEqual(self, a, b, msg=None): + """Raises AssertionError if a and b are not equal dictionaries. + + Args: + a: A dict, the expected value. + b: A dict, the actual value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dictionaries are not equal. + """ + self.assertIsInstance(a, dict, self._formatMessage( + msg, + 'First argument is not a dictionary' + )) + self.assertIsInstance(b, dict, self._formatMessage( + msg, + 'Second argument is not a dictionary' + )) + + def Sorted(list_of_items): + try: + return sorted(list_of_items) # In 3.3, unordered are possible. 
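+        # Items whose keys mix unorderable types (e.g. int vs. str) raise
+        # TypeError under Python 3; fall back to the unsorted list below.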
+ except TypeError: + return list_of_items + + if a == b: + return + a_items = Sorted(list(a.items())) + b_items = Sorted(list(b.items())) + + unexpected = [] + missing = [] + different = [] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + + def Repr(dikt): + """Deterministic repr for dict.""" + # Sort the entries based on their repr, not based on their sort order, + # which will be non-deterministic across executions, for many types. + entries = sorted((safe_repr(k), safe_repr(v)) for k, v in dikt.items()) + return '{%s}' % (', '.join('%s: %s' % pair for pair in entries)) + + message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')] + + # The standard library default output confounds lexical difference with + # value difference; treat them separately. + for a_key, a_value in a_items: + if a_key not in b: + missing.append((a_key, a_value)) + elif a_value != b[a_key]: + different.append((a_key, a_value, b[a_key])) + + for b_key, b_value in b_items: + if b_key not in a: + unexpected.append((b_key, b_value)) + + if unexpected: + message.append( + 'Unexpected, but present entries:\n%s' % ''.join( + '%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected)) + + if different: + message.append( + 'repr() of differing entries:\n%s' % ''.join( + '%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value), + safe_repr(b_value)) + for k, a_value, b_value in different)) + + if missing: + message.append( + 'Missing entries:\n%s' % ''.join( + ('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing))) + + raise self.failureException('\n'.join(message)) + + def assertDataclassEqual(self, first, second, msg=None): + """Asserts two dataclasses are equal with more informative errors. + + Arguments must both be dataclasses. This compares equality of individual + fields and takes care to not compare fields that are marked as + non-comparable. It gives per field differences, which are easier to parse + than the comparison of the string representations from assertEqual. + + In cases where the dataclass has a custom __eq__, and it is defined in a + way that is inconsistent with equality of comparable fields, we raise an + exception without further trying to figure out how they are different. + + Args: + first: A dataclass, the first value. + second: A dataclass, the second value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dataclasses are not equal. + """ + + if not dataclasses.is_dataclass(first) or isinstance(first, type): + raise self.failureException('First argument is not a dataclass instance.') + if not dataclasses.is_dataclass(second) or isinstance(second, type): + raise self.failureException( + 'Second argument is not a dataclass instance.' + ) + + if first == second: + return + + if type(first) is not type(second): + self.fail( + 'Found different dataclass types: %s != %s' + % (type(first), type(second)), + msg, + ) + + # Make sure to skip fields that are marked compare=False. 
+ different = [ + (f.name, getattr(first, f.name), getattr(second, f.name)) + for f in dataclasses.fields(first) + if f.compare and getattr(first, f.name) != getattr(second, f.name) + ] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + message = ['%s != %s' % (safe_repr(first), safe_repr(second))] + if different: + message.append('Fields that differ:') + message.extend( + '%s: %s != %s' % (k, safe_repr(first_v), safe_repr(second_v)) + for k, first_v, second_v in different + ) + else: + message.append( + 'Cannot detect difference by examining the fields of the dataclass.' + ) + + raise self.fail('\n'.join(message), msg) + + def assertUrlEqual(self, a, b, msg=None): + """Asserts that urls are equal, ignoring ordering of query params.""" + parsed_a = parse.urlparse(a) + parsed_b = parse.urlparse(b) + self.assertEqual(parsed_a.scheme, parsed_b.scheme, msg) + self.assertEqual(parsed_a.netloc, parsed_b.netloc, msg) + self.assertEqual(parsed_a.path, parsed_b.path, msg) + self.assertEqual(parsed_a.fragment, parsed_b.fragment, msg) + self.assertEqual(sorted(parsed_a.params.split(';')), + sorted(parsed_b.params.split(';')), msg) + self.assertDictEqual( + parse.parse_qs(parsed_a.query, keep_blank_values=True), + parse.parse_qs(parsed_b.query, keep_blank_values=True), msg) + + def assertSameStructure(self, a, b, aname='a', bname='b', msg=None): + """Asserts that two values contain the same structural content. + + The two arguments should be data trees consisting of trees of dicts and + lists. They will be deeply compared by walking into the contents of dicts + and lists; other items will be compared using the == operator. + If the two structures differ in content, the failure message will indicate + the location within the structures where the first difference is found. + This may be helpful when comparing large structures. + + Mixed Sequence and Set types are supported. Mixed Mapping types are + supported, but the order of the keys will not be considered in the + comparison. + + Args: + a: The first structure to compare. + b: The second structure to compare. + aname: Variable name to use for the first structure in assertion messages. + bname: Variable name to use for the second structure. + msg: Additional text to include in the failure message. + """ + + # Accumulate all the problems found so we can report all of them at once + # rather than just stopping at the first + problems = [] + + _walk_structure_for_problems(a, b, aname, bname, problems, + self.assertEqual, self.failureException) + + # Avoid spamming the user toooo much + if self.maxDiff is not None: + max_problems_to_show = self.maxDiff // 80 + if len(problems) > max_problems_to_show: + problems = problems[0:max_problems_to_show-1] + ['...'] + + if problems: + self.fail('; '.join(problems), msg) + + def assertJsonEqual(self, first, second, msg=None): + """Asserts that the JSON objects defined in two strings are equal. + + A summary of the differences will be included in the failure message + using assertSameStructure. + + Args: + first: A string containing JSON to decode and compare to second. + second: A string containing JSON to decode and compare to first. + msg: Additional text to include in the failure message. 
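+
+    A hypothetical usage sketch (the JSON literals are illustrative only);
+    key order and whitespace differences do not matter::
+
+      self.assertJsonEqual('{"a": 1, "b": [2, 3]}', '{"b": [2, 3], "a": 1}')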
+ """ + try: + first_structured = json.loads(first) + except ValueError as e: + raise ValueError(self._formatMessage( + msg, + 'could not decode first JSON value %s: %s' % (first, e))) + + try: + second_structured = json.loads(second) + except ValueError as e: + raise ValueError(self._formatMessage( + msg, + 'could not decode second JSON value %s: %s' % (second, e))) + + self.assertSameStructure(first_structured, second_structured, + aname='first', bname='second', msg=msg) + + def _getAssertEqualityFunc(self, first, second): + # type: (Any, Any) -> Callable[..., None] + try: + return super(TestCase, self)._getAssertEqualityFunc(first, second) + except AttributeError: + # This is a workaround if unittest.TestCase.__init__ was never run. + # It usually means that somebody created a subclass just for the + # assertions and has overridden __init__. "assertTrue" is a safe + # value that will not make __init__ raise a ValueError. + test_method = getattr(self, '_testMethodName', 'assertTrue') + super(TestCase, self).__init__(test_method) + + return super(TestCase, self)._getAssertEqualityFunc(first, second) + + def fail(self, msg=None, user_msg=None) -> NoReturn: + """Fail immediately with the given standard message and user message.""" + return super(TestCase, self).fail(self._formatMessage(user_msg, msg)) + + +def _sorted_list_difference(expected, actual): + # type: (List[_T], List[_T]) -> Tuple[List[_T], List[_T]] + """Finds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + + Args: + expected: The list we expected. + actual: The list we actually got. + Returns: + (missing, unexpected) + missing: items in expected that are not in actual. + unexpected: items in actual that are not in expected. 
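+
+    Illustrative example with hypothetical values (both inputs must already
+    be sorted)::
+
+      _sorted_list_difference([1, 2, 3, 4], [2, 4, 5])  # -> ([1, 3], [5])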
+ """ + i = j = 0 + missing = [] + unexpected = [] + while True: + try: + e = expected[i] + a = actual[j] + if e < a: + missing.append(e) + i += 1 + while expected[i] == e: + i += 1 + elif e > a: + unexpected.append(a) + j += 1 + while actual[j] == a: + j += 1 + else: + i += 1 + try: + while expected[i] == e: + i += 1 + finally: + j += 1 + while actual[j] == a: + j += 1 + except IndexError: + missing.extend(expected[i:]) + unexpected.extend(actual[j:]) + break + return missing, unexpected + + +def _are_both_of_integer_type(a, b): + # type: (object, object) -> bool + return isinstance(a, int) and isinstance(b, int) + + +def _are_both_of_sequence_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Sequence) and isinstance( + b, abc.Sequence) and not isinstance( + a, _TEXT_OR_BINARY_TYPES) and not isinstance(b, _TEXT_OR_BINARY_TYPES) + + +def _are_both_of_set_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Set) and isinstance(b, abc.Set) + + +def _are_both_of_mapping_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Mapping) and isinstance( + b, abc.Mapping) + + +def _walk_structure_for_problems( + a, b, aname, bname, problem_list, leaf_assert_equal_func, failure_exception +): + """The recursive comparison behind assertSameStructure.""" + if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck + _are_both_of_integer_type(a, b) or _are_both_of_sequence_type(a, b) or + _are_both_of_set_type(a, b) or _are_both_of_mapping_type(a, b)): + # We do not distinguish between int and long types as 99.99% of Python 2 + # code should never care. They collapse into a single type in Python 3. + problem_list.append('%s is a %r but %s is a %r' % + (aname, type(a), bname, type(b))) + # If they have different types there's no point continuing + return + + if isinstance(a, abc.Set): + for k in a: + if k not in b: + problem_list.append( + '%s has %r but %s does not' % (aname, k, bname)) + for k in b: + if k not in a: + problem_list.append('%s lacks %r but %s has it' % (aname, k, bname)) + + # NOTE: a or b could be a defaultdict, so we must take care that the traversal + # doesn't modify the data. + elif isinstance(a, abc.Mapping): + for k in a: + if k in b: + _walk_structure_for_problems( + a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k), + problem_list, leaf_assert_equal_func, failure_exception) + else: + problem_list.append( + "%s has [%r] with value %r but it's missing in %s" % + (aname, k, a[k], bname)) + for k in b: + if k not in a: + problem_list.append( + '%s lacks [%r] but %s has it with value %r' % + (aname, k, bname, b[k])) + + # Strings/bytes are Sequences but we'll just do those with regular != + elif (isinstance(a, abc.Sequence) and + not isinstance(a, _TEXT_OR_BINARY_TYPES)): + minlen = min(len(a), len(b)) + for i in range(minlen): + _walk_structure_for_problems( + a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i), + problem_list, leaf_assert_equal_func, failure_exception) + for i in range(minlen, len(a)): + problem_list.append('%s has [%i] with value %r but %s does not' % + (aname, i, a[i], bname)) + for i in range(minlen, len(b)): + problem_list.append('%s lacks [%i] but %s has it with value %r' % + (aname, i, bname, b[i])) + + else: + try: + leaf_assert_equal_func(a, b) + except failure_exception: + problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b)) + + +def get_command_string(command): + """Returns an escaped string that can be used as a shell command. 
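+
+  For instance (a hypothetical call), ``get_command_string(['ls', '-l'])``
+  returns ``ls -l`` on Windows and ``'ls' '-l'`` elsewhere.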
+ + Args: + command: List or string representing the command to run. + Returns: + A string suitable for use as a shell command. + """ + if isinstance(command, str): + return command + else: + if os.name == 'nt': + return ' '.join(command) + else: + # The following is identical to Python 3's shlex.quote function. + command_string = '' + for word in command: + # Single quote word, and replace each ' in word with '"'"' + command_string += "'" + word.replace("'", "'\"'\"'") + "' " + return command_string[:-1] + + +def get_command_stderr(command, env=None, close_fds=True): + """Runs the given shell command and returns a tuple. + + Args: + command: List or string representing the command to run. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after forking. + On Windows, this is ignored and close_fds is always False. + + Returns: + Tuple of (exit status, text printed to stdout and stderr by the command). + """ + if env is None: env = {} + if os.name == 'nt': + # Windows does not support setting close_fds to True while also redirecting + # standard handles. + close_fds = False + + use_shell = isinstance(command, str) + process = subprocess.Popen( + command, + close_fds=close_fds, + env=env, + shell=use_shell, + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE) + output = process.communicate()[0] + exit_status = process.wait() + return (exit_status, output) + + +def _quote_long_string(s): + # type: (Union[Text, bytes, bytearray]) -> Text + """Quotes a potentially multi-line string to make the start and end obvious. + + Args: + s: A string. + + Returns: + The quoted string. + """ + if isinstance(s, (bytes, bytearray)): + try: + s = s.decode('utf-8') + except UnicodeDecodeError: + s = str(s) + return ('8<-----------\n' + + s + '\n' + + '----------->8\n') + + +def print_python_version(): + # type: () -> None + # Having this in the test output logs by default helps debugging when all + # you've got is the log and no other idea of which Python was used. + sys.stderr.write('Running tests under Python {0[0]}.{0[1]}.{0[2]}: ' + '{1}\n'.format( + sys.version_info, + sys.executable if sys.executable else 'embedded.')) + + +def main(*args, **kwargs): + # type: (Text, Any) -> None + """Executes a set of Python unit tests. + + Usually this function is called without arguments, so the + unittest.TestProgram instance will get created with the default settings, + so it will run all test methods of all TestCase classes in the ``__main__`` + module. + + Args: + *args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + **kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + print_python_version() + _run_in_app(run_tests, args, kwargs) + + +def _is_in_app_main(): + # type: () -> bool + """Returns True iff app.run is active.""" + f = sys._getframe().f_back # pylint: disable=protected-access + while f: + if f.f_code == app.run.__code__: + return True + f = f.f_back + return False + + +def _register_sigterm_with_faulthandler(): + # type: () -> None + """Have faulthandler dump stacks on SIGTERM. Useful to diagnose timeouts.""" + if getattr(faulthandler, 'register', None): + # faulthandler.register is not available on Windows. + # faulthandler.enable() is already called by app.run. 
+ try: + faulthandler.register(signal.SIGTERM, chain=True) # pytype: disable=module-attr + except Exception as e: # pylint: disable=broad-except + sys.stderr.write('faulthandler.register(SIGTERM) failed ' + '%r; ignoring.\n' % e) + + +def _run_in_app(function, args, kwargs): + # type: (Callable[..., None], Sequence[Text], Mapping[Text, Any]) -> None + """Executes a set of Python unit tests, ensuring app.run. + + This is a private function, users should call absltest.main(). + + _run_in_app calculates argv to be the command-line arguments of this program + (without the flags), sets the default of FLAGS.alsologtostderr to True, + then it calls function(argv, args, kwargs), making sure that `function' + will get called within app.run(). _run_in_app does this by checking whether + it is called by app.run(), or by calling app.run() explicitly. + + The reason why app.run has to be ensured is to make sure that + flags are parsed and stripped properly, and other initializations done by + the app module are also carried out, no matter if absltest.run() is called + from within or outside app.run(). + + If _run_in_app is called from within app.run(), then it will reparse + sys.argv and pass the result without command-line flags into the argv + argument of `function'. The reason why this parsing is needed is that + __main__.main() calls absltest.main() without passing its argv. So the + only way _run_in_app could get to know the argv without the flags is that + it reparses sys.argv. + + _run_in_app changes the default of FLAGS.alsologtostderr to True so that the + test program's stderr will contain all the log messages unless otherwise + specified on the command-line. This overrides any explicit assignment to + FLAGS.alsologtostderr by the test program prior to the call to _run_in_app() + (e.g. in __main__.main). + + Please note that _run_in_app (and the function it calls) is allowed to make + changes to kwargs. + + Args: + function: absltest.run_tests or a similar function. It will be called as + function(argv, args, kwargs) where argv is a list containing the + elements of sys.argv without the command-line flags. + args: Positional arguments passed through to unittest.TestProgram.__init__. + kwargs: Keyword arguments passed through to unittest.TestProgram.__init__. + """ + if _is_in_app_main(): + _register_sigterm_with_faulthandler() + + # Change the default of alsologtostderr from False to True, so the test + # programs's stderr will contain all the log messages. + # If --alsologtostderr=false is specified in the command-line, or user + # has called FLAGS.alsologtostderr = False before, then the value is kept + # False. + FLAGS.set_default('alsologtostderr', True) + + # Here we only want to get the `argv` without the flags. To avoid any + # side effects of parsing flags, we temporarily stub out the `parse` method + stored_parse_methods = {} + noop_parse = lambda _: None + for name in FLAGS: + # Avoid any side effects of parsing flags. + stored_parse_methods[name] = FLAGS[name].parse + # This must be a separate loop since multiple flag names (short_name=) can + # point to the same flag object. + for name in FLAGS: + FLAGS[name].parse = noop_parse + try: + argv = FLAGS(sys.argv) + finally: + for name in FLAGS: + FLAGS[name].parse = stored_parse_methods[name] + sys.stdout.flush() + + function(argv, args, kwargs) + else: + # Send logging to stderr. Use --alsologtostderr instead of --logtostderr + # in case tests are reading their own logs. 
+ FLAGS.set_default('alsologtostderr', True) + + def main_function(argv): + _register_sigterm_with_faulthandler() + function(argv, args, kwargs) + + app.run(main=main_function) + + +def _is_suspicious_attribute(testCaseClass, name): + # type: (Type, Text) -> bool + """Returns True if an attribute is a method named like a test method.""" + if name.startswith('Test') and len(name) > 4 and name[4].isupper(): + attr = getattr(testCaseClass, name) + if inspect.isfunction(attr) or inspect.ismethod(attr): + args = inspect.getfullargspec(attr) + return (len(args.args) == 1 and args.args[0] == 'self' and + args.varargs is None and args.varkw is None and + not args.kwonlyargs) + return False + + +def skipThisClass(reason): + # type: (Text) -> Callable[[_T], _T] + """Skip tests in the decorated TestCase, but not any of its subclasses. + + This decorator indicates that this class should skip all its tests, but not + any of its subclasses. Useful for if you want to share testMethod or setUp + implementations between a number of concrete testcase classes. + + Example usage, showing how you can share some common test methods between + subclasses. In this example, only ``BaseTest`` will be marked as skipped, and + not RealTest or SecondRealTest:: + + @absltest.skipThisClass("Shared functionality") + class BaseTest(absltest.TestCase): + def test_simple_functionality(self): + self.assertEqual(self.system_under_test.method(), 1) + + class RealTest(BaseTest): + def setUp(self): + super().setUp() + self.system_under_test = MakeSystem(argument) + + def test_specific_behavior(self): + ... + + class SecondRealTest(BaseTest): + def setUp(self): + super().setUp() + self.system_under_test = MakeSystem(other_arguments) + + def test_other_behavior(self): + ... + + Args: + reason: The reason we have a skip in place. For instance: 'shared test + methods' or 'shared assertion methods'. + + Returns: + Decorator function that will cause a class to be skipped. + """ + if isinstance(reason, type): + raise TypeError('Got {!r}, expected reason as string'.format(reason)) + + def _skip_class(test_case_class): + if not issubclass(test_case_class, unittest.TestCase): + raise TypeError( + 'Decorating {!r}, expected TestCase subclass'.format(test_case_class)) + + # Only shadow the setUpClass method if it is directly defined. If it is + # in the parent class we invoke it via a super() call instead of holding + # a reference to it. + shadowed_setupclass = test_case_class.__dict__.get('setUpClass', None) + + @classmethod + def replacement_setupclass(cls, *args, **kwargs): + # Skip this class if it is the one that was decorated with @skipThisClass + if cls is test_case_class: + raise SkipTest(reason) + if shadowed_setupclass: + # Pass along `cls` so the MRO chain doesn't break. + # The original method is a `classmethod` descriptor, which can't + # be directly called, but `__func__` has the underlying function. + return shadowed_setupclass.__func__(cls, *args, **kwargs) + else: + # Because there's no setUpClass() defined directly on test_case_class, + # we call super() ourselves to continue execution of the inheritance + # chain. + return super(test_case_class, cls).setUpClass(*args, **kwargs) + + test_case_class.setUpClass = replacement_setupclass + return test_case_class + + return _skip_class + + +class TestLoader(unittest.TestLoader): + """A test loader which supports common test features. 
+ + Supported features include: + * Banning untested methods with test-like names: methods attached to this + testCase with names starting with `Test` are ignored by the test runner, + and often represent mistakenly-omitted test cases. This loader will raise + a TypeError when attempting to load a TestCase with such methods. + * Randomization of test case execution order (optional). + """ + + _ERROR_MSG = textwrap.dedent("""Method '%s' is named like a test case but + is not one. This is often a bug. If you want it to be a test method, + name it with 'test' in lowercase. If not, rename the method to not begin + with 'Test'.""") + + def __init__(self, *args, **kwds): + super(TestLoader, self).__init__(*args, **kwds) + seed = _get_default_randomize_ordering_seed() + if seed: + self._randomize_ordering_seed = seed + self._random = random.Random(self._randomize_ordering_seed) + else: + self._randomize_ordering_seed = None + self._random = None + + def getTestCaseNames(self, testCaseClass): # pylint:disable=invalid-name + """Validates and returns a (possibly randomized) list of test case names.""" + for name in dir(testCaseClass): + if _is_suspicious_attribute(testCaseClass, name): + raise TypeError(TestLoader._ERROR_MSG % name) + names = list(super(TestLoader, self).getTestCaseNames(testCaseClass)) + if self._randomize_ordering_seed is not None: + logging.info( + 'Randomizing test order with seed: %d', self._randomize_ordering_seed) + logging.info( + 'To reproduce this order, re-run with ' + '--test_randomize_ordering_seed=%d', self._randomize_ordering_seed) + self._random.shuffle(names) + return names + + +def get_default_xml_output_filename(): + # type: () -> Optional[Text] + if os.environ.get('XML_OUTPUT_FILE'): + return os.environ['XML_OUTPUT_FILE'] + elif os.environ.get('RUNNING_UNDER_TEST_DAEMON'): + return os.path.join(os.path.dirname(TEST_TMPDIR.value), 'test_detail.xml') + elif os.environ.get('TEST_XMLOUTPUTDIR'): + return os.path.join( + os.environ['TEST_XMLOUTPUTDIR'], + os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.xml') + + +def _setup_filtering(argv: MutableSequence[str]) -> bool: + """Implements the bazel test filtering protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_ONLY: string, if set, is forwarded to the unittest + framework to use as a test filter. Its value is split with shlex, then: + 1. On Python 3.6 and before, split values are passed as positional + arguments on argv. + 2. On Python 3.7+, split values are passed to unittest's `-k` flag. Tests + are matched by glob patterns or substring. See + https://docs.python.org/3/library/unittest.html#cmdoption-unittest-k + + Args: + argv: the argv to mutate in-place. + + Returns: + Whether test filtering is requested. + """ + test_filter = os.environ.get('TESTBRIDGE_TEST_ONLY') + if argv is None or not test_filter: + return False + + filters = shlex.split(test_filter) + if sys.version_info[:2] >= (3, 7): + filters = ['-k=' + test_filter for test_filter in filters] + + argv[1:1] = filters + return True + + +def _setup_test_runner_fail_fast(argv): + # type: (MutableSequence[Text]) -> None + """Implements the bazel test fail fast protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_RUNNER_FAIL_FAST=<1|0> + + If set to 1, --failfast is passed to the unittest framework to return upon + first failure. + + Args: + argv: the argv to mutate in-place. 
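+
+  Hypothetical illustration (names and values are examples only)::
+
+    os.environ['TESTBRIDGE_TEST_RUNNER_FAIL_FAST'] = '1'
+    argv = ['prog']
+    _setup_test_runner_fail_fast(argv)
+    assert argv == ['prog', '--failfast']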
+ """ + + if argv is None: + return + + if os.environ.get('TESTBRIDGE_TEST_RUNNER_FAIL_FAST') != '1': + return + + argv[1:1] = ['--failfast'] + + +def _setup_sharding( + custom_loader: Optional[unittest.TestLoader] = None, +) -> Tuple[unittest.TestLoader, Optional[int]]: + """Implements the bazel sharding protocol. + + The following environment variables are used in this method: + + TEST_SHARD_STATUS_FILE: string, if set, points to a file. We write a blank + file to tell the test runner that this test implements the test sharding + protocol. + + TEST_TOTAL_SHARDS: int, if set, sharding is requested. + + TEST_SHARD_INDEX: int, must be set if TEST_TOTAL_SHARDS is set. Specifies + the shard index for this instance of the test process. Must satisfy: + 0 <= TEST_SHARD_INDEX < TEST_TOTAL_SHARDS. + + Args: + custom_loader: A TestLoader to be made sharded. + + Returns: + A tuple of ``(test_loader, shard_index)``. ``test_loader`` is for + shard-filtering or the standard test loader depending on the sharding + environment variables. ``shard_index`` is the shard index, or ``None`` when + sharding is not used. + """ + + # It may be useful to write the shard file even if the other sharding + # environment variables are not set. Test runners may use this functionality + # to query whether a test binary implements the test sharding protocol. + if 'TEST_SHARD_STATUS_FILE' in os.environ: + try: + with open(os.environ['TEST_SHARD_STATUS_FILE'], 'w') as f: + f.write('') + except IOError: + sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.' + % os.environ['TEST_SHARD_STATUS_FILE']) + sys.exit(1) + + base_loader = custom_loader or TestLoader() + if 'TEST_TOTAL_SHARDS' not in os.environ: + # Not using sharding, use the expected test loader. + return base_loader, None + + total_shards = int(os.environ['TEST_TOTAL_SHARDS']) + shard_index = int(os.environ['TEST_SHARD_INDEX']) + + if shard_index < 0 or shard_index >= total_shards: + sys.stderr.write('ERROR: Bad sharding values. index=%d, total=%d\n' % + (shard_index, total_shards)) + sys.exit(1) + + # Replace the original getTestCaseNames with one that returns + # the test case names for this shard. + delegate_get_names = base_loader.getTestCaseNames + + bucket_iterator = itertools.cycle(range(total_shards)) + + def getShardedTestCaseNames(testCaseClass): + filtered_names = [] + # We need to sort the list of tests in order to determine which tests this + # shard is responsible for; however, it's important to preserve the order + # returned by the base loader, e.g. in the case of randomized test ordering. + ordered_names = delegate_get_names(testCaseClass) + for testcase in sorted(ordered_names): + bucket = next(bucket_iterator) + if bucket == shard_index: + filtered_names.append(testcase) + return [x for x in ordered_names if x in filtered_names] + + base_loader.getTestCaseNames = getShardedTestCaseNames + return base_loader, shard_index + + +def _run_and_get_tests_result( + argv: MutableSequence[str], + args: Sequence[Any], + kwargs: MutableMapping[str, Any], + xml_test_runner_class: Type[unittest.TextTestRunner], +) -> Tuple[unittest.TestResult, bool]: + """Same as run_tests, but it doesn't exit. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. 
+ args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + xml_test_runner_class: The type of the test runner class. + + Returns: + A tuple of ``(test_result, fail_when_no_tests_ran)``. + ``fail_when_no_tests_ran`` indicates whether the test should fail when + no tests ran. + """ + + # The entry from kwargs overrides argv. + argv = kwargs.pop('argv', argv) + + if sys.version_info[:2] >= (3, 12): + # Python 3.12 unittest changed the behavior from PASS to FAIL in + # https://github.com/python/cpython/pull/102051. absltest follows this. + fail_when_no_tests_ran = True + else: + # Historically, absltest and unittest before Python 3.12 passes if no tests + # ran. + fail_when_no_tests_ran = False + + # Set up test filtering if requested in environment. + if _setup_filtering(argv): + # When test filtering is requested, ideally we also want to fail when no + # tests ran. However, the test filters are usually done when running bazel. + # When you run multiple targets, e.g. `bazel test //my_dir/... + # --test_filter=MyTest`, you don't necessarily want individual tests to fail + # because no tests match in that particular target. + # Due to this use case, we don't fail when test filtering is requested via + # the environment variable from bazel. + fail_when_no_tests_ran = False + + # Set up --failfast as requested in environment + _setup_test_runner_fail_fast(argv) + + # Shard the (default or custom) loader if sharding is turned on. + kwargs['testLoader'], shard_index = _setup_sharding( + kwargs.get('testLoader', None) + ) + if shard_index is not None and shard_index > 0: + # When sharding is requested, all the shards except the first one shall not + # fail when no tests ran. This happens when the shard count is greater than + # the test case count. + fail_when_no_tests_ran = False + + # XML file name is based upon (sorted by priority): + # --xml_output_file flag, XML_OUTPUT_FILE variable, + # TEST_XMLOUTPUTDIR variable or RUNNING_UNDER_TEST_DAEMON variable. + if not FLAGS.xml_output_file: + FLAGS.xml_output_file = get_default_xml_output_filename() + xml_output_file = FLAGS.xml_output_file + + xml_buffer = None + if xml_output_file: + xml_output_dir = os.path.dirname(xml_output_file) + if xml_output_dir and not os.path.isdir(xml_output_dir): + try: + os.makedirs(xml_output_dir) + except OSError as e: + # File exists error can occur with concurrent tests + if e.errno != errno.EEXIST: + raise + # Fail early if we can't write to the XML output file. This is so that we + # don't waste people's time running tests that will just fail anyways. + with _open(xml_output_file, 'w'): + pass + + # We can reuse testRunner if it supports XML output (e. g. by inheriting + # from xml_reporter.TextAndXMLTestRunner). Otherwise we need to use + # xml_reporter.TextAndXMLTestRunner. + if (kwargs.get('testRunner') is not None + and not hasattr(kwargs['testRunner'], 'set_default_xml_stream')): + sys.stderr.write('WARNING: XML_OUTPUT_FILE or --xml_output_file setting ' + 'overrides testRunner=%r setting (possibly from --pdb)' + % (kwargs['testRunner'])) + # Passing a class object here allows TestProgram to initialize + # instances based on its kwargs and/or parsed command-line args. 
+ kwargs['testRunner'] = xml_test_runner_class + if kwargs.get('testRunner') is None: + kwargs['testRunner'] = xml_test_runner_class + # Use an in-memory buffer (not backed by the actual file) to store the XML + # report, because some tools modify the file (e.g., create a placeholder + # with partial information, in case the test process crashes). + xml_buffer = io.StringIO() + kwargs['testRunner'].set_default_xml_stream(xml_buffer) # pytype: disable=attribute-error + + # If we've used a seed to randomize test case ordering, we want to record it + # as a top-level attribute in the `testsuites` section of the XML output. + randomize_ordering_seed = getattr( + kwargs['testLoader'], '_randomize_ordering_seed', None) + setter = getattr(kwargs['testRunner'], 'set_testsuites_property', None) + if randomize_ordering_seed and setter: + setter('test_randomize_ordering_seed', randomize_ordering_seed) + elif kwargs.get('testRunner') is None: + kwargs['testRunner'] = _pretty_print_reporter.TextTestRunner + + if FLAGS.pdb_post_mortem: + runner = kwargs['testRunner'] + # testRunner can be a class or an instance, which must be tested for + # differently. + # Overriding testRunner isn't uncommon, so only enable the debugging + # integration if the runner claims it does; we don't want to accidentally + # clobber something on the runner. + if ((isinstance(runner, type) and + issubclass(runner, _pretty_print_reporter.TextTestRunner)) or + isinstance(runner, _pretty_print_reporter.TextTestRunner)): + runner.run_for_debugging = True + + # Make sure tmpdir exists. + if not os.path.isdir(TEST_TMPDIR.value): + try: + os.makedirs(TEST_TMPDIR.value) + except OSError as e: + # Concurrent test might have created the directory. + if e.errno != errno.EEXIST: + raise + + # Let unittest.TestProgram.__init__ do its own argv parsing, e.g. for '-v', + # on argv, which is sys.argv without the command-line flags. + kwargs['argv'] = argv + + # Request unittest.TestProgram to not exit. The exit will be handled by + # `absltest.run_tests`. + kwargs['exit'] = False + + try: + test_program = unittest.TestProgram(*args, **kwargs) + return test_program.result, fail_when_no_tests_ran + finally: + if xml_buffer: + try: + with _open(xml_output_file, 'w') as f: + f.write(xml_buffer.getvalue()) + finally: + xml_buffer.close() + + +def run_tests( + argv: MutableSequence[Text], + args: Sequence[Any], + kwargs: MutableMapping[Text, Any], +) -> None: + """Executes a set of Python unit tests. + + Most users should call absltest.main() instead of run_tests. + + Please note that run_tests should be called from app.run. + Calling absltest.main() would ensure that. + + Please note that run_tests is allowed to make changes to kwargs. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. + args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + result, fail_when_no_tests_ran = _run_and_get_tests_result( + argv, args, kwargs, xml_reporter.TextAndXMLTestRunner + ) + if fail_when_no_tests_ran and result.testsRun == 0 and not result.skipped: + # Python 3.12 unittest exits with 5 when no tests ran. The exit code 5 comes + # from pytest which does the same thing. 
+ sys.exit(5) + sys.exit(not result.wasSuccessful()) + + +def _rmtree_ignore_errors(path): + # type: (Text) -> None + if os.path.isfile(path): + try: + os.unlink(path) + except OSError: + pass + else: + shutil.rmtree(path, ignore_errors=True) + + +def _get_first_part(path): + # type: (Text) -> Text + parts = path.split(os.sep, 1) + return parts[0] diff --git a/MLPY/Lib/site-packages/absl/testing/flagsaver.py b/MLPY/Lib/site-packages/absl/testing/flagsaver.py new file mode 100644 index 0000000000000000000000000000000000000000..7df072292674f14d45b668fe4c36d61208697479 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/flagsaver.py @@ -0,0 +1,386 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Decorator and context manager for saving and restoring flag values. + +There are many ways to save and restore. Always use the most convenient method +for a given use case. + +Here are examples of each method. They all call ``do_stuff()`` while +``FLAGS.someflag`` is temporarily set to ``'foo'``:: + + from absl.testing import flagsaver + + # Use a decorator which can optionally override flags via arguments. + @flagsaver.flagsaver(someflag='foo') + def some_func(): + do_stuff() + + # Use a decorator which can optionally override flags with flagholders. + @flagsaver.flagsaver((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, 23)) + def some_func(): + do_stuff() + + # Use a decorator which does not override flags itself. + @flagsaver.flagsaver + def some_func(): + FLAGS.someflag = 'foo' + do_stuff() + + # Use a context manager which can optionally override flags via arguments. + with flagsaver.flagsaver(someflag='foo'): + do_stuff() + + # Save and restore the flag values yourself. + saved_flag_values = flagsaver.save_flag_values() + try: + FLAGS.someflag = 'foo' + do_stuff() + finally: + flagsaver.restore_flag_values(saved_flag_values) + + # Use the parsing version to emulate users providing the flags. + # Note that all flags must be provided as strings (unparsed). + @flagsaver.as_parsed(some_int_flag='123') + def some_func(): + # Because the flag was parsed it is considered "present". + assert FLAGS.some_int_flag.present + do_stuff() + + # flagsaver.as_parsed() can also be used as a context manager just like + # flagsaver.flagsaver() + with flagsaver.as_parsed(some_int_flag='123'): + do_stuff() + + # The flagsaver.as_parsed() interface also supports FlagHolder objects. + @flagsaver.as_parsed((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, '23')) + def some_func(): + do_stuff() + + # Using as_parsed with a multi_X flag requires a sequence of strings. + @flagsaver.as_parsed(some_multi_int_flag=['123', '456']) + def some_func(): + assert FLAGS.some_multi_int_flag.present + do_stuff() + + # If a flag name includes non-identifier characters it can be specified like + # so: + @flagsaver.as_parsed(**{'i-like-dashes': 'true'}) + def some_func(): + do_stuff() + +We save and restore a shallow copy of each Flag object's ``__dict__`` attribute. 
+This preserves all attributes of the flag, such as whether or not it was +overridden from its default value. + +WARNING: Currently a flag that is saved and then deleted cannot be restored. An +exception will be raised. However if you *add* a flag after saving flag values, +and then restore flag values, the added flag will be deleted with no errors. +""" + +import collections +import functools +import inspect +from typing import overload, Any, Callable, Mapping, Tuple, TypeVar, Type, Sequence, Union + +from absl import flags + +FLAGS = flags.FLAGS + + +# The type of pre/post wrapped functions. +_CallableT = TypeVar('_CallableT', bound=Callable) + + +@overload +def flagsaver(*args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def flagsaver(func: _CallableT) -> _CallableT: + ... + + +def flagsaver(*args, **kwargs): + """The main flagsaver interface. See module doc for usage.""" + return _construct_overrider(_FlagOverrider, *args, **kwargs) + + +@overload +def as_parsed(*args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def as_parsed(func: _CallableT) -> _CallableT: + ... + + +def as_parsed(*args, **kwargs): + """Overrides flags by parsing strings, saves flag state similar to flagsaver. + + This function can be used as either a decorator or context manager similar to + flagsaver.flagsaver(). However, where flagsaver.flagsaver() directly sets the + flags to new values, this function will parse the provided arguments as if + they were provided on the command line. Among other things, this will cause + `FLAGS['flag_name'].present == True`. + + A note on unparsed input: For many flag types, the unparsed version will be + a single string. However for multi_x (multi_string, multi_integer, multi_enum) + the unparsed version will be a Sequence of strings. + + Args: + *args: Tuples of FlagHolders and their unparsed value. + **kwargs: The keyword args are flag names, and the values are unparsed + values. + + Returns: + _ParsingFlagOverrider that serves as a context manager or decorator. Will + save previous flag state and parse new flags, then on cleanup it will + restore the previous flag state. + """ + return _construct_overrider(_ParsingFlagOverrider, *args, **kwargs) + + +# NOTE: the order of these overload declarations matters. The type checker will +# pick the first match which could be incorrect. +@overload +def _construct_overrider( + flag_overrider_cls: Type['_ParsingFlagOverrider'], + *args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + *args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + func: _CallableT) -> _CallableT: + ... + + +def _construct_overrider(flag_overrider_cls, *args, **kwargs): + """Handles the args/kwargs returning an instance of flag_overrider_cls. + + If flag_overrider_cls is _FlagOverrider then values should be native python + types matching the python types. Otherwise if flag_overrider_cls is + _ParsingFlagOverrider the values should be strings or sequences of strings. + + Args: + flag_overrider_cls: The class that will do the overriding. + *args: Tuples of FlagHolder and the new flag value. + **kwargs: Keword args mapping flag name to new flag value. 
+ + Returns: + A _FlagOverrider to be used as a decorator or context manager. + """ + if not args: + return flag_overrider_cls(**kwargs) + # args can be [func] if used as `@flagsaver` instead of `@flagsaver(...)` + if len(args) == 1 and callable(args[0]): + if kwargs: + raise ValueError( + "It's invalid to specify both positional and keyword parameters.") + func = args[0] + if inspect.isclass(func): + raise TypeError('@flagsaver.flagsaver cannot be applied to a class.') + return _wrap(flag_overrider_cls, func, {}) + # args can be a list of (FlagHolder, value) pairs. + # In which case they augment any specified kwargs. + for arg in args: + if not isinstance(arg, tuple) or len(arg) != 2: + raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,)) + holder, value = arg + if not isinstance(holder, flags.FlagHolder): + raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,)) + if holder.name in kwargs: + raise ValueError('Cannot set --%s multiple times' % holder.name) + kwargs[holder.name] = value + return flag_overrider_cls(**kwargs) + + +def save_flag_values( + flag_values: flags.FlagValues = FLAGS) -> Mapping[str, Mapping[str, Any]]: + """Returns copy of flag values as a dict. + + Args: + flag_values: FlagValues, the FlagValues instance with which the flag will be + saved. This should almost never need to be overridden. + + Returns: + Dictionary mapping keys to values. Keys are flag names, values are + corresponding ``__dict__`` members. E.g. ``{'key': value_dict, ...}``. + """ + return {name: _copy_flag_dict(flag_values[name]) for name in flag_values} + + +def restore_flag_values(saved_flag_values: Mapping[str, Mapping[str, Any]], + flag_values: flags.FlagValues = FLAGS): + """Restores flag values based on the dictionary of flag values. + + Args: + saved_flag_values: {'flag_name': value_dict, ...} + flag_values: FlagValues, the FlagValues instance from which the flag will be + restored. This should almost never need to be overridden. + """ + new_flag_names = list(flag_values) + for name in new_flag_names: + saved = saved_flag_values.get(name) + if saved is None: + # If __dict__ was not saved delete "new" flag. + delattr(flag_values, name) + else: + if flag_values[name].value != saved['_value']: + flag_values[name].value = saved['_value'] # Ensure C++ value is set. + flag_values[name].__dict__ = saved + + +@overload +def _wrap(flag_overrider_cls: Type['_FlagOverrider'], func: _CallableT, + overrides: Mapping[str, Any]) -> _CallableT: + ... + + +@overload +def _wrap(flag_overrider_cls: Type['_ParsingFlagOverrider'], func: _CallableT, + overrides: Mapping[str, Union[str, Sequence[str]]]) -> _CallableT: + ... + + +def _wrap(flag_overrider_cls, func, overrides): + """Creates a wrapper function that saves/restores flag values. + + Args: + flag_overrider_cls: The class that will be used as a context manager. + func: This will be called between saving flags and restoring flags. + overrides: Flag names mapped to their values. These flags will be set after + saving the original flag state. The type of the values depends on if + _FlagOverrider or _ParsingFlagOverrider was specified. + + Returns: + A wrapped version of func. + """ + + @functools.wraps(func) + def _flagsaver_wrapper(*args, **kwargs): + """Wrapper function that saves and restores flags.""" + with flag_overrider_cls(**overrides): + return func(*args, **kwargs) + + return _flagsaver_wrapper + + +class _FlagOverrider(object): + """Overrides flags for the duration of the decorated function call. 
+ + It also restores all original values of flags after decorated method + completes. + """ + + def __init__(self, **overrides: Any): + self._overrides = overrides + self._saved_flag_values = None + + def __call__(self, func: _CallableT) -> _CallableT: + if inspect.isclass(func): + raise TypeError('flagsaver cannot be applied to a class.') + return _wrap(self.__class__, func, self._overrides) + + def __enter__(self): + self._saved_flag_values = save_flag_values(FLAGS) + try: + FLAGS._set_attributes(**self._overrides) + except: + # It may fail because of flag validators. + restore_flag_values(self._saved_flag_values, FLAGS) + raise + + def __exit__(self, exc_type, exc_value, traceback): + restore_flag_values(self._saved_flag_values, FLAGS) + + +class _ParsingFlagOverrider(_FlagOverrider): + """Context manager for overriding flags. + + Simulates command line parsing. + + This is simlar to _FlagOverrider except that all **overrides should be + strings or sequences of strings, and when context is entered this class calls + .parse(value) + + This results in the flags having .present set properly. + """ + + def __init__(self, **overrides: Union[str, Sequence[str]]): + for flag_name, new_value in overrides.items(): + if isinstance(new_value, str): + continue + if (isinstance(new_value, collections.abc.Sequence) and + all(isinstance(single_value, str) for single_value in new_value)): + continue + raise TypeError( + f'flagsaver.as_parsed() cannot parse {flag_name}. Expected a single ' + f'string or sequence of strings but {type(new_value)} was provided.') + super().__init__(**overrides) + + def __enter__(self): + self._saved_flag_values = save_flag_values(FLAGS) + try: + for flag_name, unparsed_value in self._overrides.items(): + # LINT.IfChange(flag_override_parsing) + FLAGS[flag_name].parse(unparsed_value) + FLAGS[flag_name].using_default_value = False + # LINT.ThenChange() + + # Perform the validation on all modified flags. This is something that + # FLAGS._set_attributes() does for you in _FlagOverrider. + for flag_name in self._overrides: + FLAGS._assert_validators(FLAGS[flag_name].validators) + + except KeyError as e: + # If a flag doesn't exist, an UnrecognizedFlagError is more specific. + restore_flag_values(self._saved_flag_values, FLAGS) + raise flags.UnrecognizedFlagError('Unknown command line flag.') from e + + except: + # It may fail because of flag validators or general parsing issues. + restore_flag_values(self._saved_flag_values, FLAGS) + raise + + +def _copy_flag_dict(flag: flags.Flag) -> Mapping[str, Any]: + """Returns a copy of the flag object's ``__dict__``. + + It's mostly a shallow copy of the ``__dict__``, except it also does a shallow + copy of the validator list. + + Args: + flag: flags.Flag, the flag to copy. + + Returns: + A copy of the flag object's ``__dict__``. + """ + copy = flag.__dict__.copy() + copy['_value'] = flag.value # Ensure correct restore for C++ flags. + copy['validators'] = list(flag.validators) + return copy diff --git a/MLPY/Lib/site-packages/absl/testing/parameterized.py b/MLPY/Lib/site-packages/absl/testing/parameterized.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d2c2b51f83358d6e2c83531226c7ea07f526fe --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/parameterized.py @@ -0,0 +1,724 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example:: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters):: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name and the parameters for that test. + +The id method of the test, used internally by the unittest framework, is also +modified to show the arguments (but note that the name reported by `id()` +doesn't match the actual test name, see below). To make sure that test names +stay the same across several invocations, object representations like:: + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into ``__main__.Foo``. When selecting a subset of test cases to run +on the command-line, the test cases contain an index suffix for each argument +in the order they were passed to :func:`parameters` (eg. testAddition0, +testAddition1, etc.) This naming scheme is subject to change; for more reliable +and stable names, especially in test logs, use :func:`named_parameters` instead. + +Tests using :func:`named_parameters` are similar to :func:`parameters`, except +only tuples or dicts of args are supported. For tuples, the first parameter arg +has to be a string (or an object that returns an apt name when converted via +``str()``). 
For dicts, a value for the key ``testcase_name`` must be present and +must be a string (or an object that returns an apt name when converted via +``str()``):: + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + ('Normal', 'aa', 'aaa', True), + ('EmptyPrefix', '', 'abc', True), + ('BothEmpty', '', '', True)) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, string.startswith(prefix)) + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + {'testcase_name': 'Normal', + 'result': True, 'string': 'aaa', 'prefix': 'aa'}, + {'testcase_name': 'EmptyPrefix', + 'result': True, 'string': 'abc', 'prefix': ''}, + {'testcase_name': 'BothEmpty', + 'result': True, 'string': '', 'prefix': ''}) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, string.startswith(prefix)) + +Named tests also have the benefit that they can be run individually +from the command line:: + + $ testmodule.py NamedExample.testStartsWithNormal + . + -------------------------------------------------------------------- + Ran 1 test in 0.000s + + OK + +Parameterized Classes +===================== + +If invocation arguments are shared across test methods in a single +TestCase class, instead of decorating all test methods +individually, the class itself can be decorated:: + + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9)) + class ArithmeticTest(parameterized.TestCase): + def testAdd(self, arg1, arg2, result): + self.assertEqual(arg1 + arg2, result) + + def testSubtract(self, arg1, arg2, result): + self.assertEqual(result - arg1, arg2) + +Inputs from Iterables +===================== + +If parameters should be shared across several test cases, or are dynamically +created from other sources, a single non-tuple iterable can be passed into +the decorator. This iterable will be used to obtain the test cases:: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + c.op1, c.op2, c.result for c in testcases + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Single-Argument Test Methods +============================ + +If a test method takes only one argument, the single arguments must not be +wrapped into a tuple:: + + class NegativeNumberExample(parameterized.TestCase): + @parameterized.parameters( + -1, -3, -4, -5 + ) + def testIsNegative(self, arg): + self.assertTrue(IsNegative(arg)) + + +List/tuple as a Single Argument +=============================== + +If a test method takes a single argument of a list/tuple, it must be wrapped +inside a tuple:: + + class ZeroSumExample(parameterized.TestCase): + @parameterized.parameters( + ([-1, 0, 1], ), + ([-2, 0, 2], ), + ) + def testSumIsZero(self, arg): + self.assertEqual(0, sum(arg)) + + +Cartesian product of Parameter Values as Parameterized Test Cases +================================================================= + +If required to test method over a cartesian product of parameters, +`parameterized.product` may be used to facilitate generation of parameters +test combinations:: + + class TestModuloExample(parameterized.TestCase): + @parameterized.product( + num=[0, 20, 80], + modulo=[2, 4], + expected=[0] + ) + def testModuloResult(self, num, modulo, expected): + self.assertEqual(expected, num % modulo) + +This results in 6 test cases being created - one for each combination of the +parameters. 
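A small plain-Python sketch of that expansion (mirroring, not reusing, what product() does internally): the three values of num crossed with the two values of modulo and the single expected value yield six keyword-argument dicts, one per generated test case.

import itertools

grid = {'num': [0, 20, 80], 'modulo': [2, 4], 'expected': [0]}
cases = [dict(zip(grid, combo)) for combo in itertools.product(*grid.values())]
print(len(cases))  # 6
print(cases[0])    # {'num': 0, 'modulo': 2, 'expected': 0}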
It is also possible to supply sequences of keyword argument dicts +as elements of the cartesian product:: + + @parameterized.product( + (dict(num=5, modulo=3, expected=2), + dict(num=7, modulo=4, expected=3)), + dtype=(int, float) + ) + def testModuloResult(self, num, modulo, expected, dtype): + self.assertEqual(expected, dtype(num) % modulo) + +This results in 4 test cases being created - for each of the two sets of test +data (supplied as kwarg dicts) and for each of the two data types (supplied as +a named parameter). Multiple keyword argument dicts may be supplied if required. + +Async Support +============= + +If a test needs to call async functions, it can inherit from both +parameterized.TestCase and another TestCase that supports async calls, such +as [asynctest](https://github.com/Martiusweb/asynctest):: + + import asynctest + + class AsyncExample(parameterized.TestCase, asynctest.TestCase): + @parameterized.parameters( + ('a', 1), + ('b', 2), + ) + async def testSomeAsyncFunction(self, arg, expected): + actual = await someAsyncFunction(arg) + self.assertEqual(actual, expected) +""" + +from collections import abc +import functools +import inspect +import itertools +import re +import types +import unittest +import warnings + +from absl.testing import absltest + + +_ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>') +_NAMED = object() +_ARGUMENT_REPR = object() +_NAMED_DICT_KEY = 'testcase_name' + + +class NoTestsError(Exception): + """Raised when parameterized decorators do not generate any tests.""" + + +class DuplicateTestNameError(Exception): + """Raised when a parameterized test has the same test name multiple times.""" + + def __init__(self, test_class_name, new_test_name, original_test_name): + super(DuplicateTestNameError, self).__init__( + 'Duplicate parameterized test name in {}: generated test name {!r} ' + '(generated from {!r}) already exists. Consider using ' + 'named_parameters() to give your tests unique names and/or renaming ' + 'the conflicting test method.'.format( + test_class_name, new_test_name, original_test_name)) + + +def _clean_repr(obj): + return _ADDR_RE.sub(r'<\1>', repr(obj)) + + +def _non_string_or_bytes_iterable(obj): + return (isinstance(obj, abc.Iterable) and not isinstance(obj, str) and + not isinstance(obj, bytes)) + + +def _format_parameter_list(testcase_params): + if isinstance(testcase_params, abc.Mapping): + return ', '.join('%s=%s' % (argname, _clean_repr(value)) + for argname, value in testcase_params.items()) + elif _non_string_or_bytes_iterable(testcase_params): + return ', '.join(map(_clean_repr, testcase_params)) + else: + return _format_parameter_list((testcase_params,)) + + +def _async_wrapped(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + return await func(*args, **kwargs) + return wrapper + + +class _ParameterizedTestIter(object): + """Callable and iterable class for producing new test cases.""" + + def __init__(self, test_method, testcases, naming_type, original_name=None): + """Returns concrete test functions for a test and a list of parameters. + + The naming_type is used to determine the name of the concrete + functions as reported by the unittest framework. If naming_type is + _FIRST_ARG, the testcases must be tuples, and the first element must + have a string representation that is a valid Python identifier. + + Args: + test_method: The decorated test method. + testcases: (list of tuple/dict) A list of parameter tuples/dicts for + individual test invocations. 
+ naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + original_name: The original test method name. When decorated on a test + method, None is passed to __init__ and test_method.__name__ is used. + Note test_method.__name__ might be different than the original defined + test method because of the use of other decorators. A more accurate + value is set by TestGeneratorMetaclass.__new__ later. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + if original_name is None: + original_name = test_method.__name__ + self._original_name = original_name + self.__name__ = _ParameterizedTestIter.__name__ + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run. You may also ' + 'be using another decorator before the parameterized ' + 'one, in which case you should reverse the order.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def make_bound_param_test(testcase_params): + @functools.wraps(test_method) + def bound_param_test(self): + if isinstance(testcase_params, abc.Mapping): + return test_method(self, **testcase_params) + elif _non_string_or_bytes_iterable(testcase_params): + return test_method(self, *testcase_params) + else: + return test_method(self, testcase_params) + + if naming_type is _NAMED: + # Signal the metaclass that the name of the test function is unique + # and descriptive. + bound_param_test.__x_use_name__ = True + + testcase_name = None + if isinstance(testcase_params, abc.Mapping): + if _NAMED_DICT_KEY not in testcase_params: + raise RuntimeError( + 'Dict for named tests must contain key "%s"' % _NAMED_DICT_KEY) + # Create a new dict to avoid modifying the supplied testcase_params. + testcase_name = testcase_params[_NAMED_DICT_KEY] + testcase_params = { + k: v for k, v in testcase_params.items() if k != _NAMED_DICT_KEY + } + elif _non_string_or_bytes_iterable(testcase_params): + if not isinstance(testcase_params[0], str): + raise RuntimeError( + 'The first element of named test parameters is the test name ' + 'suffix and must be a string') + testcase_name = testcase_params[0] + testcase_params = testcase_params[1:] + else: + raise RuntimeError( + 'Named tests must be passed a dict or non-string iterable.') + + test_method_name = self._original_name + # Support PEP-8 underscore style for test naming if used. + if (test_method_name.startswith('test_') + and testcase_name + and not testcase_name.startswith('_')): + test_method_name += '_' + + bound_param_test.__name__ = test_method_name + str(testcase_name) + elif naming_type is _ARGUMENT_REPR: + # If it's a generator, convert it to a tuple and treat them as + # parameters. + if isinstance(testcase_params, types.GeneratorType): + testcase_params = tuple(testcase_params) + # The metaclass creates a unique, but non-descriptive method name for + # _ARGUMENT_REPR tests using an indexed suffix. + # To keep test names descriptive, only the original method name is used. + # To make sure test names are unique, we add a unique descriptive suffix + # __x_params_repr__ for every test. + params_repr = '(%s)' % (_format_parameter_list(testcase_params),) + bound_param_test.__x_params_repr__ = params_repr + else: + raise RuntimeError('%s is not a valid naming type.' 
% (naming_type,)) + + bound_param_test.__doc__ = '%s(%s)' % ( + bound_param_test.__name__, _format_parameter_list(testcase_params)) + if test_method.__doc__: + bound_param_test.__doc__ += '\n%s' % (test_method.__doc__,) + if inspect.iscoroutinefunction(test_method): + return _async_wrapped(bound_param_test) + return bound_param_test + + return (make_bound_param_test(c) for c in self.testcases) + + +def _modify_class(class_object, testcases, naming_type): + assert not getattr(class_object, '_test_params_reprs', None), ( + 'Cannot add parameters to %s. Either it already has parameterized ' + 'methods, or its super class is also a parameterized class.' % ( + class_object,)) + # NOTE: _test_params_repr is private to parameterized.TestCase and it's + # metaclass; do not use it outside of those classes. + class_object._test_params_reprs = test_params_reprs = {} + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _update_class_dict_for_param_test_case( + class_object.__name__, methods, test_params_reprs, name, + _ParameterizedTestIter(obj, testcases, naming_type, name)) + for meth_name, meth in methods.items(): + setattr(class_object, meth_name, meth) + + +def _parameter_decorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A function for modifying the decorated object. + """ + def _apply(obj): + if isinstance(obj, type): + _modify_class(obj, testcases, naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if (len(testcases) == 1 and + not isinstance(testcases[0], tuple) and + not isinstance(testcases[0], abc.Mapping)): + # Support using a single non-tuple parameter as a list of test cases. + # Note that the single non-tuple parameter can't be Mapping either, which + # means a single dict parameter case. + assert _non_string_or_bytes_iterable(testcases[0]), ( + 'Single parameter argument must be a non-string non-Mapping iterable') + testcases = testcases[0] + + if not isinstance(testcases, abc.Sequence): + testcases = list(testcases) + if not testcases: + raise NoTestsError( + 'parameterized test decorators did not generate any tests. ' + 'Make sure you specify non-empty parameters, ' + 'and do not reuse generators more than once.') + + return _apply + + +def parameters(*testcases): + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests with only one + argument). + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _parameter_decorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. For every parameter tuple + passed, the first element of the tuple should be a string and will be appended + to the name of the test method. Each parameter dict passed must have a value + for the key "testcase_name", the string representation of that value will be + appended to the name of the test method. 
+ + Args: + *testcases: Parameters for the decorated method, either a single iterable, + or a list of tuples or dicts. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _parameter_decorator(_NAMED, testcases) + + +def product(*kwargs_seqs, **testgrid): + """A decorator for running tests over cartesian product of parameters values. + + See the module docstring for a usage example. The test will be run for every + possible combination of the parameters. + + Args: + *kwargs_seqs: Each positional parameter is a sequence of keyword arg dicts; + every test case generated will include exactly one kwargs dict from each + positional parameter; these will then be merged to form an overall list + of arguments for the test case. + **testgrid: A mapping of parameter names and their possible values. Possible + values should given as either a list or a tuple. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + + for name, values in testgrid.items(): + assert isinstance(values, (list, tuple)), ( + 'Values of {} must be given as list or tuple, found {}'.format( + name, type(values))) + + prior_arg_names = set() + for kwargs_seq in kwargs_seqs: + assert ((isinstance(kwargs_seq, (list, tuple))) and + all(isinstance(kwargs, dict) for kwargs in kwargs_seq)), ( + 'Positional parameters must be a sequence of keyword arg' + 'dicts, found {}' + .format(kwargs_seq)) + if kwargs_seq: + arg_names = set(kwargs_seq[0]) + assert all(set(kwargs) == arg_names for kwargs in kwargs_seq), ( + 'Keyword argument dicts within a single parameter must all have the ' + 'same keys, found {}'.format(kwargs_seq)) + assert not (arg_names & prior_arg_names), ( + 'Keyword argument dict sequences must all have distinct argument ' + 'names, found duplicate(s) {}' + .format(sorted(arg_names & prior_arg_names))) + prior_arg_names |= arg_names + + assert not (prior_arg_names & set(testgrid)), ( + 'Arguments supplied in kwargs dicts in positional parameters must not ' + 'overlap with arguments supplied as named parameters; found duplicate ' + 'argument(s) {}'.format(sorted(prior_arg_names & set(testgrid)))) + + # Convert testgrid into a sequence of sequences of kwargs dicts and combine + # with the positional parameters. + # So foo=[1,2], bar=[3,4] --> [[{foo: 1}, {foo: 2}], [{bar: 3, bar: 4}]] + testgrid = (tuple({k: v} for v in vs) for k, vs in testgrid.items()) + testgrid = tuple(kwargs_seqs) + tuple(testgrid) + + # Create all possible combinations of parameters as a cartesian product + # of parameter values. + testcases = [ + dict(itertools.chain.from_iterable(case.items() + for case in cases)) + for cases in itertools.product(*testgrid) + ] + return _parameter_decorator(_ARGUMENT_REPR, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for adding tests generated by parameterized decorators.""" + + def __new__(cls, class_name, bases, dct): + # NOTE: _test_params_repr is private to parameterized.TestCase and it's + # metaclass; do not use it outside of those classes. + test_params_reprs = dct.setdefault('_test_params_reprs', {}) + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _non_string_or_bytes_iterable(obj)): + # NOTE: `obj` might not be a _ParameterizedTestIter in two cases: + # 1. 
a class-level iterable named test* that isn't a test, such as + # a list of something. Such attributes get deleted from the class. + # + # 2. If a decorator is applied to the parameterized test, e.g. + # @morestuff + # @parameterized.parameters(...) + # def test_foo(...): ... + # + # This is OK so long as the underlying parameterized function state + # is forwarded (e.g. using functool.wraps() and **without** + # accessing explicitly accessing the internal attributes. + if isinstance(obj, _ParameterizedTestIter): + # Update the original test method name so it's more accurate. + # The mismatch might happen when another decorator is used inside + # the parameterized decrators, and the inner decorator doesn't + # preserve its __name__. + obj._original_name = name + iterator = iter(obj) + dct.pop(name) + _update_class_dict_for_param_test_case( + class_name, dct, test_params_reprs, name, iterator) + # If the base class is a subclass of parameterized.TestCase, inherit its + # _test_params_reprs too. + for base in bases: + # Check if the base has _test_params_reprs first, then check if it's a + # subclass of parameterized.TestCase. Otherwise when this is called for + # the parameterized.TestCase definition itself, this raises because + # itself is not defined yet. This works as long as absltest.TestCase does + # not define _test_params_reprs. + base_test_params_reprs = getattr(base, '_test_params_reprs', None) + if base_test_params_reprs and issubclass(base, TestCase): + for test_method, test_method_id in base_test_params_reprs.items(): + # test_method may both exists in base and this class. + # This class's method overrides base class's. + # That's why it should only inherit it if it does not exist. + test_params_reprs.setdefault(test_method, test_method_id) + + return type.__new__(cls, class_name, bases, dct) + + +def _update_class_dict_for_param_test_case( + test_class_name, dct, test_params_reprs, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + test_class_name: The name of the class tests are added to. + dct: The target dictionary. + test_params_reprs: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. + + Raises: + DuplicateTestNameError: Raised when a test name occurs multiple times. + RuntimeError: If non-parameterized functions are generated. + """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if not (getattr(func, '__x_use_name__', None) or + getattr(func, '__x_params_repr__', None)): + raise RuntimeError( + '{}.{} generated a test function without using the parameterized ' + 'decorators. Only tests generated using the decorators are ' + 'supported.'.format(test_class_name, name)) + + if getattr(func, '__x_use_name__', False): + original_name = func.__name__ + new_name = original_name + else: + original_name = name + new_name = '%s%d' % (original_name, idx) + + if new_name in dct: + raise DuplicateTestNameError(test_class_name, new_name, original_name) + + dct[new_name] = func + test_params_reprs[new_name] = getattr(func, '__x_params_repr__', '') + + +class TestCase(absltest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + # visibility: private; do not call outside this class. 
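The name generation walked through above is easiest to see with named_parameters: for PEP-8 style test_ methods an extra underscore is inserted before the case name, so each case can be selected individually from the command line. A brief sketch:

from absl.testing import parameterized

class StartsWithExample(parameterized.TestCase):

  @parameterized.named_parameters(
      ('Normal', 'aa', 'aaa', True),
      ('EmptyPrefix', '', 'abc', True))
  def test_starts_with(self, prefix, string, result):
    self.assertEqual(result, string.startswith(prefix))

# Generated methods keep descriptive names rather than numeric suffixes:
assert hasattr(StartsWithExample, 'test_starts_with_Normal')
assert hasattr(StartsWithExample, 'test_starts_with_EmptyPrefix')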
+ def _get_params_repr(self): + return self._test_params_reprs.get(self._testMethodName, '') + + def __str__(self): + params_repr = self._get_params_repr() + if params_repr: + params_repr = ' ' + params_repr + return '{}{} ({})'.format( + self._testMethodName, params_repr, + unittest.util.strclass(self.__class__)) + + def id(self): + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + base = super(TestCase, self).id() + params_repr = self._get_params_repr() + if params_repr: + # We include the params in the id so that, when reported in the + # test.xml file, the value is more informative than just "test_foo0". + # Use a space to separate them so that it's copy/paste friendly and + # easy to identify the actual test id. + return '{} {}'.format(base, params_repr) + else: + return base + + +# This function is kept CamelCase because it's used as a class's base class. +def CoopTestCase(other_base_class): # pylint: disable=invalid-name + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + ``mox.MoxTestBase``. + + Only works with metaclasses that do not override ``type.__new__``. + + Example:: + + from absl.testing import parameterized + + class ExampleTest(parameterized.CoopTestCase(OtherTestCase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + # If the other base class has a metaclass of 'type' then trying to combine + # the metaclasses will result in an MRO error. So simply combine them and + # return. + if type(other_base_class) == type: # pylint: disable=unidiomatic-typecheck + warnings.warn( + 'CoopTestCase is only necessary when combining with a class that uses' + ' a metaclass. Use multiple inheritance like this instead: class' + f' ExampleTest(paramaterized.TestCase, {other_base_class.__name__}):', + stacklevel=2, + ) + + class CoopTestCaseBase(other_base_class, TestCase): + pass + + return CoopTestCaseBase + else: + + class CoopMetaclass(type(other_base_class), TestGeneratorMetaclass): # pylint: disable=unused-variable + pass + + class CoopTestCaseBase(other_base_class, TestCase, metaclass=CoopMetaclass): + pass + + return CoopTestCaseBase diff --git a/MLPY/Lib/site-packages/absl/testing/xml_reporter.py b/MLPY/Lib/site-packages/absl/testing/xml_reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..4fcb60c8545c6fbdcd993964dab61b9f833298e0 --- /dev/null +++ b/MLPY/Lib/site-packages/absl/testing/xml_reporter.py @@ -0,0 +1,563 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
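Stepping back to CoopTestCase above: when the other base class carries its own metaclass, a combined metaclass is created so both the other machinery and test generation run. A hedged sketch in which OtherMeta and OtherBase are made-up stand-ins:

from absl.testing import parameterized

class OtherMeta(type):
  pass

class OtherBase(metaclass=OtherMeta):
  pass

class MyTest(parameterized.CoopTestCase(OtherBase)):

  @parameterized.parameters(1, 2)
  def test_positive(self, value):
    self.assertGreater(value, 0)

# MyTest is built by a metaclass that derives from both OtherMeta and the
# parameterized TestGeneratorMetaclass.
assert isinstance(MyTest, OtherMeta)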
+ +"""A Python test reporter that generates test reports in JUnit XML format.""" + +import datetime +import re +import sys +import threading +import time +import traceback +import unittest +from xml.sax import saxutils +from absl.testing import _pretty_print_reporter + + +# See http://www.w3.org/TR/REC-xml/#NT-Char +_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD} + + +_control_character_conversions = { + chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes} + + +_escape_xml_attr_conversions = { + '"': '"', + "'": ''', + '\n': ' ', + '\t': ' ', + '\r': ' ', + ' ': ' '} +_escape_xml_attr_conversions.update(_control_character_conversions) + + +# When class or module level function fails, unittest/suite.py adds a +# _ErrorHolder instance instead of a real TestCase, and it has a description +# like "setUpClass (__main__.MyTestCase)". +_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$') + + +# NOTE: while saxutils.quoteattr() theoretically does the same thing; it +# seems to often end up being too smart for it's own good not escaping properly. +# This function is much more reliable. +def _escape_xml_attr(content): + """Escapes xml attributes.""" + # Note: saxutils doesn't escape the quotes. + return saxutils.escape(content, _escape_xml_attr_conversions) + + +def _escape_cdata(s): + """Escapes a string to be used as XML CDATA. + + CDATA characters are treated strictly as character data, not as XML markup, + but there are still certain restrictions on them. + + Args: + s: the string to be escaped. + Returns: + An escaped version of the input string. + """ + for char, escaped in _control_character_conversions.items(): + s = s.replace(char, escaped) + return s.replace(']]>', ']] >') + + +def _iso8601_timestamp(timestamp): + """Produces an ISO8601 datetime. + + Args: + timestamp: an Epoch based timestamp in seconds. + + Returns: + A iso8601 format timestamp if the input is a valid timestamp, None otherwise + """ + if timestamp is None or timestamp < 0: + return None + return datetime.datetime.fromtimestamp( + timestamp, tz=datetime.timezone.utc).isoformat() + + +def _print_xml_element_header(element, attributes, stream, indentation=''): + """Prints an XML header of an arbitrary element. + + Args: + element: element name (testsuites, testsuite, testcase) + attributes: 2-tuple list with (attributes, values) already escaped + stream: output stream to write test report XML to + indentation: indentation added to the element header + """ + stream.write('%s<%s' % (indentation, element)) + for attribute in attributes: + if (len(attribute) == 2 and attribute[0] is not None and + attribute[1] is not None): + stream.write(' %s="%s"' % (attribute[0], attribute[1])) + stream.write('>\n') + +# Copy time.time which ensures the real time is used internally. +# This prevents bad interactions with tests that stub out time. +_time_copy = time.time + +if hasattr(traceback, '_some_str'): + # Use the traceback module str function to format safely. + _safe_str = traceback._some_str +else: + _safe_str = str # pylint: disable=invalid-name + + +class _TestCaseResult(object): + """Private helper for _TextAndXMLTestResult that represents a test result. + + Attributes: + test: A TestCase instance of an individual test method. + name: The name of the individual test method. + full_class_name: The full name of the test class. + run_time: The duration (in seconds) it took to run the test. 
+ start_time: Epoch relative timestamp of when test started (in seconds) + errors: A list of error 4-tuples. Error tuple entries are + 1) a string identifier of either "failure" or "error" + 2) an exception_type + 3) an exception_message + 4) a string version of a sys.exc_info()-style tuple of values + ('error', err[0], err[1], self._exc_info_to_string(err)) + If the length of errors is 0, then the test is either passed or + skipped. + skip_reason: A string explaining why the test was skipped. + """ + + def __init__(self, test): + self.run_time = -1 + self.start_time = -1 + self.skip_reason = None + self.errors = [] + self.test = test + + # Parse the test id to get its test name and full class path. + # Unfortunately there is no better way of knowning the test and class. + # Worse, unittest uses _ErrorHandler instances to represent class / module + # level failures. + test_desc = test.id() or str(test) + # Check if it's something like "setUpClass (__main__.TestCase)". + match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc) + if match: + name = match.group(1) + full_class_name = match.group(2) + else: + class_name = unittest.util.strclass(test.__class__) + if isinstance(test, unittest.case._SubTest): + # If the test case is a _SubTest, the real TestCase instance is + # available as _SubTest.test_case. + class_name = unittest.util.strclass(test.test_case.__class__) + if test_desc.startswith(class_name + '.'): + # In a typical unittest.TestCase scenario, test.id() returns with + # a class name formatted using unittest.util.strclass. + name = test_desc[len(class_name)+1:] + full_class_name = class_name + else: + # Otherwise make a best effort to guess the test name and full class + # path. + parts = test_desc.rsplit('.', 1) + name = parts[-1] + full_class_name = parts[0] if len(parts) == 2 else '' + self.name = _escape_xml_attr(name) + self.full_class_name = _escape_xml_attr(full_class_name) + + def set_run_time(self, time_in_secs): + self.run_time = time_in_secs + + def set_start_time(self, time_in_secs): + self.start_time = time_in_secs + + def print_xml_summary(self, stream): + """Prints an XML Summary of a TestCase. + + Status and result are populated as per JUnit XML test result reporter. + A test that has been skipped will always have a skip reason, + as every skip method in Python's unittest requires the reason arg to be + passed. 
+
+    Args:
+      stream: output stream to write test report XML to
+    """
+
+    if self.skip_reason is None:
+      status = 'run'
+      result = 'completed'
+    else:
+      status = 'notrun'
+      result = 'suppressed'
+
+    test_case_attributes = [
+        ('name', '%s' % self.name),
+        ('status', '%s' % status),
+        ('result', '%s' % result),
+        ('time', '%.3f' % self.run_time),
+        ('classname', self.full_class_name),
+        ('timestamp', _iso8601_timestamp(self.start_time)),
+    ]
+    _print_xml_element_header('testcase', test_case_attributes, stream, '  ')
+    self._print_testcase_details(stream)
+    stream.write('  </testcase>\n')
+
+  def _print_testcase_details(self, stream):
+    for error in self.errors:
+      outcome, exception_type, message, error_msg = error  # pylint: disable=unpacking-non-sequence
+      message = _escape_xml_attr(_safe_str(message))
+      exception_type = _escape_xml_attr(str(exception_type))
+      error_msg = _escape_cdata(error_msg)
+      stream.write('      <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
+                   % (outcome, message, exception_type, error_msg, outcome))
+
+
+class _TestSuiteResult(object):
+  """Private helper for _TextAndXMLTestResult."""
+
+  def __init__(self):
+    self.suites = {}
+    self.failure_counts = {}
+    self.error_counts = {}
+    self.overall_start_time = -1
+    self.overall_end_time = -1
+    self._testsuites_properties = {}
+
+  def add_test_case_result(self, test_case_result):
+    suite_name = type(test_case_result.test).__name__
+    if suite_name == '_ErrorHolder':
+      # _ErrorHolder is a special case created by unittest for class / module
+      # level functions.
+      suite_name = test_case_result.full_class_name.rsplit('.')[-1]
+    if isinstance(test_case_result.test, unittest.case._SubTest):
+      # If the test case is a _SubTest, the real TestCase instance is
+      # available as _SubTest.test_case.
+      suite_name = type(test_case_result.test.test_case).__name__
+
+    self._setup_test_suite(suite_name)
+    self.suites[suite_name].append(test_case_result)
+    for error in test_case_result.errors:
+      # Only count the first failure or error so that the sum is equal to the
+      # total number of *testcases* that have failures or errors.
+      if error[0] == 'failure':
+        self.failure_counts[suite_name] += 1
+        break
+      elif error[0] == 'error':
+        self.error_counts[suite_name] += 1
+        break
+
+  def print_xml_summary(self, stream):
+    overall_test_count = sum(len(x) for x in self.suites.values())
+    overall_failures = sum(self.failure_counts.values())
+    overall_errors = sum(self.error_counts.values())
+    overall_attributes = [
+        ('name', ''),
+        ('tests', '%d' % overall_test_count),
+        ('failures', '%d' % overall_failures),
+        ('errors', '%d' % overall_errors),
+        ('time', '%.3f' % (self.overall_end_time - self.overall_start_time)),
+        ('timestamp', _iso8601_timestamp(self.overall_start_time)),
+    ]
+    _print_xml_element_header('testsuites', overall_attributes, stream)
+    if self._testsuites_properties:
+      stream.write('    <properties>\n')
+      for name, value in sorted(self._testsuites_properties.items()):
+        stream.write('      <property name="%s" value="%s"></property>\n' %
+                     (_escape_xml_attr(name), _escape_xml_attr(str(value))))
+      stream.write('    </properties>\n')
+
+    for suite_name in self.suites:
+      suite = self.suites[suite_name]
+      suite_end_time = max(x.start_time + x.run_time for x in suite)
+      suite_start_time = min(x.start_time for x in suite)
+      failures = self.failure_counts[suite_name]
+      errors = self.error_counts[suite_name]
+      suite_attributes = [
+          ('name', '%s' % suite_name),
+          ('tests', '%d' % len(suite)),
+          ('failures', '%d' % failures),
+          ('errors', '%d' % errors),
+          ('time', '%.3f' % (suite_end_time - suite_start_time)),
+          ('timestamp', _iso8601_timestamp(suite_start_time)),
+      ]
+      _print_xml_element_header('testsuite', suite_attributes, stream)
+
+      # test_case_result entries are not guaranteed to be in any user-friendly
+      # order, especially when using subtests. So sort them.
+      for test_case_result in sorted(suite, key=lambda t: t.name):
+        test_case_result.print_xml_summary(stream)
+      stream.write('</testsuite>\n')
+    stream.write('</testsuites>\n')
+
+  def _setup_test_suite(self, suite_name):
+    """Adds a test suite to the set of suites tracked by this test run.
+
+    Args:
+      suite_name: string, The name of the test suite being initialized.
+    """
+    if suite_name in self.suites:
+      return
+    self.suites[suite_name] = []
+    self.failure_counts[suite_name] = 0
+    self.error_counts[suite_name] = 0
+
+  def set_end_time(self, timestamp_in_secs):
+    """Sets the end timestamp of this test run.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_end_time = timestamp_in_secs
+
+  def set_start_time(self, timestamp_in_secs):
+    """Sets the start timestamp of this test run.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_start_time = timestamp_in_secs
+
+
+class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
+  """Private TestResult class that produces both formatted text results and XML.
+
+  Used by TextAndXMLTestRunner.
+  """
+
+  _TEST_SUITE_RESULT_CLASS = _TestSuiteResult
+  _TEST_CASE_RESULT_CLASS = _TestCaseResult
+
+  def __init__(self, xml_stream, stream, descriptions, verbosity,
+               time_getter=_time_copy, testsuites_properties=None):
+    super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
+    self.xml_stream = xml_stream
+    self.pending_test_case_results = {}
+    self.suite = self._TEST_SUITE_RESULT_CLASS()
+    if testsuites_properties:
+      self.suite._testsuites_properties = testsuites_properties
+    self.time_getter = time_getter
+
+    # This lock guards any mutations on pending_test_case_results.
+ self._pending_test_case_results_lock = threading.RLock() + + def startTest(self, test): + self.start_time = self.time_getter() + super(_TextAndXMLTestResult, self).startTest(test) + + def stopTest(self, test): + # Grabbing the write lock to avoid conflicting with stopTestRun. + with self._pending_test_case_results_lock: + super(_TextAndXMLTestResult, self).stopTest(test) + result = self.get_pending_test_case_result(test) + if not result: + test_name = test.id() or str(test) + sys.stderr.write('No pending test case: %s\n' % test_name) + return + if getattr(self, 'start_time', None) is None: + # startTest may not be called for skipped tests since Python 3.12.1. + self.start_time = self.time_getter() + test_id = id(test) + run_time = self.time_getter() - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + del self.pending_test_case_results[test_id] + + def startTestRun(self): + self.suite.set_start_time(self.time_getter()) + super(_TextAndXMLTestResult, self).startTestRun() + + def stopTestRun(self): + self.suite.set_end_time(self.time_getter()) + # All pending_test_case_results will be added to the suite and removed from + # the pending_test_case_results dictionary. Grabbing the write lock to avoid + # results from being added during this process to avoid duplicating adds or + # accidentally erasing newly appended pending results. + with self._pending_test_case_results_lock: + # Errors in the test fixture (setUpModule, tearDownModule, + # setUpClass, tearDownClass) can leave a pending result which + # never gets added to the suite. The runner calls stopTestRun + # which gives us an opportunity to add these errors for + # reporting here. + for test_id in self.pending_test_case_results: + result = self.pending_test_case_results[test_id] + if getattr(self, 'start_time', None) is not None: + run_time = self.suite.overall_end_time - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + self.pending_test_case_results.clear() + + def _exc_info_to_string(self, err, test=None): + """Converts a sys.exc_info()-style tuple of values into a string. + + This method must be overridden because the method signature in + unittest.TestResult changed between Python 2.2 and 2.4. + + Args: + err: A sys.exc_info() tuple of values for an error. + test: The test method. + + Returns: + A formatted exception string. + """ + if test: + return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test) + return ''.join(traceback.format_exception(*err)) + + def add_pending_test_case_result(self, test, error_summary=None, + skip_reason=None): + """Adds result information to a test case result which may still be running. + + If a result entry for the test already exists, add_pending_test_case_result + will add error summary tuples and/or overwrite skip_reason for the result. + If it does not yet exist, a result entry will be created. + Note that a test result is considered to have been run and passed + only if there are no errors or skip_reason. + + Args: + test: A test method as defined by unittest + error_summary: A 4-tuple with the following entries: + 1) a string identifier of either "failure" or "error" + 2) an exception_type + 3) an exception_message + 4) a string version of a sys.exc_info()-style tuple of values + ('error', err[0], err[1], self._exc_info_to_string(err)) + If the length of errors is 0, then the test is either passed or + skipped. 
+ skip_reason: a string explaining why the test was skipped + """ + with self._pending_test_case_results_lock: + test_id = id(test) + if test_id not in self.pending_test_case_results: + self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS( + test) + if error_summary: + self.pending_test_case_results[test_id].errors.append(error_summary) + if skip_reason: + self.pending_test_case_results[test_id].skip_reason = skip_reason + + def delete_pending_test_case_result(self, test): + with self._pending_test_case_results_lock: + test_id = id(test) + del self.pending_test_case_results[test_id] + + def get_pending_test_case_result(self, test): + test_id = id(test) + return self.pending_test_case_results.get(test_id, None) + + def addSuccess(self, test): + super(_TextAndXMLTestResult, self).addSuccess(test) + self.add_pending_test_case_result(test) + + def addError(self, test, err): + super(_TextAndXMLTestResult, self).addError(test, err) + error_summary = ('error', err[0], err[1], + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addFailure(self, test, err): + super(_TextAndXMLTestResult, self).addFailure(test, err) + error_summary = ('failure', err[0], err[1], + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addSkip(self, test, reason): + super(_TextAndXMLTestResult, self).addSkip(test, reason) + self.add_pending_test_case_result(test, skip_reason=reason) + + def addExpectedFailure(self, test, err): + super(_TextAndXMLTestResult, self).addExpectedFailure(test, err) + if callable(getattr(test, 'recordProperty', None)): + test.recordProperty('EXPECTED_FAILURE', + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test) + + def addUnexpectedSuccess(self, test): + super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test) + test_name = test.id() or str(test) + error_summary = ('error', '', '', + 'Test case %s should have failed, but passed.' + % (test_name)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name + super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err) + if err is not None: + if issubclass(err[0], test.failureException): + error_summary = ('failure', err[0], err[1], + self._exc_info_to_string(err, test=test)) + else: + error_summary = ('error', err[0], err[1], + self._exc_info_to_string(err, test=test)) + else: + error_summary = None + self.add_pending_test_case_result(subtest, error_summary=error_summary) + + def printErrors(self): + super(_TextAndXMLTestResult, self).printErrors() + self.xml_stream.write('\n') + self.suite.print_xml_summary(self.xml_stream) + + +class TextAndXMLTestRunner(unittest.TextTestRunner): + """A test runner that produces both formatted text results and XML. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + """ + + _TEST_RESULT_CLASS = _TextAndXMLTestResult + + _xml_stream = None + _testsuites_properties = {} + + def __init__(self, xml_stream=None, *args, **kwargs): + """Initialize a TextAndXMLTestRunner. + + Args: + xml_stream: file-like or None; XML-formatted test results are output + via this object's write() method. If None (the default), the + new instance behaves as described in the set_default_xml_stream method + documentation below. 
+ *args: passed unmodified to unittest.TextTestRunner.__init__. + **kwargs: passed unmodified to unittest.TextTestRunner.__init__. + """ + super(TextAndXMLTestRunner, self).__init__(*args, **kwargs) + if xml_stream is not None: + self._xml_stream = xml_stream + # else, do not set self._xml_stream to None -- this allows implicit fallback + # to the class attribute's value. + + @classmethod + def set_default_xml_stream(cls, xml_stream): + """Sets the default XML stream for the class. + + Args: + xml_stream: file-like or None; used for instances when xml_stream is None + or not passed to their constructors. If None is passed, instances + created with xml_stream=None will act as ordinary TextTestRunner + instances; this is the default state before any calls to this method + have been made. + """ + cls._xml_stream = xml_stream + + def _makeResult(self): + if self._xml_stream is None: + return super(TextAndXMLTestRunner, self)._makeResult() + else: + return self._TEST_RESULT_CLASS( + self._xml_stream, self.stream, self.descriptions, self.verbosity, + testsuites_properties=self._testsuites_properties) + + @classmethod + def set_testsuites_property(cls, key, value): + cls._testsuites_properties[key] = value diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/AUTHORS b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..23b11ada16bb8e69695cf52e5994784d98054e0d --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/AUTHORS @@ -0,0 +1,7 @@ +# This is the list of Abseil authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. + +Google Inc. diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/LICENSE b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
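Looking back at the TextAndXMLTestRunner defined earlier in this diff: absltest normally selects it via --xml_output_file, but it can also be driven directly. A minimal sketch using an in-memory stream and a throwaway test case:

import io
import unittest
from absl.testing import xml_reporter

class SmokeTest(unittest.TestCase):

  def test_passes(self):
    self.assertTrue(True)

suite = unittest.TestLoader().loadTestsFromTestCase(SmokeTest)
xml_out = io.StringIO()
runner = xml_reporter.TextAndXMLTestRunner(xml_stream=xml_out, verbosity=2)
runner.run(suite)
print(xml_out.getvalue())  # JUnit-style <testsuites>/<testsuite>/<testcase> XML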
diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/METADATA b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..53e63acaec1b4983f4b540908cfe5741da93d274 --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/METADATA @@ -0,0 +1,84 @@ +Metadata-Version: 2.1 +Name: absl-py +Version: 2.1.0 +Summary: Abseil Python Common Libraries, see https://github.com/abseil/abseil-py. +Home-page: https://github.com/abseil/abseil-py +Author: The Abseil Authors +License: Apache 2.0 +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS + +# Abseil Python Common Libraries + +This repository is a collection of Python library code for building Python +applications. The code is collected from Google's own Python code base, and has +been extensively tested and used in production. + +## Features + +* Simple application startup +* Distributed commandline flags system +* Custom logging module with additional features +* Testing utilities + +## Getting Started + +### Installation + +To install the package, simply run: + +```bash +pip install absl-py +``` + +Or install from source: + +```bash +python setup.py install +``` + +### Running Tests + +To run Abseil tests, you can clone the git repo and run +[bazel](https://bazel.build/): + +```bash +git clone https://github.com/abseil/abseil-py.git +cd abseil-py +bazel test absl/... +``` + +### Example Code + +Please refer to +[smoke_tests/sample_app.py](https://github.com/abseil/abseil-py/blob/main/smoke_tests/sample_app.py) +as an example to get started. + +## Documentation + +See the [Abseil Python Developer Guide](https://abseil.io/docs/python/). + +## Future Releases + +The current repository includes an initial set of libraries for early adoption. +More components and interoperability with Abseil C++ Common Libraries +will come in future releases. + +## License + +The Abseil Python library is licensed under the terms of the Apache +license. See [LICENSE](LICENSE) for more information. 
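The "simple application startup" and "flags" features listed in the metadata above look roughly like this in practice; a minimal sketch with a made-up flag name:

from absl import app, flags, logging

flags.DEFINE_integer('times', 1, 'How many greetings to log (illustrative).')
FLAGS = flags.FLAGS


def main(argv):
  del argv  # Unused.
  for _ in range(FLAGS.times):
    logging.info('Hello from absl!')


if __name__ == '__main__':
  app.run(main)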
diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/RECORD b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a4285b3d6750f5eacecf737dd408d2b86e72aed6 --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/RECORD @@ -0,0 +1,53 @@ +absl/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584 +absl/__pycache__/__init__.cpython-39.pyc,, +absl/__pycache__/app.cpython-39.pyc,, +absl/__pycache__/command_name.cpython-39.pyc,, +absl/app.py,sha256=DQROJ_Ovex6w2_nr_s7AHgXQle951XmcVtlNrMjfSFA,15374 +absl/app.pyi,sha256=DqRvFRos3oFk00lZJSKaHZuL_3-LnZl-ylg_VAXtPcc,1737 +absl/command_name.py,sha256=C7CuwMMedDLUOX88Et92QZb2se__nU7txgpO-01amxg,2301 +absl/flags/__init__.py,sha256=FgR_NxQG1xLA2ZxLU51HTrLWV5kbN9eSCI-47Z7D3WA,7728 +absl/flags/__pycache__/__init__.cpython-39.pyc,, +absl/flags/__pycache__/_argument_parser.cpython-39.pyc,, +absl/flags/__pycache__/_defines.cpython-39.pyc,, +absl/flags/__pycache__/_exceptions.cpython-39.pyc,, +absl/flags/__pycache__/_flag.cpython-39.pyc,, +absl/flags/__pycache__/_flagvalues.cpython-39.pyc,, +absl/flags/__pycache__/_helpers.cpython-39.pyc,, +absl/flags/__pycache__/_validators.cpython-39.pyc,, +absl/flags/__pycache__/_validators_classes.cpython-39.pyc,, +absl/flags/__pycache__/argparse_flags.cpython-39.pyc,, +absl/flags/_argument_parser.py,sha256=TQFhT0OcQuRO_1GTJoUvYC1KU6wV9f4Lc7jQmajBGi0,20934 +absl/flags/_defines.py,sha256=s_YA_tAHFU4wxrJqKLH5uMldTl1DtlUfSvgBbflXkQ8,52783 +absl/flags/_exceptions.py,sha256=Lws7ZZrlLJG83VHuOB4Z4CNfcSoKX5pJnsNRCtp-dMw,3657 +absl/flags/_flag.py,sha256=Sv_d7kDSZh-VNr4JGrBy4g7VxnbRspOOd5hO6wA94qk,19895 +absl/flags/_flagvalues.py,sha256=Gferpr9yg8Ntc6ij9tPiChliYz5jYWfVJoKzAREwNFw,54127 +absl/flags/_helpers.py,sha256=uWWeqbhc19kTXonfM7mNZT68ZakmJgu-v5IHeS9A9Xc,14081 +absl/flags/_validators.py,sha256=_hpVwThXQhL6PFOA9-L2ZRI-7zLu2UxU_hRJJWXYoHw,14144 +absl/flags/_validators_classes.py,sha256=KLBJhJAt8C18gy2Uq-q7bUFNS_AhPBlxlwGiNm5gWXU,6157 +absl/flags/argparse_flags.py,sha256=57E1HFa40tvnQ3DQzY3x1qdBUIxtfTTYAYONT_k8HOI,14485 +absl/logging/__init__.py,sha256=mzF3rusWjzLbuVdZI8SfPiIoqfWO9kBUhxVOvGZQTv4,42082 +absl/logging/__init__.pyi,sha256=NPAna_9rrYTVNIHLXUbdvsAZcNlv4IJs9yNnL59mxr8,5794 +absl/logging/__pycache__/__init__.cpython-39.pyc,, +absl/logging/__pycache__/converter.cpython-39.pyc,, +absl/logging/converter.py,sha256=eTucx1Ojix7YWMQUyWKzPRTrxGLuCkNsTmJa1GW6k94,6353 +absl/testing/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584 +absl/testing/__pycache__/__init__.cpython-39.pyc,, +absl/testing/__pycache__/_bazelize_command.cpython-39.pyc,, +absl/testing/__pycache__/_pretty_print_reporter.cpython-39.pyc,, +absl/testing/__pycache__/absltest.cpython-39.pyc,, +absl/testing/__pycache__/flagsaver.cpython-39.pyc,, +absl/testing/__pycache__/parameterized.cpython-39.pyc,, +absl/testing/__pycache__/xml_reporter.cpython-39.pyc,, +absl/testing/_bazelize_command.py,sha256=R4rV4j5AOSp3PNkVQKP1I-SKYzQbXyeuiOT3d23cTLA,2302 +absl/testing/_pretty_print_reporter.py,sha256=nL5qSsYWF6O_C6L9PexwFSPxs68Wc85RhdhRBN2AgTw,3140 +absl/testing/absltest.py,sha256=sgb0TPgNP0_nLKcxrHBlifvUsgufnYURVR8Vau3f278,101119 +absl/testing/flagsaver.py,sha256=514JmVdCn-P0jsTntskCtUfxrHyp3urLdn2bzDd991s,13392 +absl/testing/parameterized.py,sha256=PT1P3X__WkFC_NyGWifUdJeqn-BM4JI3yy-1zsGaFEI,27807 +absl/testing/xml_reporter.py,sha256=k_9cWhw01RGCQImGDciTa_RrBEEuPZ3IPD5IASoRwwM,21720 
+absl_py-2.1.0.dist-info/AUTHORS,sha256=YoLudsylaQg7W5mLn4FroQMuEnuNx8RpQrhkd_xvv6U,296 +absl_py-2.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +absl_py-2.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +absl_py-2.1.0.dist-info/METADATA,sha256=CTp5OILgEjYv4Y7dpCHzW5QmM57hl-2i-AizwFlnRYA,2311 +absl_py-2.1.0.dist-info/RECORD,, +absl_py-2.1.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +absl_py-2.1.0.dist-info/top_level.txt,sha256=0M_1z27Hi5Bsj1EhTfE_ajdJdFxeP_aw0xXnR4BXXhI,5 diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/WHEEL b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..46022f6ff2f40132150ed408c691d0f8d4ce88a5 --- /dev/null +++ b/MLPY/Lib/site-packages/absl_py-2.1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +absl diff --git a/MLPY/Lib/site-packages/adodbapi/__init__.py b/MLPY/Lib/site-packages/adodbapi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0d769e058d51f5261953293e14e1efd108319c26 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/__init__.py @@ -0,0 +1,74 @@ +"""adodbapi - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/adodbapi +""" +import sys +import time + +from .adodbapi import Connection, Cursor, __version__, connect, dateconverter +from .apibase import ( + BINARY, + DATETIME, + NUMBER, + ROWID, + STRING, + DatabaseError, + DataError, + Error, + FetchFailedError, + IntegrityError, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + ProgrammingError, + Warning, + apilevel, + paramstyle, + threadsafety, +) + + +def Binary(aString): + """This function constructs an object capable of holding a binary (long) string value.""" + return bytes(aString) + + +def Date(year, month, day): + "This function constructs an object holding a date value." + return dateconverter.Date(year, month, day) + + +def Time(hour, minute, second): + "This function constructs an object holding a time value." + return dateconverter.Time(hour, minute, second) + + +def Timestamp(year, month, day, hour, minute, second): + "This function constructs an object holding a time stamp value." + return dateconverter.Timestamp(year, month, day, hour, minute, second) + + +def DateFromTicks(ticks): + """This function constructs an object holding a date value from the given ticks value + (number of seconds since the epoch; see the documentation of the standard Python time module for details). + """ + return Date(*time.gmtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + """This function constructs an object holding a time value from the given ticks value + (number of seconds since the epoch; see the documentation of the standard Python time module for details). 
+ """ + return Time(*time.gmtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + """This function constructs an object holding a time stamp value from the given + ticks value (number of seconds since the epoch; + see the documentation of the standard Python time module for details).""" + return Timestamp(*time.gmtime(ticks)[:6]) + + +version = "adodbapi v" + __version__ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b25b5843618cff8f6d33b636adcb80f3e11683af Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/ado_consts.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/ado_consts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78bf3b1cd18e0450b195c0fc7914a56d8de3634d Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/ado_consts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/adodbapi.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/adodbapi.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbaaf164e9fc856c35693cab7bcac91a025a6c09 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/adodbapi.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/apibase.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/apibase.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16fdc75a700f86a62e4395345984ec1ccc8d5001 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/apibase.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/is64bit.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/is64bit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..803ceb04e433ff6024e076213d2f94ce27437424 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/is64bit.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/process_connect_string.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/process_connect_string.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32aa6ef27b09944392dee93a0892f2142ba91bcf Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/process_connect_string.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/remote.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/remote.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e5b26ebbdc5885ae69d18225d3e9ae272940e53 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/remote.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/schema_table.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/schema_table.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89a29623e3019a691056c462b9c5ffb6f8b1ebf0 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/schema_table.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/__pycache__/setup.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2ae670fbae846219085e296197b7865f4e9e02d5 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/ado_consts.py b/MLPY/Lib/site-packages/adodbapi/ado_consts.py new file mode 100644 index 0000000000000000000000000000000000000000..ecb2147dc4c3568789c8f6901bb1be2c61ddcacf --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/ado_consts.py @@ -0,0 +1,281 @@ +# ADO enumerated constants documented on MSDN: +# http://msdn.microsoft.com/en-us/library/ms678353(VS.85).aspx + +# IsolationLevelEnum +adXactUnspecified = -1 +adXactBrowse = 0x100 +adXactChaos = 0x10 +adXactCursorStability = 0x1000 +adXactIsolated = 0x100000 +adXactReadCommitted = 0x1000 +adXactReadUncommitted = 0x100 +adXactRepeatableRead = 0x10000 +adXactSerializable = 0x100000 + +# CursorLocationEnum +adUseClient = 3 +adUseServer = 2 + +# CursorTypeEnum +adOpenDynamic = 2 +adOpenForwardOnly = 0 +adOpenKeyset = 1 +adOpenStatic = 3 +adOpenUnspecified = -1 + +# CommandTypeEnum +adCmdText = 1 +adCmdStoredProc = 4 +adSchemaTables = 20 + +# ParameterDirectionEnum +adParamInput = 1 +adParamInputOutput = 3 +adParamOutput = 2 +adParamReturnValue = 4 +adParamUnknown = 0 +directions = { + 0: "Unknown", + 1: "Input", + 2: "Output", + 3: "InputOutput", + 4: "Return", +} + + +def ado_direction_name(ado_dir): + try: + return "adParam" + directions[ado_dir] + except: + return "unknown direction (" + str(ado_dir) + ")" + + +# ObjectStateEnum +adStateClosed = 0 +adStateOpen = 1 +adStateConnecting = 2 +adStateExecuting = 4 +adStateFetching = 8 + +# FieldAttributeEnum +adFldMayBeNull = 0x40 + +# ConnectModeEnum +adModeUnknown = 0 +adModeRead = 1 +adModeWrite = 2 +adModeReadWrite = 3 +adModeShareDenyRead = 4 +adModeShareDenyWrite = 8 +adModeShareExclusive = 12 +adModeShareDenyNone = 16 +adModeRecursive = 0x400000 + +# XactAttributeEnum +adXactCommitRetaining = 131072 +adXactAbortRetaining = 262144 + +ado_error_TIMEOUT = -2147217871 + +# DataTypeEnum - ADO Data types documented at: +# http://msdn2.microsoft.com/en-us/library/ms675318.aspx +adArray = 0x2000 +adEmpty = 0x0 +adBSTR = 0x8 +adBigInt = 0x14 +adBinary = 0x80 +adBoolean = 0xB +adChapter = 0x88 +adChar = 0x81 +adCurrency = 0x6 +adDBDate = 0x85 +adDBTime = 0x86 +adDBTimeStamp = 0x87 +adDate = 0x7 +adDecimal = 0xE +adDouble = 0x5 +adError = 0xA +adFileTime = 0x40 +adGUID = 0x48 +adIDispatch = 0x9 +adIUnknown = 0xD +adInteger = 0x3 +adLongVarBinary = 0xCD +adLongVarChar = 0xC9 +adLongVarWChar = 0xCB +adNumeric = 0x83 +adPropVariant = 0x8A +adSingle = 0x4 +adSmallInt = 0x2 +adTinyInt = 0x10 +adUnsignedBigInt = 0x15 +adUnsignedInt = 0x13 +adUnsignedSmallInt = 0x12 +adUnsignedTinyInt = 0x11 +adUserDefined = 0x84 +adVarBinary = 0xCC +adVarChar = 0xC8 +adVarNumeric = 0x8B +adVarWChar = 0xCA +adVariant = 0xC +adWChar = 0x82 +# Additional constants used by introspection but not ADO itself +AUTO_FIELD_MARKER = -1000 + +adTypeNames = { + adBSTR: "adBSTR", + adBigInt: "adBigInt", + adBinary: "adBinary", + adBoolean: "adBoolean", + adChapter: "adChapter", + adChar: "adChar", + adCurrency: "adCurrency", + adDBDate: "adDBDate", + adDBTime: "adDBTime", + adDBTimeStamp: "adDBTimeStamp", + adDate: "adDate", + adDecimal: "adDecimal", + adDouble: "adDouble", + adEmpty: "adEmpty", + adError: "adError", + adFileTime: "adFileTime", + adGUID: "adGUID", + adIDispatch: "adIDispatch", + adIUnknown: "adIUnknown", + adInteger: "adInteger", + adLongVarBinary: "adLongVarBinary", + adLongVarChar: 
"adLongVarChar", + adLongVarWChar: "adLongVarWChar", + adNumeric: "adNumeric", + adPropVariant: "adPropVariant", + adSingle: "adSingle", + adSmallInt: "adSmallInt", + adTinyInt: "adTinyInt", + adUnsignedBigInt: "adUnsignedBigInt", + adUnsignedInt: "adUnsignedInt", + adUnsignedSmallInt: "adUnsignedSmallInt", + adUnsignedTinyInt: "adUnsignedTinyInt", + adUserDefined: "adUserDefined", + adVarBinary: "adVarBinary", + adVarChar: "adVarChar", + adVarNumeric: "adVarNumeric", + adVarWChar: "adVarWChar", + adVariant: "adVariant", + adWChar: "adWChar", +} + + +def ado_type_name(ado_type): + return adTypeNames.get(ado_type, "unknown type (" + str(ado_type) + ")") + + +# here in decimal, sorted by value +# adEmpty 0 Specifies no value (DBTYPE_EMPTY). +# adSmallInt 2 Indicates a two-byte signed integer (DBTYPE_I2). +# adInteger 3 Indicates a four-byte signed integer (DBTYPE_I4). +# adSingle 4 Indicates a single-precision floating-point value (DBTYPE_R4). +# adDouble 5 Indicates a double-precision floating-point value (DBTYPE_R8). +# adCurrency 6 Indicates a currency value (DBTYPE_CY). Currency is a fixed-point number +# with four digits to the right of the decimal point. It is stored in an eight-byte signed integer scaled by 10,000. +# adDate 7 Indicates a date value (DBTYPE_DATE). A date is stored as a double, the whole part of which is +# the number of days since December 30, 1899, and the fractional part of which is the fraction of a day. +# adBSTR 8 Indicates a null-terminated character string (Unicode) (DBTYPE_BSTR). +# adIDispatch 9 Indicates a pointer to an IDispatch interface on a COM object (DBTYPE_IDISPATCH). +# adError 10 Indicates a 32-bit error code (DBTYPE_ERROR). +# adBoolean 11 Indicates a boolean value (DBTYPE_BOOL). +# adVariant 12 Indicates an Automation Variant (DBTYPE_VARIANT). +# adIUnknown 13 Indicates a pointer to an IUnknown interface on a COM object (DBTYPE_IUNKNOWN). +# adDecimal 14 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_DECIMAL). +# adTinyInt 16 Indicates a one-byte signed integer (DBTYPE_I1). +# adUnsignedTinyInt 17 Indicates a one-byte unsigned integer (DBTYPE_UI1). +# adUnsignedSmallInt 18 Indicates a two-byte unsigned integer (DBTYPE_UI2). +# adUnsignedInt 19 Indicates a four-byte unsigned integer (DBTYPE_UI4). +# adBigInt 20 Indicates an eight-byte signed integer (DBTYPE_I8). +# adUnsignedBigInt 21 Indicates an eight-byte unsigned integer (DBTYPE_UI8). +# adFileTime 64 Indicates a 64-bit value representing the number of 100-nanosecond intervals since +# January 1, 1601 (DBTYPE_FILETIME). +# adGUID 72 Indicates a globally unique identifier (GUID) (DBTYPE_GUID). +# adBinary 128 Indicates a binary value (DBTYPE_BYTES). +# adChar 129 Indicates a string value (DBTYPE_STR). +# adWChar 130 Indicates a null-terminated Unicode character string (DBTYPE_WSTR). +# adNumeric 131 Indicates an exact numeric value with a fixed precision and scale (DBTYPE_NUMERIC). +# adUserDefined 132 Indicates a user-defined variable (DBTYPE_UDT). +# adUserDefined 132 Indicates a user-defined variable (DBTYPE_UDT). +# adDBDate 133 Indicates a date value (yyyymmdd) (DBTYPE_DBDATE). +# adDBTime 134 Indicates a time value (hhmmss) (DBTYPE_DBTIME). +# adDBTimeStamp 135 Indicates a date/time stamp (yyyymmddhhmmss plus a fraction in billionths) (DBTYPE_DBTIMESTAMP). +# adChapter 136 Indicates a four-byte chapter value that identifies rows in a child rowset (DBTYPE_HCHAPTER). +# adPropVariant 138 Indicates an Automation PROPVARIANT (DBTYPE_PROP_VARIANT). 
+# adVarNumeric 139 Indicates a numeric value (Parameter object only). +# adVarChar 200 Indicates a string value (Parameter object only). +# adLongVarChar 201 Indicates a long string value (Parameter object only). +# adVarWChar 202 Indicates a null-terminated Unicode character string (Parameter object only). +# adLongVarWChar 203 Indicates a long null-terminated Unicode string value (Parameter object only). +# adVarBinary 204 Indicates a binary value (Parameter object only). +# adLongVarBinary 205 Indicates a long binary value (Parameter object only). +# adArray (Does not apply to ADOX.) 0x2000 A flag value, always combined with another data type constant, +# that indicates an array of that other data type. + +# Error codes to names +adoErrors = { + 0xE7B: "adErrBoundToCommand", + 0xE94: "adErrCannotComplete", + 0xEA4: "adErrCantChangeConnection", + 0xC94: "adErrCantChangeProvider", + 0xE8C: "adErrCantConvertvalue", + 0xE8D: "adErrCantCreate", + 0xEA3: "adErrCatalogNotSet", + 0xE8E: "adErrColumnNotOnThisRow", + 0xD5D: "adErrDataConversion", + 0xE89: "adErrDataOverflow", + 0xE9A: "adErrDelResOutOfScope", + 0xEA6: "adErrDenyNotSupported", + 0xEA7: "adErrDenyTypeNotSupported", + 0xCB3: "adErrFeatureNotAvailable", + 0xEA5: "adErrFieldsUpdateFailed", + 0xC93: "adErrIllegalOperation", + 0xCAE: "adErrInTransaction", + 0xE87: "adErrIntegrityViolation", + 0xBB9: "adErrInvalidArgument", + 0xE7D: "adErrInvalidConnection", + 0xE7C: "adErrInvalidParamInfo", + 0xE82: "adErrInvalidTransaction", + 0xE91: "adErrInvalidURL", + 0xCC1: "adErrItemNotFound", + 0xBCD: "adErrNoCurrentRecord", + 0xE83: "adErrNotExecuting", + 0xE7E: "adErrNotReentrant", + 0xE78: "adErrObjectClosed", + 0xD27: "adErrObjectInCollection", + 0xD5C: "adErrObjectNotSet", + 0xE79: "adErrObjectOpen", + 0xBBA: "adErrOpeningFile", + 0xE80: "adErrOperationCancelled", + 0xE96: "adErrOutOfSpace", + 0xE88: "adErrPermissionDenied", + 0xE9E: "adErrPropConflicting", + 0xE9B: "adErrPropInvalidColumn", + 0xE9C: "adErrPropInvalidOption", + 0xE9D: "adErrPropInvalidValue", + 0xE9F: "adErrPropNotAllSettable", + 0xEA0: "adErrPropNotSet", + 0xEA1: "adErrPropNotSettable", + 0xEA2: "adErrPropNotSupported", + 0xBB8: "adErrProviderFailed", + 0xE7A: "adErrProviderNotFound", + 0xBBB: "adErrReadFile", + 0xE93: "adErrResourceExists", + 0xE92: "adErrResourceLocked", + 0xE97: "adErrResourceOutOfScope", + 0xE8A: "adErrSchemaViolation", + 0xE8B: "adErrSignMismatch", + 0xE81: "adErrStillConnecting", + 0xE7F: "adErrStillExecuting", + 0xE90: "adErrTreePermissionDenied", + 0xE8F: "adErrURLDoesNotExist", + 0xE99: "adErrURLNamedRowDoesNotExist", + 0xE98: "adErrUnavailable", + 0xE84: "adErrUnsafeOperation", + 0xE95: "adErrVolumeNotFound", + 0xBBC: "adErrWriteFile", +} diff --git a/MLPY/Lib/site-packages/adodbapi/adodbapi.py b/MLPY/Lib/site-packages/adodbapi/adodbapi.py new file mode 100644 index 0000000000000000000000000000000000000000..8f7c045ea7531fc6ea91405252aaaf20c5eb6e9a --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/adodbapi.py @@ -0,0 +1,1223 @@ +"""adodbapi - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, versions 2.1 and later by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* https://github.com/mhammond/pywin32 +* http://sourceforge.net/projects/adodbapi + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later 
version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + django adaptations and refactoring by Adam Vandenberg + +DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/ + +This module source should run correctly in CPython versions 2.7 and later, +or IronPython version 2.7 and later, +or, after running through 2to3.py, CPython 3.4 or later. +""" + +__version__ = "2.6.2.0" +version = "adodbapi v" + __version__ + +import copy +import decimal +import os +import sys +import weakref + +from . import ado_consts as adc, apibase as api, process_connect_string + +try: + verbose = int(os.environ["ADODBAPI_VERBOSE"]) +except: + verbose = False +if verbose: + print(version) + +# --- define objects to smooth out IronPython <-> CPython differences +onWin32 = False # assume the worst +if api.onIronPython: + from clr import Reference + from System import ( + Activator, + Array, + Byte, + DateTime, + DBNull, + Decimal as SystemDecimal, + Type, + ) + + def Dispatch(dispatch): + type = Type.GetTypeFromProgID(dispatch) + return Activator.CreateInstance(type) + + def getIndexedValue(obj, index): + return obj.Item[index] + +else: # try pywin32 + try: + import pythoncom + import pywintypes + import win32com.client + + onWin32 = True + + def Dispatch(dispatch): + return win32com.client.Dispatch(dispatch) + + except ImportError: + import warnings + + warnings.warn( + "pywin32 package (or IronPython) required for adodbapi.", ImportWarning + ) + + def getIndexedValue(obj, index): + return obj(index) + + +from collections.abc import Mapping + +# --- define objects to smooth out Python3000 <-> Python 2.x differences +unicodeType = str +longType = int +StringTypes = str +maxint = sys.maxsize + + +# ----------------- The .connect method ----------------- +def make_COM_connecter(): + try: + if onWin32: + pythoncom.CoInitialize() # v2.1 Paj + c = Dispatch("ADODB.Connection") # connect _after_ CoIninialize v2.1.1 adamvan + except: + raise api.InterfaceError( + "Windows COM Error: Dispatch('ADODB.Connection') failed." + ) + return c + + +def connect(*args, **kwargs): # --> a db-api connection object + """Connect to a database. + + call using: + :connection_string -- An ADODB formatted connection string, see: + * http://www.connectionstrings.com + * http://www.asp101.com/articles/john/connstring/default.asp + :timeout -- A command timeout value, in seconds (default 30 seconds) + """ + co = Connection() # make an empty connection object + + kwargs = process_connect_string.process(args, kwargs, True) + + try: # connect to the database, using the connection information in kwargs + co.connect(kwargs) + return co + except Exception as e: + message = 'Error opening connection to "%s"' % co.connection_string + raise api.OperationalError(e, message) + + +# so you could use something like: +# myConnection.paramstyle = 'named' +# The programmer may also change the default. 
+# For example, if I were using django, I would say: +# import adodbapi as Database +# Database.adodbapi.paramstyle = 'format' + +# ------- other module level defaults -------- +defaultIsolationLevel = adc.adXactReadCommitted +# Set defaultIsolationLevel on module level before creating the connection. +# For example: +# import adodbapi, ado_consts +# adodbapi.adodbapi.defaultIsolationLevel=ado_consts.adXactBrowse" +# +# Set defaultCursorLocation on module level before creating the connection. +# It may be one of the "adUse..." consts. +defaultCursorLocation = adc.adUseClient # changed from adUseServer as of v 2.3.0 + +dateconverter = api.pythonDateTimeConverter() # default + + +def format_parameters(ADOparameters, show_value=False): + """Format a collection of ADO Command Parameters. + + Used by error reporting in _execute_command. + """ + try: + if show_value: + desc = [ + 'Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s", Precision: %s, NumericScale: %s' + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Value, + p.Precision, + p.NumericScale, + ) + for p in ADOparameters + ] + else: + desc = [ + "Name: %s, Dir.: %s, Type: %s, Size: %s, Precision: %s, NumericScale: %s" + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Precision, + p.NumericScale, + ) + for p in ADOparameters + ] + return "[" + "\n".join(desc) + "]" + except: + return "[]" + + +def _configure_parameter(p, value, adotype, settings_known): + """Configure the given ADO Parameter 'p' with the Python 'value'.""" + + if adotype in api.adoBinaryTypes: + p.Size = len(value) + p.AppendChunk(value) + + elif isinstance(value, StringTypes): # v2.1 Jevon + L = len(value) + if adotype in api.adoStringTypes: # v2.2.1 Cole + if settings_known: + L = min(L, p.Size) # v2.1 Cole limit data to defined size + p.Value = value[:L] # v2.1 Jevon & v2.1 Cole + else: + p.Value = value # dont limit if db column is numeric + if L > 0: # v2.1 Cole something does not like p.Size as Zero + p.Size = L # v2.1 Jevon + + elif isinstance(value, decimal.Decimal): + if api.onIronPython: + s = str(value) + p.Value = s + p.Size = len(s) + else: + p.Value = value + exponent = value.as_tuple()[2] + digit_count = len(value.as_tuple()[1]) + p.Precision = digit_count + if exponent == 0: + p.NumericScale = 0 + elif exponent < 0: + p.NumericScale = -exponent + if p.Precision < p.NumericScale: + p.Precision = p.NumericScale + else: # exponent > 0: + p.NumericScale = 0 + p.Precision = digit_count + exponent + + elif type(value) in dateconverter.types: + if settings_known and adotype in api.adoDateTimeTypes: + p.Value = dateconverter.COMDate(value) + else: # probably a string + # provide the date as a string in the format 'YYYY-MM-dd' + s = dateconverter.DateObjectToIsoFormatString(value) + p.Value = s + p.Size = len(s) + + elif api.onIronPython and isinstance(value, longType): # Iron Python Long + s = str(value) # feature workaround for IPy 2.0 + p.Value = s + + elif adotype == adc.adEmpty: # ADO will not let you specify a null column + p.Type = ( + adc.adInteger + ) # so we will fake it to be an integer (just to have something) + p.Value = None # and pass in a Null *value* + + # For any other type, set the value and let pythoncom do the right thing. 
+ else: + p.Value = value + + +# # # # # ----- the Class that defines a connection ----- # # # # # +class Connection(object): + # include connection attributes as class attributes required by api definition. + Warning = api.Warning + Error = api.Error + InterfaceError = api.InterfaceError + DataError = api.DataError + DatabaseError = api.DatabaseError + OperationalError = api.OperationalError + IntegrityError = api.IntegrityError + InternalError = api.InternalError + NotSupportedError = api.NotSupportedError + ProgrammingError = api.ProgrammingError + FetchFailedError = api.FetchFailedError # (special for django) + # ...class attributes... (can be overridden by instance attributes) + verbose = api.verbose + + @property + def dbapi(self): # a proposed db-api version 3 extension. + "Return a reference to the DBAPI module for this Connection." + return api + + def __init__(self): # now define the instance attributes + self.connector = None + self.paramstyle = api.paramstyle + self.supportsTransactions = False + self.connection_string = "" + self.cursors = weakref.WeakValueDictionary() + self.dbms_name = "" + self.dbms_version = "" + self.errorhandler = None # use the standard error handler for this instance + self.transaction_level = 0 # 0 == Not in a transaction, at the top level + self._autocommit = False + + def connect(self, kwargs, connection_maker=make_COM_connecter): + if verbose > 9: + print("kwargs=", repr(kwargs)) + try: + self.connection_string = ( + kwargs["connection_string"] % kwargs + ) # insert keyword arguments + except Exception as e: + self._raiseConnectionError( + KeyError, "Python string format error in connection string->" + ) + self.timeout = kwargs.get("timeout", 30) + self.mode = kwargs.get("mode", adc.adModeUnknown) + self.kwargs = kwargs + if verbose: + print('%s attempting: "%s"' % (version, self.connection_string)) + self.connector = connection_maker() + self.connector.ConnectionTimeout = self.timeout + self.connector.ConnectionString = self.connection_string + self.connector.Mode = self.mode + + try: + self.connector.Open() # Open the ADO connection + except api.Error: + self._raiseConnectionError( + api.DatabaseError, + "ADO error trying to Open=%s" % self.connection_string, + ) + + try: # Stefan Fuchs; support WINCCOLEDBProvider + if getIndexedValue(self.connector.Properties, "Transaction DDL").Value != 0: + self.supportsTransactions = True + except pywintypes.com_error: + pass # Stefan Fuchs + self.dbms_name = getIndexedValue(self.connector.Properties, "DBMS Name").Value + try: # Stefan Fuchs + self.dbms_version = getIndexedValue( + self.connector.Properties, "DBMS Version" + ).Value + except pywintypes.com_error: + pass # Stefan Fuchs + self.connector.CursorLocation = defaultCursorLocation # v2.1 Rose + if self.supportsTransactions: + self.connector.IsolationLevel = defaultIsolationLevel + self._autocommit = bool(kwargs.get("autocommit", False)) + if not self._autocommit: + self.transaction_level = ( + self.connector.BeginTrans() + ) # Disables autocommit & inits transaction_level + else: + self._autocommit = True + if "paramstyle" in kwargs: + self.paramstyle = kwargs["paramstyle"] # let setattr do the error checking + self.messages = [] + if verbose: + print("adodbapi New connection at %X" % id(self)) + + def _raiseConnectionError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self, None, errorclass, errorvalue) + + def _closeAdoConnection(self): # all v2.1 Rose + """close the underlying ADO 
Connection object, + rolling it back first if it supports transactions.""" + if self.connector is None: + return + if not self._autocommit: + if self.transaction_level: + try: + self.connector.RollbackTrans() + except: + pass + self.connector.Close() + if verbose: + print("adodbapi Closed connection at %X" % id(self)) + + def close(self): + """Close the connection now (rather than whenever __del__ is called). + + The connection will be unusable from this point forward; + an Error (or subclass) exception will be raised if any operation is attempted with the connection. + The same applies to all cursor objects trying to use the connection. + """ + for crsr in list(self.cursors.values())[ + : + ]: # copy the list, then close each one + crsr.close(dont_tell_me=True) # close without back-link clearing + self.messages = [] + try: + self._closeAdoConnection() # v2.1 Rose + except Exception as e: + self._raiseConnectionError(sys.exc_info()[0], sys.exc_info()[1]) + + self.connector = None # v2.4.2.2 fix subtle timeout bug + # per M.Hammond: "I expect the benefits of uninitializing are probably fairly small, + # so never uninitializing will probably not cause any problems." + + def commit(self): + """Commit any pending transaction to the database. + + Note that if the database supports an auto-commit feature, + this must be initially off. An interface method may be provided to turn it back on. + Database modules that do not support transactions should implement this method with void functionality. + """ + self.messages = [] + if not self.supportsTransactions: + return + + try: + self.transaction_level = self.connector.CommitTrans() + if verbose > 1: + print("commit done on connection at %X" % id(self)) + if not ( + self._autocommit + or (self.connector.Attributes & adc.adXactAbortRetaining) + ): + # If attributes has adXactCommitRetaining it performs retaining commits that is, + # calling CommitTrans automatically starts a new transaction. Not all providers support this. + # If not, we will have to start a new transaction by this command: + self.transaction_level = self.connector.BeginTrans() + except Exception as e: + self._raiseConnectionError(api.ProgrammingError, e) + + def _rollback(self): + """In case a database does provide transactions this method causes the the database to roll back to + the start of any pending transaction. Closing a connection without committing the changes first will + cause an implicit rollback to be performed. + + If the database does not support the functionality required by the method, the interface should + throw an exception in case the method is used. + The preferred approach is to not implement the method and thus have Python generate + an AttributeError in case the method is requested. This allows the programmer to check for database + capabilities using the standard hasattr() function. + + For some dynamically configured interfaces it may not be appropriate to require dynamically making + the method available. These interfaces should then raise a NotSupportedError to indicate the + non-ability to perform the roll back when the method is invoked. 
+ """ + self.messages = [] + if ( + self.transaction_level + ): # trying to roll back with no open transaction causes an error + try: + self.transaction_level = self.connector.RollbackTrans() + if verbose > 1: + print("rollback done on connection at %X" % id(self)) + if not self._autocommit and not ( + self.connector.Attributes & adc.adXactAbortRetaining + ): + # If attributes has adXactAbortRetaining it performs retaining aborts that is, + # calling RollbackTrans automatically starts a new transaction. Not all providers support this. + # If not, we will have to start a new transaction by this command: + if ( + not self.transaction_level + ): # if self.transaction_level == 0 or self.transaction_level is None: + self.transaction_level = self.connector.BeginTrans() + except Exception as e: + self._raiseConnectionError(api.ProgrammingError, e) + + def __setattr__(self, name, value): + if name == "autocommit": # extension: allow user to turn autocommit on or off + if self.supportsTransactions: + object.__setattr__(self, "_autocommit", bool(value)) + try: + self._rollback() # must clear any outstanding transactions + except: + pass + return + elif name == "paramstyle": + if value not in api.accepted_paramstyles: + self._raiseConnectionError( + api.NotSupportedError, + 'paramstyle="%s" not in:%s' + % (value, repr(api.accepted_paramstyles)), + ) + elif name == "variantConversions": + value = copy.copy( + value + ) # make a new copy -- no changes in the default, please + object.__setattr__(self, name, value) + + def __getattr__(self, item): + if ( + item == "rollback" + ): # the rollback method only appears if the database supports transactions + if self.supportsTransactions: + return ( + self._rollback + ) # return the rollback method so the caller can execute it. + else: + raise AttributeError("this data provider does not support Rollback") + elif item == "autocommit": + return self._autocommit + else: + raise AttributeError( + 'no such attribute in ADO connection object as="%s"' % item + ) + + def cursor(self): + "Return a new Cursor Object using the connection." + self.messages = [] + c = Cursor(self) + return c + + def _i_am_here(self, crsr): + "message from a new cursor proclaiming its existence" + oid = id(crsr) + self.cursors[oid] = crsr + + def _i_am_closing(self, crsr): + "message from a cursor giving connection a chance to clean up" + try: + del self.cursors[id(crsr)] + except: + pass + + def printADOerrors(self): + j = self.connector.Errors.Count + if j: + print("ADO Errors:(%i)" % j) + for e in self.connector.Errors: + print("Description: %s" % e.Description) + print("Error: %s %s " % (e.Number, adc.adoErrors.get(e.Number, "unknown"))) + if e.Number == adc.ado_error_TIMEOUT: + print( + "Timeout Error: Try using adodbpi.connect(constr,timeout=Nseconds)" + ) + print("Source: %s" % e.Source) + print("NativeError: %s" % e.NativeError) + print("SQL State: %s" % e.SQLState) + + def _suggest_error_class(self): + """Introspect the current ADO Errors and determine an appropriate error class. + + Error.SQLState is a SQL-defined error condition, per the SQL specification: + http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt + + The 23000 class of errors are integrity errors. + Error 40002 is a transactional integrity error. 
+ """ + if self.connector is not None: + for e in self.connector.Errors: + state = str(e.SQLState) + if state.startswith("23") or state == "40002": + return api.IntegrityError + return api.DatabaseError + + def __del__(self): + try: + self._closeAdoConnection() # v2.1 Rose + except: + pass + self.connector = None + + def __enter__(self): # Connections are context managers + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type: + self._rollback() # automatic rollback on errors + else: + self.commit() + + def get_table_names(self): + schema = self.connector.OpenSchema(20) # constant = adSchemaTables + + tables = [] + while not schema.EOF: + name = getIndexedValue(schema.Fields, "TABLE_NAME").Value + tables.append(name) + schema.MoveNext() + del schema + return tables + + +# # # # # ----- the Class that defines a cursor ----- # # # # # +class Cursor(object): + ## ** api required attributes: + ## description... + ## This read-only attribute is a sequence of 7-item sequences. + ## Each of these sequences contains information describing one result column: + ## (name, type_code, display_size, internal_size, precision, scale, null_ok). + ## This attribute will be None for operations that do not return rows or if the + ## cursor has not had an operation invoked via the executeXXX() method yet. + ## The type_code can be interpreted by comparing it to the Type Objects specified in the section below. + ## rowcount... + ## This read-only attribute specifies the number of rows that the last executeXXX() produced + ## (for DQL statements like select) or affected (for DML statements like update or insert). + ## The attribute is -1 in case no executeXXX() has been performed on the cursor or + ## the rowcount of the last operation is not determinable by the interface.[7] + ## arraysize... + ## This read/write attribute specifies the number of rows to fetch at a time with fetchmany(). + ## It defaults to 1 meaning to fetch a single row at a time. + ## Implementations must observe this value with respect to the fetchmany() method, + ## but are free to interact with the database a single row at a time. + ## It may also be used in the implementation of executemany(). + ## ** extension attributes: + ## paramstyle... + ## allows the programmer to override the connection's default paramstyle + ## errorhandler... 
+ ## allows the programmer to override the connection's default error handler + + def __init__(self, connection): + self.command = None + self._ado_prepared = False + self.messages = [] + self.connection = connection + self.paramstyle = connection.paramstyle # used for overriding the paramstyle + self._parameter_names = [] + self.recordset_is_remote = False + self.rs = None # the ADO recordset for this cursor + self.converters = [] # conversion function for each column + self.columnNames = {} # names of columns {lowercase name : number,...} + self.numberOfColumns = 0 + self._description = None + self.rowcount = -1 + self.errorhandler = connection.errorhandler + self.arraysize = 1 + connection._i_am_here(self) + if verbose: + print( + "%s New cursor at %X on conn %X" + % (version, id(self), id(self.connection)) + ) + + def __iter__(self): # [2.1 Zamarev] + return iter(self.fetchone, None) # [2.1 Zamarev] + + def prepare(self, operation): + self.command = operation + self._description = None + self._ado_prepared = "setup" + + def __next__(self): + r = self.fetchone() + if r: + return r + raise StopIteration + + def __enter__(self): + "Allow database cursors to be used with context managers." + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + "Allow database cursors to be used with context managers." + self.close() + + def _raiseCursorError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self.connection, self, errorclass, errorvalue) + + def build_column_info(self, recordset): + self.converters = [] # convertion function for each column + self.columnNames = {} # names of columns {lowercase name : number,...} + self._description = None + + # if EOF and BOF are true at the same time, there are no records in the recordset + if (recordset is None) or (recordset.State == adc.adStateClosed): + self.rs = None + self.numberOfColumns = 0 + return + self.rs = recordset # v2.1.1 bkline + self.recordset_format = api.RS_ARRAY if api.onIronPython else api.RS_WIN_32 + self.numberOfColumns = recordset.Fields.Count + try: + varCon = self.connection.variantConversions + except AttributeError: + varCon = api.variantConversions + for i in range(self.numberOfColumns): + f = getIndexedValue(self.rs.Fields, i) + try: + self.converters.append( + varCon[f.Type] + ) # conversion function for this column + except KeyError: + self._raiseCursorError( + api.InternalError, "Data column of Unknown ADO type=%s" % f.Type + ) + self.columnNames[f.Name.lower()] = i # columnNames lookup + + def _makeDescriptionFromRS(self): + # Abort if closed or no recordset. + if self.rs is None: + self._description = None + return + desc = [] + for i in range(self.numberOfColumns): + f = getIndexedValue(self.rs.Fields, i) + if self.rs.EOF or self.rs.BOF: + display_size = None + else: + display_size = ( + f.ActualSize + ) # TODO: Is this the correct defintion according to the DB API 2 Spec ? 
+ null_ok = bool(f.Attributes & adc.adFldMayBeNull) # v2.1 Cole + desc.append( + ( + f.Name, + f.Type, + display_size, + f.DefinedSize, + f.Precision, + f.NumericScale, + null_ok, + ) + ) + self._description = desc + + def get_description(self): + if not self._description: + self._makeDescriptionFromRS() + return self._description + + def __getattr__(self, item): + if item == "description": + return self.get_description() + object.__getattribute__( + self, item + ) # may get here on Remote attribute calls for existing attributes + + def format_description(self, d): + """Format db_api description tuple for printing.""" + if self.description is None: + self._makeDescriptionFromRS() + if isinstance(d, int): + d = self.description[d] + desc = ( + "Name= %s, Type= %s, DispSize= %s, IntSize= %s, Precision= %s, Scale= %s NullOK=%s" + % ( + d[0], + adc.adTypeNames.get(d[1], str(d[1]) + " (unknown type)"), + d[2], + d[3], + d[4], + d[5], + d[6], + ) + ) + return desc + + def close(self, dont_tell_me=False): + """Close the cursor now (rather than whenever __del__ is called). + The cursor will be unusable from this point forward; an Error (or subclass) + exception will be raised if any operation is attempted with the cursor. + """ + if self.connection is None: + return + self.messages = [] + if ( + self.rs and self.rs.State != adc.adStateClosed + ): # rs exists and is open #v2.1 Rose + self.rs.Close() # v2.1 Rose + self.rs = None # let go of the recordset so ADO will let it be disposed #v2.1 Rose + if not dont_tell_me: + self.connection._i_am_closing( + self + ) # take me off the connection's cursors list + self.connection = ( + None # this will make all future method calls on me throw an exception + ) + if verbose: + print("adodbapi Closed cursor at %X" % id(self)) + + def __del__(self): + try: + self.close() + except: + pass + + def _new_command(self, command_type=adc.adCmdText): + self.cmd = None + self.messages = [] + + if self.connection is None: + self._raiseCursorError(api.InterfaceError, None) + return + try: + self.cmd = Dispatch("ADODB.Command") + self.cmd.ActiveConnection = self.connection.connector + self.cmd.CommandTimeout = self.connection.timeout + self.cmd.CommandType = command_type + self.cmd.CommandText = self.commandText + self.cmd.Prepared = bool(self._ado_prepared) + except: + self._raiseCursorError( + api.DatabaseError, + 'Error creating new ADODB.Command object for "%s"' + % repr(self.commandText), + ) + + def _execute_command(self): + # Stored procedures may have an integer return value + self.return_value = None + recordset = None + count = -1 # default value + if verbose: + print('Executing command="%s"' % self.commandText) + try: + # ----- the actual SQL is executed here --- + if api.onIronPython: + ra = Reference[int]() + recordset = self.cmd.Execute(ra) + count = ra.Value + else: # pywin32 + recordset, count = self.cmd.Execute() + # ----- ------------------------------- --- + except Exception as e: + _message = "" + if hasattr(e, "args"): + _message += str(e.args) + "\n" + _message += "Command:\n%s\nParameters:\n%s" % ( + self.commandText, + format_parameters(self.cmd.Parameters, True), + ) + klass = self.connection._suggest_error_class() + self._raiseCursorError(klass, _message) + try: + self.rowcount = recordset.RecordCount + except: + self.rowcount = count + self.build_column_info(recordset) + + # The ADO documentation hints that obtaining the recordcount may be timeconsuming + # "If the Recordset object does not support approximate positioning, this property + # may 
be a significant drain on resources # [ekelund] + # Therefore, COM will not return rowcount for server-side cursors. [Cole] + # Client-side cursors (the default since v2.8) will force a static + # cursor, and rowcount will then be set accurately [Cole] + + def get_rowcount(self): + return self.rowcount + + def get_returned_parameters(self): + """with some providers, returned parameters and the .return_value are not available until + after the last recordset has been read. In that case, you must coll nextset() until it + returns None, then call this method to get your returned information.""" + + retLst = ( + [] + ) # store procedures may return altered parameters, including an added "return value" item + for p in tuple(self.cmd.Parameters): + if verbose > 2: + print( + 'Returned=Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s",' + " Precision: %s, NumericScale: %s" + % ( + p.Name, + adc.directions[p.Direction], + adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"), + p.Size, + p.Value, + p.Precision, + p.NumericScale, + ) + ) + pyObject = api.convert_to_python(p.Value, api.variantConversions[p.Type]) + if p.Direction == adc.adParamReturnValue: + self.returnValue = ( + pyObject # also load the undocumented attribute (Vernon's Error!) + ) + self.return_value = pyObject + else: + retLst.append(pyObject) + return retLst # return the parameter list to the caller + + def callproc(self, procname, parameters=None): + """Call a stored database procedure with the given name. + The sequence of parameters must contain one entry for each + argument that the sproc expects. The result of the + call is returned as modified copy of the input + sequence. Input parameters are left untouched, output and + input/output parameters replaced with possibly new values. + + The sproc may also provide a result set as output, + which is available through the standard .fetch*() methods. + Extension: A "return_value" property may be set on the + cursor if the sproc defines an integer return value. + """ + self._parameter_names = [] + self.commandText = procname + self._new_command(command_type=adc.adCmdStoredProc) + self._buildADOparameterList(parameters, sproc=True) + if verbose > 2: + print( + "Calling Stored Proc with Params=", + format_parameters(self.cmd.Parameters, True), + ) + self._execute_command() + return self.get_returned_parameters() + + def _reformat_operation(self, operation, parameters): + if self.paramstyle in ("format", "pyformat"): # convert %s to ? + operation, self._parameter_names = api.changeFormatToQmark(operation) + elif self.paramstyle == "named" or ( + self.paramstyle == "dynamic" and isinstance(parameters, Mapping) + ): + operation, self._parameter_names = api.changeNamedToQmark( + operation + ) # convert :name to ? + return operation + + def _buildADOparameterList(self, parameters, sproc=False): + self.parameters = parameters + if parameters is None: + parameters = [] + + # Note: ADO does not preserve the parameter list, even if "Prepared" is True, so we must build every time. 
+ parameters_known = False + if sproc: # needed only if we are calling a stored procedure + try: # attempt to use ADO's parameter list + self.cmd.Parameters.Refresh() + if verbose > 2: + print( + "ADO detected Params=", + format_parameters(self.cmd.Parameters, True), + ) + print("Program Parameters=", repr(parameters)) + parameters_known = True + except api.Error: + if verbose: + print("ADO Parameter Refresh failed") + pass + else: + if len(parameters) != self.cmd.Parameters.Count - 1: + raise api.ProgrammingError( + "You must supply %d parameters for this stored procedure" + % (self.cmd.Parameters.Count - 1) + ) + if sproc or parameters != []: + i = 0 + if parameters_known: # use ado parameter list + if self._parameter_names: # named parameters + for i, pm_name in enumerate(self._parameter_names): + p = getIndexedValue(self.cmd.Parameters, i) + try: + _configure_parameter( + p, parameters[pm_name], p.Type, parameters_known + ) + except Exception as e: + _message = ( + "Error Converting Parameter %s: %s, %s <- %s\n" + % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(parameters[pm_name]), + ) + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + else: # regular sequence of parameters + for value in parameters: + p = getIndexedValue(self.cmd.Parameters, i) + if ( + p.Direction == adc.adParamReturnValue + ): # this is an extra parameter added by ADO + i += 1 # skip the extra + p = getIndexedValue(self.cmd.Parameters, i) + try: + _configure_parameter(p, value, p.Type, parameters_known) + except Exception as e: + _message = ( + "Error Converting Parameter %s: %s, %s <- %s\n" + % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(value), + ) + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + i += 1 + else: # -- build own parameter list + if ( + self._parameter_names + ): # we expect a dictionary of parameters, this is the list of expected names + for parm_name in self._parameter_names: + elem = parameters[parm_name] + adotype = api.pyTypeToADOType(elem) + p = self.cmd.CreateParameter( + parm_name, adotype, adc.adParamInput + ) + _configure_parameter(p, elem, adotype, parameters_known) + try: + self.cmd.Parameters.Append(p) + except Exception as e: + _message = "Error Building Parameter %s: %s, %s <- %s\n" % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(elem), + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + else: # expecting the usual sequence of parameters + if sproc: + p = self.cmd.CreateParameter( + "@RETURN_VALUE", adc.adInteger, adc.adParamReturnValue + ) + self.cmd.Parameters.Append(p) + + for elem in parameters: + name = "p%i" % i + adotype = api.pyTypeToADOType(elem) + p = self.cmd.CreateParameter( + name, adotype, adc.adParamInput + ) # Name, Type, Direction, Size, Value + _configure_parameter(p, elem, adotype, parameters_known) + try: + self.cmd.Parameters.Append(p) + except Exception as e: + _message = "Error Building Parameter %s: %s, %s <- %s\n" % ( + p.Name, + adc.ado_type_name(p.Type), + p.Value, + repr(elem), + ) + self._raiseCursorError( + api.DataError, _message + "->" + repr(e.args) + ) + i += 1 + if self._ado_prepared == "setup": + self._ado_prepared = ( + True # parameters will be "known" by ADO next loop + ) + + def execute(self, operation, parameters=None): + """Prepare and execute a database operation (query or command). + + Parameters may be provided as sequence or mapping and will be bound to variables in the operation. 
+ Variables are specified in a database-specific notation + (see the module's paramstyle attribute for details). [5] + A reference to the operation will be retained by the cursor. + If the same operation object is passed in again, then the cursor + can optimize its behavior. This is most effective for algorithms + where the same operation is used, but different parameters are bound to it (many times). + + For maximum efficiency when reusing an operation, it is best to use + the setinputsizes() method to specify the parameter types and sizes ahead of time. + It is legal for a parameter to not match the predefined information; + the implementation should compensate, possibly with a loss of efficiency. + + The parameters may also be specified as list of tuples to e.g. insert multiple rows in + a single operation, but this kind of usage is depreciated: executemany() should be used instead. + + Return value is not defined. + + [5] The module will use the __getitem__ method of the parameters object to map either positions + (integers) or names (strings) to parameter values. This allows for both sequences and mappings + to be used as input. + The term "bound" refers to the process of binding an input value to a database execution buffer. + In practical terms, this means that the input value is directly used as a value in the operation. + The client should not be required to "escape" the value so that it can be used -- the value + should be equal to the actual database value.""" + if ( + self.command is not operation + or self._ado_prepared == "setup" + or not hasattr(self, "commandText") + ): + if self.command is not operation: + self._ado_prepared = False + self.command = operation + self._parameter_names = [] + self.commandText = ( + operation + if (self.paramstyle == "qmark" or not parameters) + else self._reformat_operation(operation, parameters) + ) + self._new_command() + self._buildADOparameterList(parameters) + if verbose > 3: + print("Params=", format_parameters(self.cmd.Parameters, True)) + self._execute_command() + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) + and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + + Return values are not defined. + """ + self.messages = list() + total_recordcount = 0 + + self.prepare(operation) + for params in seq_of_parameters: + self.execute(self.command, params) + if self.rowcount == -1: + total_recordcount = -1 + if total_recordcount != -1: + total_recordcount += self.rowcount + self.rowcount = total_recordcount + + def _fetch(self, limit=None): + """Fetch rows from the current recordset. + + limit -- Number of rows to fetch, or None (default) to fetch all rows. 
+ """ + if self.connection is None or self.rs is None: + self._raiseCursorError( + api.FetchFailedError, "fetch() on closed connection or empty query set" + ) + return + + if self.rs.State == adc.adStateClosed or self.rs.BOF or self.rs.EOF: + return list() + if limit: # limit number of rows retrieved + ado_results = self.rs.GetRows(limit) + else: # get all rows + ado_results = self.rs.GetRows() + if ( + self.recordset_format == api.RS_ARRAY + ): # result of GetRows is a two-dimension array + length = ( + len(ado_results) // self.numberOfColumns + ) # length of first dimension + else: # pywin32 + length = len(ado_results[0]) # result of GetRows is tuples in a tuple + fetchObject = api.SQLrows( + ado_results, length, self + ) # new object to hold the results of the fetch + return fetchObject + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, + or None when no more data is available. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + """ + self.messages = [] + result = self._fetch(1) + if result: # return record (not list of records) + return result[0] + return None + + def fetchmany(self, size=None): + """Fetch the next set of rows of a query result, returning a list of tuples. An empty sequence is returned when no more rows are available. + + The number of rows to fetch per call is specified by the parameter. + If it is not given, the cursor's arraysize determines the number of rows to be fetched. + The method should try to fetch as many rows as indicated by the size parameter. + If this is not possible due to the specified number of rows not being available, + fewer rows may be returned. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + + Note there are performance considerations involved with the size parameter. + For optimal performance, it is usually best to use the arraysize attribute. + If the size parameter is used, then it is best for it to retain the same value from + one fetchmany() call to the next. + """ + self.messages = [] + if size is None: + size = self.arraysize + return self._fetch(size) + + def fetchall(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). + + Note that the cursor's arraysize attribute + can affect the performance of this operation. + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. + """ + self.messages = [] + return self._fetch() + + def nextset(self): + """Skip to the next available recordset, discarding any remaining rows from the current recordset. + + If there are no more sets, the method returns None. Otherwise, it returns a true + value and subsequent calls to the fetch methods will return rows from the next result set. + + An Error (or subclass) exception is raised if the previous call to executeXXX() + did not produce any result set or no call was issued yet. 
+ """ + self.messages = [] + if self.connection is None or self.rs is None: + self._raiseCursorError( + api.OperationalError, + ("nextset() on closed connection or empty query set"), + ) + return None + + if api.onIronPython: + try: + recordset = self.rs.NextRecordset() + except TypeError: + recordset = None + except api.Error as exc: + self._raiseCursorError(api.NotSupportedError, exc.args) + else: # pywin32 + try: # [begin 2.1 ekelund] + rsTuple = self.rs.NextRecordset() # + except pywintypes.com_error as exc: # return appropriate error + self._raiseCursorError( + api.NotSupportedError, exc.args + ) # [end 2.1 ekelund] + recordset = rsTuple[0] + if recordset is None: + return None + self.build_column_info(recordset) + return True + + def setinputsizes(self, sizes): + pass + + def setoutputsize(self, size, column=None): + pass + + def _last_query(self): # let the programmer see what query we actually used + try: + if self.parameters == None: + ret = self.commandText + else: + ret = "%s,parameters=%s" % (self.commandText, repr(self.parameters)) + except: + ret = None + return ret + + query = property(_last_query, None, None, "returns the last query executed") + + +if __name__ == "__main__": + raise api.ProgrammingError(version + " cannot be run as a main program.") diff --git a/MLPY/Lib/site-packages/adodbapi/apibase.py b/MLPY/Lib/site-packages/adodbapi/apibase.py new file mode 100644 index 0000000000000000000000000000000000000000..50770c96eeac6ff6d7d2ae1f3f89806b2e3e0c5f --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/apibase.py @@ -0,0 +1,794 @@ +"""adodbapi.apibase - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* http://sourceforge.net/projects/adodbapi +""" + +import datetime +import decimal +import numbers +import sys +import time + +# noinspection PyUnresolvedReferences +from . import ado_consts as adc + +verbose = False # debugging flag + +onIronPython = sys.platform == "cli" +if onIronPython: # we need type definitions for odd data we may need to convert + # noinspection PyUnresolvedReferences + from System import DateTime, DBNull + + NullTypes = (type(None), DBNull) +else: + DateTime = type(NotImplemented) # should never be seen on win32 + NullTypes = type(None) + +# --- define objects to smooth out Python3 <-> Python 2.x differences +unicodeType = str +longType = int +StringTypes = str +makeByteBuffer = bytes +memoryViewType = memoryview +_BaseException = Exception + +try: # jdhardy -- handle bytes under IronPython & Py3 + bytes +except NameError: + bytes = str # define it for old Pythons + + +# ------- Error handlers ------ +def standardErrorHandler(connection, cursor, errorclass, errorvalue): + err = (errorclass, errorvalue) + try: + connection.messages.append(err) + except: + pass + if cursor is not None: + try: + cursor.messages.append(err) + except: + pass + raise errorclass(errorvalue) + + +# Note: _BaseException is defined differently between Python 2.x and 3.x +class Error(_BaseException): + pass # Exception that is the base class of all other error + # exceptions. You can use this to catch all errors with one + # single 'except' statement. Warnings are not considered + # errors and thus should not use this class as base. It must + # be a subclass of the Python StandardError (defined in the + # module exceptions). 
+ + +class Warning(_BaseException): + pass + + +class InterfaceError(Error): + pass + + +class DatabaseError(Error): + pass + + +class InternalError(DatabaseError): + pass + + +class OperationalError(DatabaseError): + pass + + +class ProgrammingError(DatabaseError): + pass + + +class IntegrityError(DatabaseError): + pass + + +class DataError(DatabaseError): + pass + + +class NotSupportedError(DatabaseError): + pass + + +class FetchFailedError(OperationalError): + """ + Error is used by RawStoredProcedureQuerySet to determine when a fetch + failed due to a connection being closed or there is no record set + returned. (Non-standard, added especially for django) + """ + + pass + + +# # # # # ----- Type Objects and Constructors ----- # # # # # +# Many databases need to have the input in a particular format for binding to an operation's input parameters. +# For example, if an input is destined for a DATE column, then it must be bound to the database in a particular +# string format. Similar problems exist for "Row ID" columns or large binary items (e.g. blobs or RAW columns). +# This presents problems for Python since the parameters to the executeXXX() method are untyped. +# When the database module sees a Python string object, it doesn't know if it should be bound as a simple CHAR +# column, as a raw BINARY item, or as a DATE. +# +# To overcome this problem, a module must provide the constructors defined below to create objects that can +# hold special values. When passed to the cursor methods, the module can then detect the proper type of +# the input parameter and bind it accordingly. + +# A Cursor Object's description attribute returns information about each of the result columns of a query. +# The type_code must compare equal to one of Type Objects defined below. Type Objects may be equal to more than +# one type code (e.g. DATETIME could be equal to the type codes for date, time and timestamp columns; +# see the Implementation Hints below for details). + +# SQL NULL values are represented by the Python None singleton on input and output. + +# Note: Usage of Unix ticks for database interfacing can cause troubles because of the limited date range they cover. + + +# def Date(year,month,day): +# "This function constructs an object holding a date value. " +# return dateconverter.date(year,month,day) #dateconverter.Date(year,month,day) +# +# def Time(hour,minute,second): +# "This function constructs an object holding a time value. " +# return dateconverter.time(hour, minute, second) # dateconverter.Time(hour,minute,second) +# +# def Timestamp(year,month,day,hour,minute,second): +# "This function constructs an object holding a time stamp value. " +# return dateconverter.datetime(year,month,day,hour,minute,second) +# +# def DateFromTicks(ticks): +# """This function constructs an object holding a date value from the given ticks value +# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """ +# return Date(*time.gmtime(ticks)[:3]) +# +# def TimeFromTicks(ticks): +# """This function constructs an object holding a time value from the given ticks value +# (number of seconds since the epoch; see the documentation of the standard Python time module for details). """ +# return Time(*time.gmtime(ticks)[3:6]) +# +# def TimestampFromTicks(ticks): +# """This function constructs an object holding a time stamp value from the given +# ticks value (number of seconds since the epoch; +# see the documentation of the standard Python time module for details). 
""" +# return Timestamp(*time.gmtime(ticks)[:6]) +# +# def Binary(aString): +# """This function constructs an object capable of holding a binary (long) string value. """ +# b = makeByteBuffer(aString) +# return b +# ----- Time converters ---------------------------------------------- +class TimeConverter(object): # this is a generic time converter skeleton + def __init__(self): # the details will be filled in by instances + self._ordinal_1899_12_31 = datetime.date(1899, 12, 31).toordinal() - 1 + # Use cls.types to compare if an input parameter is a datetime + self.types = { + type(self.Date(2000, 1, 1)), + type(self.Time(12, 1, 1)), + type(self.Timestamp(2000, 1, 1, 12, 1, 1)), + datetime.datetime, + datetime.time, + datetime.date, + } + + def COMDate(self, obj): + """Returns a ComDate from a date-time""" + try: # most likely a datetime + tt = obj.timetuple() + + try: + ms = obj.microsecond + except: + ms = 0 + return self.ComDateFromTuple(tt, ms) + except: # might be a tuple + try: + return self.ComDateFromTuple(obj) + except: # try an mxdate + try: + return obj.COMDate() + except: + raise ValueError('Cannot convert "%s" to COMdate.' % repr(obj)) + + def ComDateFromTuple(self, t, microseconds=0): + d = datetime.date(t[0], t[1], t[2]) + integerPart = d.toordinal() - self._ordinal_1899_12_31 + ms = (t[3] * 3600 + t[4] * 60 + t[5]) * 1000000 + microseconds + fractPart = float(ms) / 86400000000.0 + return integerPart + fractPart + + def DateObjectFromCOMDate(self, comDate): + "Returns an object of the wanted type from a ComDate" + raise NotImplementedError # "Abstract class" + + def Date(self, year, month, day): + "This function constructs an object holding a date value." + raise NotImplementedError # "Abstract class" + + def Time(self, hour, minute, second): + "This function constructs an object holding a time value." + raise NotImplementedError # "Abstract class" + + def Timestamp(self, year, month, day, hour, minute, second): + "This function constructs an object holding a time stamp value." 
+ raise NotImplementedError # "Abstract class" + # all purpose date to ISO format converter + + def DateObjectToIsoFormatString(self, obj): + "This function should return a string in the format 'YYYY-MM-dd HH:MM:SS:ms' (ms optional)" + try: # most likely, a datetime.datetime + s = obj.isoformat(" ") + except (TypeError, AttributeError): + if isinstance(obj, datetime.date): + s = obj.isoformat() + " 00:00:00" # return exact midnight + else: + try: # maybe it has a strftime method, like mx + s = obj.strftime("%Y-%m-%d %H:%M:%S") + except AttributeError: + try: # but may be time.struct_time + s = time.strftime("%Y-%m-%d %H:%M:%S", obj) + except: + raise ValueError('Cannot convert "%s" to isoformat' % repr(obj)) + return s + + +# -- Optional: if mx extensions are installed you may use mxDateTime ---- +try: + import mx.DateTime + + mxDateTime = True +except: + mxDateTime = False +if mxDateTime: + + class mxDateTimeConverter(TimeConverter): # used optionally if installed + def __init__(self): + TimeConverter.__init__(self) + self.types.add(type(mx.DateTime)) + + def DateObjectFromCOMDate(self, comDate): + return mx.DateTime.DateTimeFromCOMDate(comDate) + + def Date(self, year, month, day): + return mx.DateTime.Date(year, month, day) + + def Time(self, hour, minute, second): + return mx.DateTime.Time(hour, minute, second) + + def Timestamp(self, year, month, day, hour, minute, second): + return mx.DateTime.Timestamp(year, month, day, hour, minute, second) + +else: + + class mxDateTimeConverter(TimeConverter): + pass # if no mx is installed + + +class pythonDateTimeConverter(TimeConverter): # standard since Python 2.3 + def __init__(self): + TimeConverter.__init__(self) + + def DateObjectFromCOMDate(self, comDate): + if isinstance(comDate, datetime.datetime): + odn = comDate.toordinal() + tim = comDate.time() + new = datetime.datetime.combine(datetime.datetime.fromordinal(odn), tim) + return new + # return comDate.replace(tzinfo=None) # make non aware + elif isinstance(comDate, DateTime): + fComDate = comDate.ToOADate() # ironPython clr Date/Time + else: + fComDate = float(comDate) # ComDate is number of days since 1899-12-31 + integerPart = int(fComDate) + floatpart = fComDate - integerPart + ##if floatpart == 0.0: + ## return datetime.date.fromordinal(integerPart + self._ordinal_1899_12_31) + dte = datetime.datetime.fromordinal( + integerPart + self._ordinal_1899_12_31 + ) + datetime.timedelta(milliseconds=floatpart * 86400000) + # millisecondsperday=86400000 # 24*60*60*1000 + return dte + + def Date(self, year, month, day): + return datetime.date(year, month, day) + + def Time(self, hour, minute, second): + return datetime.time(hour, minute, second) + + def Timestamp(self, year, month, day, hour, minute, second): + return datetime.datetime(year, month, day, hour, minute, second) + + +class pythonTimeConverter(TimeConverter): # the old, ?nix type date and time + def __init__(self): # caution: this Class gets confised by timezones and DST + TimeConverter.__init__(self) + self.types.add(time.struct_time) + + def DateObjectFromCOMDate(self, comDate): + "Returns ticks since 1970" + if isinstance(comDate, datetime.datetime): + return comDate.timetuple() + elif isinstance(comDate, DateTime): # ironPython clr date/time + fcomDate = comDate.ToOADate() + else: + fcomDate = float(comDate) + secondsperday = 86400 # 24*60*60 + # ComDate is number of days since 1899-12-31, gmtime epoch is 1970-1-1 = 25569 days + t = time.gmtime(secondsperday * (fcomDate - 25569.0)) + return t # 
year,month,day,hour,minute,second,weekday,julianday,daylightsaving=t + + def Date(self, year, month, day): + return self.Timestamp(year, month, day, 0, 0, 0) + + def Time(self, hour, minute, second): + return time.gmtime((hour * 60 + minute) * 60 + second) + + def Timestamp(self, year, month, day, hour, minute, second): + return time.localtime( + time.mktime((year, month, day, hour, minute, second, 0, 0, -1)) + ) + + +base_dateconverter = pythonDateTimeConverter() + +# ------ DB API required module attributes --------------------- +threadsafety = 1 # TODO -- find out whether this module is actually BETTER than 1. + +apilevel = "2.0" # String constant stating the supported DB API level. + +paramstyle = "qmark" # the default parameter style + +# ------ control for an extension which may become part of DB API 3.0 --- +accepted_paramstyles = ("qmark", "named", "format", "pyformat", "dynamic") + +# ------------------------------------------------------------------------------------------ +# define similar types for generic conversion routines +adoIntegerTypes = ( + adc.adInteger, + adc.adSmallInt, + adc.adTinyInt, + adc.adUnsignedInt, + adc.adUnsignedSmallInt, + adc.adUnsignedTinyInt, + adc.adBoolean, + adc.adError, +) # max 32 bits +adoRowIdTypes = (adc.adChapter,) # v2.1 Rose +adoLongTypes = (adc.adBigInt, adc.adFileTime, adc.adUnsignedBigInt) +adoExactNumericTypes = ( + adc.adDecimal, + adc.adNumeric, + adc.adVarNumeric, + adc.adCurrency, +) # v2.3 Cole +adoApproximateNumericTypes = (adc.adDouble, adc.adSingle) # v2.1 Cole +adoStringTypes = ( + adc.adBSTR, + adc.adChar, + adc.adLongVarChar, + adc.adLongVarWChar, + adc.adVarChar, + adc.adVarWChar, + adc.adWChar, +) +adoBinaryTypes = (adc.adBinary, adc.adLongVarBinary, adc.adVarBinary) +adoDateTimeTypes = (adc.adDBTime, adc.adDBTimeStamp, adc.adDate, adc.adDBDate) +adoRemainingTypes = ( + adc.adEmpty, + adc.adIDispatch, + adc.adIUnknown, + adc.adPropVariant, + adc.adArray, + adc.adUserDefined, + adc.adVariant, + adc.adGUID, +) + + +# this class is a trick to determine whether a type is a member of a related group of types. see PEP notes +class DBAPITypeObject(object): + def __init__(self, valuesTuple): + self.values = frozenset(valuesTuple) + + def __eq__(self, other): + return other in self.values + + def __ne__(self, other): + return other not in self.values + + +"""This type object is used to describe columns in a database that are string-based (e.g. CHAR). """ +STRING = DBAPITypeObject(adoStringTypes) + +"""This type object is used to describe (long) binary columns in a database (e.g. LONG, RAW, BLOBs). """ +BINARY = DBAPITypeObject(adoBinaryTypes) + +"""This type object is used to describe numeric columns in a database. """ +NUMBER = DBAPITypeObject( + adoIntegerTypes + adoLongTypes + adoExactNumericTypes + adoApproximateNumericTypes +) + +"""This type object is used to describe date/time columns in a database. """ + +DATETIME = DBAPITypeObject(adoDateTimeTypes) +"""This type object is used to describe the "Row ID" column in a database. 
""" +ROWID = DBAPITypeObject(adoRowIdTypes) + +OTHER = DBAPITypeObject(adoRemainingTypes) + +# ------- utilities for translating python data types to ADO data types --------------------------------- +typeMap = { + memoryViewType: adc.adVarBinary, + float: adc.adDouble, + type(None): adc.adEmpty, + str: adc.adBSTR, + bool: adc.adBoolean, # v2.1 Cole + decimal.Decimal: adc.adDecimal, + int: adc.adBigInt, + bytes: adc.adVarBinary, +} + + +def pyTypeToADOType(d): + tp = type(d) + try: + return typeMap[tp] + except KeyError: # The type was not defined in the pre-computed Type table + from . import dateconverter + + if ( + tp in dateconverter.types + ): # maybe it is one of our supported Date/Time types + return adc.adDate + # otherwise, attempt to discern the type by probing the data object itself -- to handle duck typing + if isinstance(d, StringTypes): + return adc.adBSTR + if isinstance(d, numbers.Integral): + return adc.adBigInt + if isinstance(d, numbers.Real): + return adc.adDouble + raise DataError('cannot convert "%s" (type=%s) to ADO' % (repr(d), tp)) + + +# # # # # # # # # # # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +# functions to convert database values to Python objects +# ------------------------------------------------------------------------ +# variant type : function converting variant to Python value +def variantConvertDate(v): + from . import dateconverter # this function only called when adodbapi is running + + return dateconverter.DateObjectFromCOMDate(v) + + +def cvtString(variant): # use to get old action of adodbapi v1 if desired + if onIronPython: + try: + return variant.ToString() + except: + pass + return str(variant) + + +def cvtDecimal(variant): # better name + return _convertNumberWithCulture(variant, decimal.Decimal) + + +def cvtNumeric(variant): # older name - don't break old code + return cvtDecimal(variant) + + +def cvtFloat(variant): + return _convertNumberWithCulture(variant, float) + + +def _convertNumberWithCulture(variant, f): + try: + return f(variant) + except (ValueError, TypeError, decimal.InvalidOperation): + try: + europeVsUS = str(variant).replace(",", ".") + return f(europeVsUS) + except (ValueError, TypeError, decimal.InvalidOperation): + pass + + +def cvtInt(variant): + return int(variant) + + +def cvtLong(variant): # only important in old versions where long and int differ + return int(variant) + + +def cvtBuffer(variant): + return bytes(variant) + + +def cvtUnicode(variant): + return str(variant) + + +def identity(x): + return x + + +def cvtUnusual(variant): + if verbose > 1: + sys.stderr.write("Conversion called for Unusual data=%s\n" % repr(variant)) + if isinstance(variant, DateTime): # COMdate or System.Date + from .adodbapi import ( # this will only be called when adodbapi is in use, and very rarely + dateconverter, + ) + + return dateconverter.DateObjectFromCOMDate(variant) + return variant # cannot find conversion function -- just give the data to the user + + +def convert_to_python(variant, func): # convert DB value into Python value + if isinstance(variant, NullTypes): # IronPython Null or None + return None + return func(variant) # call the appropriate conversion function + + +class MultiMap(dict): # builds a dictionary from {(sequence,of,keys) : function} + """A dictionary of ado.type : function -- but you can set multiple items by passing a sequence of keys""" + + # useful for defining conversion functions for groups of similar data types. 
+ def __init__(self, aDict): + for k, v in list(aDict.items()): + self[k] = v # we must call __setitem__ + + def __setitem__(self, adoType, cvtFn): + "set a single item, or a whole sequence of items" + try: # user passed us a sequence, set them individually + for type in adoType: + dict.__setitem__(self, type, cvtFn) + except TypeError: # a single value fails attempt to iterate + dict.__setitem__(self, adoType, cvtFn) + + +# initialize variantConversions dictionary used to convert SQL to Python +# this is the dictionary of default conversion functions, built by the class above. +# this becomes a class attribute for the Connection, and that attribute is used +# to build the list of column conversion functions for the Cursor +variantConversions = MultiMap( + { + adoDateTimeTypes: variantConvertDate, + adoApproximateNumericTypes: cvtFloat, + adoExactNumericTypes: cvtDecimal, # use to force decimal rather than unicode + adoLongTypes: cvtLong, + adoIntegerTypes: cvtInt, + adoRowIdTypes: cvtInt, + adoStringTypes: identity, + adoBinaryTypes: cvtBuffer, + adoRemainingTypes: cvtUnusual, + } +) + +# # # # # classes to emulate the result of cursor.fetchxxx() as a sequence of sequences # # # # # +# "an ENUM of how my low level records are laid out" +RS_WIN_32, RS_ARRAY, RS_REMOTE = list(range(1, 4)) + + +class SQLrow(object): # a single database row + # class to emulate a sequence, so that a column may be retrieved by either number or name + def __init__(self, rows, index): # "rows" is an _SQLrows object, index is which row + self.rows = rows # parent 'fetch' container object + self.index = index # my row number within parent + + def __getattr__(self, name): # used for row.columnName type of value access + try: + return self._getValue(self.rows.columnNames[name.lower()]) + except KeyError: + raise AttributeError('Unknown column name "{}"'.format(name)) + + def _getValue(self, key): # key must be an integer + if ( + self.rows.recordset_format == RS_ARRAY + ): # retrieve from two-dimensional array + v = self.rows.ado_results[key, self.index] + elif self.rows.recordset_format == RS_REMOTE: + v = self.rows.ado_results[self.index][key] + else: # pywin32 - retrieve from tuple of tuples + v = self.rows.ado_results[key][self.index] + if self.rows.converters is NotImplemented: + return v + return convert_to_python(v, self.rows.converters[key]) + + def __len__(self): + return self.rows.numberOfColumns + + def __getitem__(self, key): # used for row[key] type of value access + if isinstance(key, int): # normal row[1] designation + try: + return self._getValue(key) + except IndexError: + raise + if isinstance(key, slice): + indices = key.indices(self.rows.numberOfColumns) + vl = [self._getValue(i) for i in range(*indices)] + return tuple(vl) + try: + return self._getValue( + self.rows.columnNames[key.lower()] + ) # extension row[columnName] designation + except (KeyError, TypeError): + er, st, tr = sys.exc_info() + raise er( + 'No such key as "%s" in %s' % (repr(key), self.__repr__()) + ).with_traceback(tr) + + def __iter__(self): + return iter(self.__next__()) + + def __next__(self): + for n in range(self.rows.numberOfColumns): + yield self._getValue(n) + + def __repr__(self): # create a human readable representation + taglist = sorted(list(self.rows.columnNames.items()), key=lambda x: x[1]) + s = "" + + def __str__(self): # create a pretty human readable representation + return str( + tuple(str(self._getValue(i)) for i in range(self.rows.numberOfColumns)) + ) + + # TO-DO implement pickling an SQLrow directly + # 
def __getstate__(self): return self.__dict__ + # def __setstate__(self, d): self.__dict__.update(d) + # which basically tell pickle to treat your class just like a normal one, + # taking self.__dict__ as representing the whole of the instance state, + # despite the existence of the __getattr__. + # # # # + + +class SQLrows(object): + # class to emulate a sequence for multiple rows using a container object + def __init__(self, ado_results, numberOfRows, cursor): + self.ado_results = ado_results # raw result of SQL get + try: + self.recordset_format = cursor.recordset_format + self.numberOfColumns = cursor.numberOfColumns + self.converters = cursor.converters + self.columnNames = cursor.columnNames + except AttributeError: + self.recordset_format = RS_ARRAY + self.numberOfColumns = 0 + self.converters = [] + self.columnNames = {} + self.numberOfRows = numberOfRows + + def __len__(self): + return self.numberOfRows + + def __getitem__(self, item): # used for row or row,column access + if not self.ado_results: + return [] + if isinstance(item, slice): # will return a list of row objects + indices = item.indices(self.numberOfRows) + return [SQLrow(self, k) for k in range(*indices)] + elif isinstance(item, tuple) and len(item) == 2: + # d = some_rowsObject[i,j] will return a datum from a two-dimension address + i, j = item + if not isinstance(j, int): + try: + j = self.columnNames[j.lower()] # convert named column to numeric + except KeyError: + raise KeyError('adodbapi: no such column name as "%s"' % repr(j)) + if self.recordset_format == RS_ARRAY: # retrieve from two-dimensional array + v = self.ado_results[j, i] + elif self.recordset_format == RS_REMOTE: + v = self.ado_results[i][j] + else: # pywin32 - retrieve from tuple of tuples + v = self.ado_results[j][i] + if self.converters is NotImplemented: + return v + return convert_to_python(v, self.converters[j]) + else: + row = SQLrow(self, item) # new row descriptor + return row + + def __iter__(self): + return iter(self.__next__()) + + def __next__(self): + for n in range(self.numberOfRows): + row = SQLrow(self, n) + yield row + # # # # # + + # # # # # functions to re-format SQL requests to other paramstyle requirements # # # # # # # # # # + + +def changeNamedToQmark( + op, +): # convert from 'named' paramstyle to ADO required '?'mark parameters + outOp = "" + outparms = [] + chunks = op.split( + "'" + ) # quote all literals -- odd numbered list results are literals. + inQuotes = False + for chunk in chunks: + if inQuotes: # this is inside a quote + if chunk == "": # double apostrophe to quote one apostrophe + outOp = outOp[:-1] # so take one away + else: + outOp += "'" + chunk + "'" # else pass the quoted string as is. + else: # is SQL code -- look for a :namedParameter + while chunk: # some SQL string remains + sp = chunk.split(":", 1) + outOp += sp[0] # concat the part up to the : + s = "" + try: + chunk = sp[1] + except IndexError: + chunk = None + if chunk: # there was a parameter - parse it out + i = 0 + c = chunk[0] + while c.isalnum() or c == "_": + i += 1 + try: + c = chunk[i] + except IndexError: + break + s = chunk[:i] + chunk = chunk[i:] + if s: + outparms.append(s) # list the parameters in order + outOp += "?" # put in the Qmark + inQuotes = not inQuotes + return outOp, outparms + + +def changeFormatToQmark( + op, +): # convert from 'format' paramstyle to ADO required '?'mark parameters + outOp = "" + outparams = [] + chunks = op.split( + "'" + ) # quote all literals -- odd numbered list results are literals. 
+ inQuotes = False + for chunk in chunks: + if inQuotes: + if ( + outOp != "" and chunk == "" + ): # he used a double apostrophe to quote one apostrophe + outOp = outOp[:-1] # so take one away + else: + outOp += "'" + chunk + "'" # else pass the quoted string as is. + else: # is SQL code -- look for a %s parameter + if "%(" in chunk: # ugh! pyformat! + while chunk: # some SQL string remains + sp = chunk.split("%(", 1) + outOp += sp[0] # concat the part up to the % + if len(sp) > 1: + try: + s, chunk = sp[1].split(")s", 1) # find the ')s' + except ValueError: + raise ProgrammingError( + 'Pyformat SQL has incorrect format near "%s"' % chunk + ) + outparams.append(s) + outOp += "?" # put in the Qmark + else: + chunk = None + else: # proper '%s' format + sp = chunk.split("%s") # make each %s + outOp += "?".join(sp) # into ? + inQuotes = not inQuotes # every other chunk is a quoted string + return outOp, outparams diff --git a/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_print.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_print.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..024c590ca5b2f2fa30d1f22fe09480544ac09694 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_print.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_table_names.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_table_names.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aea92cdfbc34f88e652ce3f815d4c6fb5893d89 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/db_table_names.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_read.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_read.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ace2fc72b43a0969de986144b8affc9771458c71 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_read.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_write.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_write.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..215a7bc4b2cbd42b17c09c5add6ebca93c2ccbdc Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/examples/__pycache__/xls_write.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/examples/db_print.py b/MLPY/Lib/site-packages/adodbapi/examples/db_print.py new file mode 100644 index 0000000000000000000000000000000000000000..c0eb83ee4dbae3b579dbfd63ccfc54748b6320d3 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/examples/db_print.py @@ -0,0 +1,72 @@ +""" db_print.py -- a simple demo for ADO database reads.""" + +import sys + +import adodbapi.ado_consts as adc + +cmd_args = ("filename", "table_name") +if "help" in sys.argv: + print("possible settings keywords are:", cmd_args) + sys.exit() + +kw_args = {} # pick up filename and proxy address from command line (optionally) +for arg in sys.argv: + s = arg.split("=") + if len(s) > 1: + if s[0] in cmd_args: + kw_args[s[0]] = s[1] + +kw_args.setdefault( + "filename", "test.mdb" +) # assumes server is running from examples folder +kw_args.setdefault("table_name", "Products") # the name of the demo table + +# the server needs to select the provider based on his Python installation +provider_switch = ["provider", 
"Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"] + +# ------------------------ START HERE ------------------------------------- +# create the connection +constr = "Provider=%(provider)s;Data Source=%(filename)s" +import adodbapi as db + +con = db.connect(constr, kw_args, macro_is64bit=provider_switch) + +if kw_args["table_name"] == "?": + print("The tables in your database are:") + for name in con.get_table_names(): + print(name) +else: + # make a cursor on the connection + with con.cursor() as c: + # run an SQL statement on the cursor + sql = "select * from %s" % kw_args["table_name"] + print('performing query="%s"' % sql) + c.execute(sql) + + # check the results + print( + 'result rowcount shows as= %d. (Note: -1 means "not known")' % (c.rowcount,) + ) + print("") + print("result data description is:") + print(" NAME Type DispSize IntrnlSz Prec Scale Null?") + for d in c.description: + print( + ("%16s %-12s %8s %8d %4d %5d %s") + % (d[0], adc.adTypeNames[d[1]], d[2], d[3], d[4], d[5], bool(d[6])) + ) + print("") + print("str() of first five records are...") + + # get the results + db = c.fetchmany(5) + + # print them + for rec in db: + print(rec) + + print("") + print("repr() of next row is...") + print(repr(c.fetchone())) + print("") +con.close() diff --git a/MLPY/Lib/site-packages/adodbapi/examples/db_table_names.py b/MLPY/Lib/site-packages/adodbapi/examples/db_table_names.py new file mode 100644 index 0000000000000000000000000000000000000000..2d7bf9df0d3eb89f46a48e2cc942a8eb9e7401b2 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/examples/db_table_names.py @@ -0,0 +1,20 @@ +""" db_table_names.py -- a simple demo for ADO database table listing.""" +import sys + +import adodbapi + +try: + databasename = sys.argv[1] +except IndexError: + databasename = "test.mdb" + +provider = ["prv", "Microsoft.ACE.OLEDB.12.0", "Microsoft.Jet.OLEDB.4.0"] +constr = "Provider=%(prv)s;Data Source=%(db)s" + +# create the connection +con = adodbapi.connect(constr, db=databasename, macro_is64bit=provider) + +print("Table names in= %s" % databasename) + +for table in con.get_table_names(): + print(table) diff --git a/MLPY/Lib/site-packages/adodbapi/examples/xls_read.py b/MLPY/Lib/site-packages/adodbapi/examples/xls_read.py new file mode 100644 index 0000000000000000000000000000000000000000..10bcc57e6bb77db9d10eddbf6d2ae71deb4eb601 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/examples/xls_read.py @@ -0,0 +1,41 @@ +import sys + +import adodbapi + +try: + import adodbapi.is64bit as is64bit + + is64 = is64bit.Python() +except ImportError: + is64 = False + +if is64: + driver = "Microsoft.ACE.OLEDB.12.0" +else: + driver = "Microsoft.Jet.OLEDB.4.0" +extended = 'Extended Properties="Excel 8.0;HDR=Yes;IMEX=1;"' + +try: # first command line argument will be xls file name -- default to the one written by xls_write.py + filename = sys.argv[1] +except IndexError: + filename = "xx.xls" + +constr = "Provider=%s;Data Source=%s;%s" % (driver, filename, extended) + +conn = adodbapi.connect(constr) + +try: # second command line argument will be worksheet name -- default to first worksheet + sheet = sys.argv[2] +except IndexError: + # use ADO feature to get the name of the first worksheet + sheet = conn.get_table_names()[0] + +print("Shreadsheet=%s Worksheet=%s" % (filename, sheet)) +print("------------------------------------------------------------") +crsr = conn.cursor() +sql = "SELECT * from [%s]" % sheet +crsr.execute(sql) +for row in crsr.fetchmany(10): + print(repr(row)) +crsr.close() +conn.close() diff 
--git a/MLPY/Lib/site-packages/adodbapi/examples/xls_write.py b/MLPY/Lib/site-packages/adodbapi/examples/xls_write.py new file mode 100644 index 0000000000000000000000000000000000000000..38baefd8122fe0dfe83488ab317e5e51e57fd595 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/examples/xls_write.py @@ -0,0 +1,41 @@ +import datetime + +import adodbapi + +try: + import adodbapi.is64bit as is64bit + + is64 = is64bit.Python() +except ImportError: + is64 = False # in case the user has an old version of adodbapi +if is64: + driver = "Microsoft.ACE.OLEDB.12.0" +else: + driver = "Microsoft.Jet.OLEDB.4.0" +filename = "xx.xls" # file will be created if it does not exist +extended = 'Extended Properties="Excel 8.0;Readonly=False;"' + +constr = "Provider=%s;Data Source=%s;%s" % (driver, filename, extended) + +conn = adodbapi.connect(constr) +with conn: # will auto commit if no errors + with conn.cursor() as crsr: + try: + crsr.execute("drop table SheetOne") + except: + pass # just is case there is one already there + + # create the sheet and the header row and set the types for the columns + crsr.execute( + "create table SheetOne (Name varchar, Rank varchar, SrvcNum integer, Weight float, Birth date)" + ) + + sql = "INSERT INTO SheetOne (name, rank , srvcnum, weight, birth) values (?,?,?,?,?)" + + data = ("Mike Murphy", "SSG", 123456789, 167.8, datetime.date(1922, 12, 27)) + crsr.execute(sql, data) # write the first row of data + crsr.execute( + sql, ["John Jones", "Pvt", 987654321, 140.0, datetime.date(1921, 7, 4)] + ) # another row of data +conn.close() +print("Created spreadsheet=%s worksheet=%s" % (filename, "SheetOne")) diff --git a/MLPY/Lib/site-packages/adodbapi/is64bit.py b/MLPY/Lib/site-packages/adodbapi/is64bit.py new file mode 100644 index 0000000000000000000000000000000000000000..911c61931e2b1beb3e086d3ad3aa71df527592ae --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/is64bit.py @@ -0,0 +1,41 @@ +"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version""" +import sys + + +def Python(): + if sys.platform == "cli": # IronPython + import System + + return System.IntPtr.Size == 8 + else: + try: + return sys.maxsize > 2147483647 + except AttributeError: + return sys.maxint > 2147483647 + + +def os(): + import platform + + pm = platform.machine() + if pm != ".." and pm.endswith("64"): # recent Python (not Iron) + return True + else: + import os + + if "PROCESSOR_ARCHITEW6432" in os.environ: + return True # 32 bit program running on 64 bit Windows + try: + return os.environ["PROCESSOR_ARCHITECTURE"].endswith( + "64" + ) # 64 bit Windows 64 bit program + except (IndexError, KeyError): + pass # not Windows + try: + return "64" in platform.architecture()[0] # this often works in Linux + except: + return False # is an older version of Python, assume also an older os (best we can guess) + + +if __name__ == "__main__": + print("is64bit.Python() =", Python(), "is64bit.os() =", os()) diff --git a/MLPY/Lib/site-packages/adodbapi/license.txt b/MLPY/Lib/site-packages/adodbapi/license.txt new file mode 100644 index 0000000000000000000000000000000000000000..c255f4aae1dc73f6e394448e9492f0beebc7fc5d --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/license.txt @@ -0,0 +1,506 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. 
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) 
+ + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) 
+ + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. 
+ + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + diff --git a/MLPY/Lib/site-packages/adodbapi/process_connect_string.py b/MLPY/Lib/site-packages/adodbapi/process_connect_string.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3f9e7afa9a96a536397ec400eb4162bf226720 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/process_connect_string.py @@ -0,0 +1,144 @@ +""" a clumsy attempt at a macro language to let the programmer execute code on the server (ex: determine 64bit)""" +from . import is64bit as is64bit + + +def macro_call(macro_name, args, kwargs): + """allow the programmer to perform limited processing on the server by passing macro names and args + + :new_key - the key name the macro will create + :args[0] - macro name + :args[1:] - any arguments + :code - the value of the keyword item + :kwargs - the connection keyword dictionary. 
??key has been removed + --> the value to put in for kwargs['name'] = value + """ + if isinstance(args, (str, str)): + args = [ + args + ] # the user forgot to pass a sequence, so make a string into args[0] + new_key = args[0] + try: + if macro_name == "is64bit": + if is64bit.Python(): # if on 64 bit Python + return new_key, args[1] # return first argument + else: + try: + return new_key, args[2] # else return second argument (if defined) + except IndexError: + return new_key, "" # else return blank + + elif ( + macro_name == "getuser" + ): # get the name of the user the server is logged in under + if not new_key in kwargs: + import getpass + + return new_key, getpass.getuser() + + elif macro_name == "getnode": # get the name of the computer running the server + import platform + + try: + return new_key, args[1] % platform.node() + except IndexError: + return new_key, platform.node() + + elif macro_name == "getenv": # expand the server's environment variable args[1] + try: + dflt = args[2] # if not found, default from args[2] + except IndexError: # or blank + dflt = "" + return new_key, os.environ.get(args[1], dflt) + + elif macro_name == "auto_security": + if ( + not "user" in kwargs or not kwargs["user"] + ): # missing, blank, or Null username + return new_key, "Integrated Security=SSPI" + return new_key, "User ID=%(user)s; Password=%(password)s" % kwargs + + elif ( + macro_name == "find_temp_test_path" + ): # helper function for testing ado operation -- undocumented + import os + import tempfile + + return new_key, os.path.join( + tempfile.gettempdir(), "adodbapi_test", args[1] + ) + + raise ValueError("Unknown connect string macro=%s" % macro_name) + except: + raise ValueError("Error in macro processing %s %s" % (macro_name, repr(args))) + + +def process( + args, kwargs, expand_macros=False +): # --> connection string with keyword arguments processed. 
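+    # Illustrative sketch (values are examples only, not defaults) of the
+    # positional-argument mapping this function performs:
+    #   process(("Provider=X;", "guest", "pw", "dbserver", "mydb"), {})
+    #   --> {"connection_string": "Provider=X;", "user": "guest",
+    #        "password": "pw", "host": "dbserver", "database": "mydb"}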
+ """attempts to inject arguments into a connection string using Python "%" operator for strings + + co: adodbapi connection object + args: positional parameters from the .connect() call + kvargs: keyword arguments from the .connect() call + """ + try: + dsn = args[0] + except IndexError: + dsn = None + if isinstance( + dsn, dict + ): # as a convenience the first argument may be django settings + kwargs.update(dsn) + elif ( + dsn + ): # the connection string is passed to the connection as part of the keyword dictionary + kwargs["connection_string"] = dsn + try: + a1 = args[1] + except IndexError: + a1 = None + # historically, the second positional argument might be a timeout value + if isinstance(a1, int): + kwargs["timeout"] = a1 + # if the second positional argument is a string, then it is user + elif isinstance(a1, str): + kwargs["user"] = a1 + # if the second positional argument is a dictionary, use it as keyword arguments, too + elif isinstance(a1, dict): + kwargs.update(a1) + try: + kwargs["password"] = args[2] # the third positional argument is password + kwargs["host"] = args[3] # the fourth positional argument is host name + kwargs["database"] = args[4] # the fifth positional argument is database name + except IndexError: + pass + + # make sure connection string is defined somehow + if not "connection_string" in kwargs: + try: # perhaps 'dsn' was defined + kwargs["connection_string"] = kwargs["dsn"] + except KeyError: + try: # as a last effort, use the "host" keyword + kwargs["connection_string"] = kwargs["host"] + except KeyError: + raise TypeError("Must define 'connection_string' for ado connections") + if expand_macros: + for kwarg in list(kwargs.keys()): + if kwarg.startswith("macro_"): # If a key defines a macro + macro_name = kwarg[6:] # name without the "macro_" + macro_code = kwargs.pop( + kwarg + ) # we remove the macro_key and get the code to execute + new_key, rslt = macro_call( + macro_name, macro_code, kwargs + ) # run the code in the local context + kwargs[new_key] = rslt # put the result back in the keywords dict + # special processing for PyRO IPv6 host address + try: + s = kwargs["proxy_host"] + if ":" in s: # it is an IPv6 address + if s[0] != "[": # is not surrounded by brackets + kwargs["proxy_host"] = s.join(("[", "]")) # put it in brackets + except KeyError: + pass + return kwargs diff --git a/MLPY/Lib/site-packages/adodbapi/readme.txt b/MLPY/Lib/site-packages/adodbapi/readme.txt new file mode 100644 index 0000000000000000000000000000000000000000..02bb620f4d1d4506c05cbd49fea8f050506dd9ba --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/readme.txt @@ -0,0 +1,92 @@ +Project +------- +adodbapi + +A Python DB-API 2.0 (PEP-249) module that makes it easy to use Microsoft ADO +for connecting with databases and other data sources +using either CPython or IronPython. + +Home page: + +Features: +* 100% DB-API 2.0 (PEP-249) compliant (including most extensions and recommendations). +* Includes pyunit testcases that describe how to use the module. +* Fully implemented in Python. -- runs in Python 2.5+ Python 3.0+ and IronPython 2.6+ +* Licensed under the LGPL license, which means that it can be used freely even in commercial programs subject to certain restrictions. 
+* The user can choose between paramstyles: 'qmark' 'named' 'format' 'pyformat' 'dynamic' +* Supports data retrieval by column name e.g.: + for row in myCurser.execute("select name,age from students"): + print("Student", row.name, "is", row.age, "years old.") +* Supports user-definable system-to-Python data conversion functions (selected by ADO data type, or by column) + +Prerequisites: +* C Python 2.7 or 3.5 or higher + and pywin32 (Mark Hammond's python for windows extensions.) +or + Iron Python 2.7 or higher. (works in IPy2.0 for all data types except BUFFER) + +Installation: +* (C-Python on Windows): Install pywin32 ("pip install pywin32") which includes adodbapi. +* (IronPython on Windows): Download adodbapi from http://sf.net/projects/adodbapi. Unpack the zip. + Open a command window as an administrator. CD to the folder containing the unzipped files. + Run "setup.py install" using the IronPython of your choice. + +NOTE: ........... +If you do not like the new default operation of returning Numeric columns as decimal.Decimal, +you can select other options by the user defined conversion feature. +Try: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtString +or: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = adodbapi.apibase.cvtFloat +or: + adodbapi.apibase.variantConversions[adodbapi.ado_consts.adNumeric] = write_your_own_convertion_function + ............ +notes for 2.6.2: + The definitive source has been moved to https://github.com/mhammond/pywin32/tree/master/adodbapi. + Remote has proven too hard to configure and test with Pyro4. I am moving it to unsupported status + until I can change to a different connection method. +whats new in version 2.6 + A cursor.prepare() method and support for prepared SQL statements. + Lots of refactoring, especially of the Remote and Server modules (still to be treated as Beta code). + The quick start document 'quick_reference.odt' will export as a nice-looking pdf. + Added paramstyles 'pyformat' and 'dynamic'. If your 'paramstyle' is 'named' you _must_ pass a dictionary of + parameters to your .execute() method. If your 'paramstyle' is 'format' 'pyformat' or 'dynamic', you _may_ + pass a dictionary of parameters -- provided your SQL operation string is formatted correctly. + +whats new in version 2.5 + Remote module: (works on Linux!) allows a Windows computer to serve ADO databases via PyRO + Server module: PyRO server for ADO. Run using a command like= C:>python -m adodbapi.server + (server has simple connection string macros: is64bit, getuser, sql_provider, auto_security) + Brief documentation included. See adodbapi/examples folder adodbapi.rtf + New connection method conn.get_table_names() --> list of names of tables in database + + Vastly refactored. Data conversion things have been moved to the new adodbapi.apibase module. + Many former module-level attributes are now class attributes. (Should be more thread-safe) + Connection objects are now context managers for transactions and will commit or rollback. + Cursor objects are context managers and will automatically close themselves. + Autocommit can be switched on and off. + Keyword and positional arguments on the connect() method work as documented in PEP 249. + Keyword arguments from the connect call can be formatted into the connection string. + New keyword arguments defined, such as: autocommit, paramstyle, remote_proxy, remote_port. 
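+  As a minimal sketch of the connect() keywords described above (the provider
+  string, host, and table names here are illustrative only, not defaults):
+    import adodbapi
+    conn = adodbapi.connect(
+        "Provider=SQLOLEDB.1; Data Source=%(host)s; Initial Catalog=%(database)s; Integrated Security=SSPI",
+        host="localhost", database="Northwind", autocommit=True, paramstyle="named")
+    with conn.cursor() as crsr:
+        # the 'named' paramstyle requires a mapping of parameters
+        crsr.execute("SELECT name, age FROM students WHERE age > :minage", {"minage": 18})
+        for row in crsr:
+            print(row.name, "is", row.age)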
+ *** Breaking change: variantConversion lookups are simplified: the following will raise KeyError: + oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes] + Refactor as: oldconverter=adodbapi.variantConversions[adodbapi.adoStringTypes[0]] + +License +------- +LGPL, see http://www.opensource.org/licenses/lgpl-license.php + +Documentation +------------- + +Look at adodbapi/quick_reference.md +http://www.python.org/topics/database/DatabaseAPI-2.0.html +read the examples in adodbapi/examples +and look at the test cases in adodbapi/test directory. + +Mailing lists +------------- +The adodbapi mailing lists have been deactivated. Submit comments to the +pywin32 or IronPython mailing lists. + -- the bug tracker on sourceforge.net/projects/adodbapi may be checked, (infrequently). + -- please use: https://github.com/mhammond/pywin32/issues diff --git a/MLPY/Lib/site-packages/adodbapi/remote.py b/MLPY/Lib/site-packages/adodbapi/remote.py new file mode 100644 index 0000000000000000000000000000000000000000..ae22b5a7ea6e3c771a7f0bd5a6c3e8a26a5858ef --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/remote.py @@ -0,0 +1,634 @@ +"""adodbapi.remote - A python DB API 2.0 (PEP 249) interface to Microsoft ADO + +Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole +* http://sourceforge.net/projects/pywin32 +* http://sourceforge.net/projects/adodbapi + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + django adaptations and refactoring thanks to Adam Vandenberg + +DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/ + +This module source should run correctly in CPython versions 2.5 and later, +or IronPython version 2.7 and later, +or, after running through 2to3.py, CPython 3.0 or later. +""" + +__version__ = "2.6.0.4" +version = "adodbapi.remote v" + __version__ + +import array +import datetime +import os +import sys +import time + +# Pyro4 is required for server and remote operation --> https://pypi.python.org/pypi/Pyro4/ +try: + import Pyro4 +except ImportError: + print('* * * Sorry, server operation requires Pyro4. 
Please "pip import" it.') + exit(11) + +import adodbapi +import adodbapi.apibase as api +import adodbapi.process_connect_string +from adodbapi.apibase import ProgrammingError + +_BaseException = api._BaseException + +sys.excepthook = Pyro4.util.excepthook +Pyro4.config.PREFER_IP_VERSION = 0 # allow system to prefer IPv6 +Pyro4.config.COMMTIMEOUT = 40.0 # a bit longer than the default SQL server Gtimeout +Pyro4.config.SERIALIZER = "pickle" + +try: + verbose = int(os.environ["ADODBAPI_VERBOSE"]) +except: + verbose = False +if verbose: + print(version) + +# --- define objects to smooth out Python3 <-> Python 2.x differences +unicodeType = str # this line will be altered by 2to3.py to '= str' +longType = int # this line will be altered by 2to3.py to '= int' +StringTypes = str +makeByteBuffer = bytes +memoryViewType = memoryview + +# ----------------------------------------------------------- +# conversion functions mandated by PEP 249 +Binary = makeByteBuffer # override the function from apibase.py + + +def Date(year, month, day): + return datetime.date(year, month, day) # dateconverter.Date(year,month,day) + + +def Time(hour, minute, second): + return datetime.time(hour, minute, second) # dateconverter.Time(hour,minute,second) + + +def Timestamp(year, month, day, hour, minute, second): + return datetime.datetime(year, month, day, hour, minute, second) + + +def DateFromTicks(ticks): + return Date(*time.gmtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + return Time(*time.gmtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + return Timestamp(*time.gmtime(ticks)[:6]) + + +def connect(*args, **kwargs): # --> a remote db-api connection object + """Create and open a remote db-api database connection object""" + # process the argument list the programmer gave us + kwargs = adodbapi.process_connect_string.process(args, kwargs) + # the "proxy_xxx" keys tell us where to find the PyRO proxy server + kwargs.setdefault( + "pyro_connection", "PYRO:ado.connection@%(proxy_host)s:%(proxy_port)s" + ) + if not "proxy_port" in kwargs: + try: + pport = os.environ["PROXY_PORT"] + except KeyError: + pport = 9099 + kwargs["proxy_port"] = pport + if not "proxy_host" in kwargs or not kwargs["proxy_host"]: + try: + phost = os.environ["PROXY_HOST"] + except KeyError: + phost = "[::1]" # '127.0.0.1' + kwargs["proxy_host"] = phost + ado_uri = kwargs["pyro_connection"] % kwargs + # ask PyRO make us a remote connection object + auto_retry = 3 + while auto_retry: + try: + dispatcher = Pyro4.Proxy(ado_uri) + if "comm_timeout" in kwargs: + dispatcher._pyroTimeout = float(kwargs["comm_timeout"]) + uri = dispatcher.make_connection() + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 + if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError("Cannot create connection to=%s" % ado_uri) + + conn_uri = fix_uri(uri, kwargs) # get a host connection from the proxy server + while auto_retry: + try: + host_conn = Pyro4.Proxy( + conn_uri + ) # bring up an exclusive Pyro connection for my ADO connection + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 + if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError( + "Cannot create ADO connection object using=%s" % conn_uri + ) + if "comm_timeout" in kwargs: + host_conn._pyroTimeout = float(kwargs["comm_timeout"]) + # make a local clone + myConn = Connection() + while auto_retry: + try: + myConn.connect( + kwargs, host_conn + ) # call my connect method -- hand him the host connection + break + except Pyro4.core.errors.PyroError: + auto_retry -= 1 
+ if auto_retry: + time.sleep(1) + else: + raise api.DatabaseError( + "Pyro error creating connection to/thru=%s" % repr(kwargs) + ) + except _BaseException as e: + raise api.DatabaseError( + "Error creating remote connection to=%s, e=%s, %s" + % (repr(kwargs), repr(e), sys.exc_info()[2]) + ) + return myConn + + +def fix_uri(uri, kwargs): + """convert a generic pyro uri with '0.0.0.0' into the address we actually called""" + u = uri.asString() + s = u.split("[::0]") # IPv6 generic address + if len(s) == 1: # did not find one + s = u.split("0.0.0.0") # IPv4 generic address + if len(s) > 1: # found a generic + return kwargs["proxy_host"].join(s) # fill in our address for the host + return uri + + +# # # # # ----- the Class that defines a connection ----- # # # # # +class Connection(object): + # include connection attributes required by api definition. + Warning = api.Warning + Error = api.Error + InterfaceError = api.InterfaceError + DataError = api.DataError + DatabaseError = api.DatabaseError + OperationalError = api.OperationalError + IntegrityError = api.IntegrityError + InternalError = api.InternalError + NotSupportedError = api.NotSupportedError + ProgrammingError = api.ProgrammingError + # set up some class attributes + paramstyle = api.paramstyle + + @property + def dbapi(self): # a proposed db-api version 3 extension. + "Return a reference to the DBAPI module for this Connection." + return api + + def __init__(self): + self.proxy = None + self.kwargs = {} + self.errorhandler = None + self.supportsTransactions = False + self.paramstyle = api.paramstyle + self.timeout = 30 + self.cursors = {} + + def connect(self, kwargs, connection_maker): + self.kwargs = kwargs + if verbose: + print('%s attempting: "%s"' % (version, repr(kwargs))) + self.proxy = connection_maker + ##try: + ret = self.proxy.connect(kwargs) # ask the server to hook us up + ##except ImportError, e: # Pyro is trying to import pywinTypes.comerrer + ## self._raiseConnectionError(api.DatabaseError, 'Proxy cannot connect using=%s' % repr(kwargs)) + if ret is not True: + self._raiseConnectionError( + api.OperationalError, "Proxy returns error message=%s" % repr(ret) + ) + + self.supportsTransactions = self.getIndexedValue("supportsTransactions") + self.paramstyle = self.getIndexedValue("paramstyle") + self.timeout = self.getIndexedValue("timeout") + if verbose: + print("adodbapi.remote New connection at %X" % id(self)) + + def _raiseConnectionError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self, None, errorclass, errorvalue) + + def close(self): + """Close the connection now (rather than whenever __del__ is called). + + The connection will be unusable from this point forward; + an Error (or subclass) exception will be raised if any operation is attempted with the connection. + The same applies to all cursor objects trying to use the connection. + """ + for crsr in list(self.cursors.values())[ + : + ]: # copy the list, then close each one + crsr.close() + try: + """close the underlying remote Connection object""" + self.proxy.close() + if verbose: + print("adodbapi.remote Closed connection at %X" % id(self)) + object.__delattr__( + self, "proxy" + ) # future attempts to use closed cursor will be caught by __getattr__ + except Exception: + pass + + def __del__(self): + try: + self.proxy.close() + except: + pass + + def commit(self): + """Commit any pending transaction to the database. 
+ + Note that if the database supports an auto-commit feature, + this must be initially off. An interface method may be provided to turn it back on. + Database modules that do not support transactions should implement this method with void functionality. + """ + if not self.supportsTransactions: + return + result = self.proxy.commit() + if result: + self._raiseConnectionError( + api.OperationalError, "Error during commit: %s" % result + ) + + def _rollback(self): + """In case a database does provide transactions this method causes the the database to roll back to + the start of any pending transaction. Closing a connection without committing the changes first will + cause an implicit rollback to be performed. + """ + result = self.proxy.rollback() + if result: + self._raiseConnectionError( + api.OperationalError, "Error during rollback: %s" % result + ) + + def __setattr__(self, name, value): + if name in ("paramstyle", "timeout", "autocommit"): + if self.proxy: + self.proxy.send_attribute_to_host(name, value) + object.__setattr__(self, name, value) # store attribute locally (too) + + def __getattr__(self, item): + if ( + item == "rollback" + ): # the rollback method only appears if the database supports transactions + if self.supportsTransactions: + return ( + self._rollback + ) # return the rollback method so the caller can execute it. + else: + raise self.ProgrammingError( + "this data provider does not support Rollback" + ) + elif item in ( + "dbms_name", + "dbms_version", + "connection_string", + "autocommit", + ): # 'messages' ): + return self.getIndexedValue(item) + elif item == "proxy": + raise self.ProgrammingError("Attempting to use closed connection") + else: + raise self.ProgrammingError('No remote access for attribute="%s"' % item) + + def getIndexedValue(self, index): + r = self.proxy.get_attribute_for_remote(index) + return r + + def cursor(self): + "Return a new Cursor Object using the connection." 
+ myCursor = Cursor(self) + return myCursor + + def _i_am_here(self, crsr): + "message from a new cursor proclaiming its existence" + self.cursors[crsr.id] = crsr + + def _i_am_closing(self, crsr): + "message from a cursor giving connection a chance to clean up" + try: + del self.cursors[crsr.id] + except: + pass + + def __enter__(self): # Connections are context managers + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type: + self._rollback() # automatic rollback on errors + else: + self.commit() + + def get_table_names(self): + return self.proxy.get_table_names() + + +def fixpickle(x): + """pickle barfs on buffer(x) so we pass as array.array(x) then restore to original form for .execute()""" + if x is None: + return None + if isinstance(x, dict): + # for 'named' paramstyle user will pass a mapping + newargs = {} + for arg, val in list(x.items()): + if isinstance(val, memoryViewType): + newval = array.array("B") + newval.fromstring(val) + newargs[arg] = newval + else: + newargs[arg] = val + return newargs + # if not a mapping, then a sequence + newargs = [] + for arg in x: + if isinstance(arg, memoryViewType): + newarg = array.array("B") + newarg.fromstring(arg) + newargs.append(newarg) + else: + newargs.append(arg) + return newargs + + +class Cursor(object): + def __init__(self, connection): + self.command = None + self.errorhandler = None ## was: connection.errorhandler + self.connection = connection + self.proxy = self.connection.proxy + self.rs = None # the fetchable data for this cursor + self.converters = NotImplemented + self.id = connection.proxy.build_cursor() + connection._i_am_here(self) + self.recordset_format = api.RS_REMOTE + if verbose: + print( + "%s New cursor at %X on conn %X" + % (version, id(self), id(self.connection)) + ) + + def prepare(self, operation): + self.command = operation + try: + del self.description + except AttributeError: + pass + self.proxy.crsr_prepare(self.id, operation) + + def __iter__(self): # [2.1 Zamarev] + return iter(self.fetchone, None) # [2.1 Zamarev] + + def __next__(self): + r = self.fetchone() + if r: + return r + raise StopIteration + + def __enter__(self): + "Allow database cursors to be used with context managers." + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + "Allow database cursors to be used with context managers." + self.close() + + def __getattr__(self, key): + if key == "numberOfColumns": + try: + return len(self.rs[0]) + except: + return 0 + if key == "description": + try: + self.description = self.proxy.crsr_get_description(self.id)[:] + return self.description + except TypeError: + return None + if key == "columnNames": + try: + r = dict( + self.proxy.crsr_get_columnNames(self.id) + ) # copy the remote columns + + except TypeError: + r = {} + self.columnNames = r + return r + + if key == "remote_cursor": + raise api.OperationalError + try: + return self.proxy.crsr_get_attribute_for_remote(self.id, key) + except AttributeError: + raise api.InternalError( + 'Failure getting attribute "%s" from proxy cursor.' 
% key + ) + + def __setattr__(self, key, value): + if key == "arraysize": + self.proxy.crsr_set_arraysize(self.id, value) + if key == "paramstyle": + if value in api.accepted_paramstyles: + self.proxy.crsr_set_paramstyle(self.id, value) + else: + self._raiseCursorError( + api.ProgrammingError, 'invalid paramstyle ="%s"' % value + ) + object.__setattr__(self, key, value) + + def _raiseCursorError(self, errorclass, errorvalue): + eh = self.errorhandler + if eh is None: + eh = api.standardErrorHandler + eh(self.connection, self, errorclass, errorvalue) + + def execute(self, operation, parameters=None): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = operation + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + fp = fixpickle(parameters) + if verbose > 2: + print( + ( + '%s executing "%s" with params=%s' + % (version, operation, repr(parameters)) + ) + ) + result = self.proxy.crsr_execute(self.id, operation, fp) + if result: # an exception was triggered + self._raiseCursorError(result[0], result[1]) + + def executemany(self, operation, seq_of_parameters): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = operation + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + sq = [fixpickle(x) for x in seq_of_parameters] + if verbose > 2: + print( + ( + '%s executemany "%s" with params=%s' + % (version, operation, repr(seq_of_parameters)) + ) + ) + self.proxy.crsr_executemany(self.id, operation, sq) + + def nextset(self): + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + if verbose > 2: + print(("%s nextset" % version)) + return self.proxy.crsr_nextset(self.id) + + def callproc(self, procname, parameters=None): + if self.connection is None: + self._raiseCursorError( + ProgrammingError, "Attempted operation on closed cursor" + ) + self.command = procname + try: + del self.description + except AttributeError: + pass + try: + del self.columnNames + except AttributeError: + pass + fp = fixpickle(parameters) + if verbose > 2: + print( + ( + '%s callproc "%s" with params=%s' + % (version, procname, repr(parameters)) + ) + ) + return self.proxy.crsr_callproc(self.id, procname, fp) + + def fetchone(self): + try: + f1 = self.proxy.crsr_fetchone(self.id) + except _BaseException as e: + self._raiseCursorError(api.DatabaseError, e) + else: + if f1 is None: + return None + self.rs = [f1] + return api.SQLrows(self.rs, 1, self)[ + 0 + ] # new object to hold the results of the fetch + + def fetchmany(self, size=None): + try: + self.rs = self.proxy.crsr_fetchmany(self.id, size) + if not self.rs: + return [] + r = api.SQLrows(self.rs, len(self.rs), self) + return r + except Exception as e: + self._raiseCursorError(api.DatabaseError, e) + + def fetchall(self): + try: + self.rs = self.proxy.crsr_fetchall(self.id) + if not self.rs: + return [] + return api.SQLrows(self.rs, len(self.rs), self) + except Exception as e: + self._raiseCursorError(api.DatabaseError, e) + + def close(self): + if self.connection is None: + return + self.connection._i_am_closing(self) # take me off the connection's cursors list + try: + self.proxy.crsr_close(self.id) + except: + pass + try: + del self.description + except: + pass + try: + del self.rs # let go 
of the recordset + except: + pass + self.connection = ( + None # this will make all future method calls on me throw an exception + ) + self.proxy = None + if verbose: + print("adodbapi.remote Closed cursor at %X" % id(self)) + + def __del__(self): + try: + self.close() + except: + pass + + def setinputsizes(self, sizes): + pass + + def setoutputsize(self, size, column=None): + pass diff --git a/MLPY/Lib/site-packages/adodbapi/schema_table.py b/MLPY/Lib/site-packages/adodbapi/schema_table.py new file mode 100644 index 0000000000000000000000000000000000000000..21ad37e31d3d78dd575c4359a267a0507ce2e71c --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/schema_table.py @@ -0,0 +1,15 @@ +"""call using an open ADO connection --> list of table names""" +from . import adodbapi + + +def names(connection_object): + ado = connection_object.adoConn + schema = ado.OpenSchema(20) # constant = adSchemaTables + + tables = [] + while not schema.EOF: + name = adodbapi.getIndexedValue(schema.Fields, "TABLE_NAME").Value + tables.append(name) + schema.MoveNext() + del schema + return tables diff --git a/MLPY/Lib/site-packages/adodbapi/setup.py b/MLPY/Lib/site-packages/adodbapi/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..d25869adf9b43179c67a1a08bdd34104a1456c1c --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/setup.py @@ -0,0 +1,70 @@ +"""adodbapi -- a pure Python PEP 249 DB-API package using Microsoft ADO + +Adodbapi can be run on CPython 3.5 and later. +or IronPython version 2.6 and later (in theory, possibly no longer in practice!) +""" +CLASSIFIERS = """\ +Development Status :: 5 - Production/Stable +Intended Audience :: Developers +License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Operating System :: Microsoft :: Windows +Operating System :: POSIX :: Linux +Programming Language :: Python +Programming Language :: Python :: 3 +Programming Language :: SQL +Topic :: Software Development +Topic :: Software Development :: Libraries :: Python Modules +Topic :: Database +""" + +NAME = "adodbapi" +MAINTAINER = "Vernon Cole" +MAINTAINER_EMAIL = "vernondcole@gmail.com" +DESCRIPTION = ( + """A pure Python package implementing PEP 249 DB-API using Microsoft ADO.""" +) +URL = "http://sourceforge.net/projects/adodbapi" +LICENSE = "LGPL" +CLASSIFIERS = filter(None, CLASSIFIERS.split("\n")) +AUTHOR = "Henrik Ekelund, Vernon Cole, et.al." 
+AUTHOR_EMAIL = "vernondcole@gmail.com" +PLATFORMS = ["Windows", "Linux"] + +VERSION = None # in case searching for version fails +a = open("adodbapi.py") # find the version string in the source code +for line in a: + if "__version__" in line: + VERSION = line.split("'")[1] + print('adodbapi version="%s"' % VERSION) + break +a.close() + + +def setup_package(): + from distutils.command.build_py import build_py + from distutils.core import setup + + setup( + cmdclass={"build_py": build_py}, + name=NAME, + maintainer=MAINTAINER, + maintainer_email=MAINTAINER_EMAIL, + description=DESCRIPTION, + url=URL, + keywords="database ado odbc dbapi db-api Microsoft SQL", + ## download_url=DOWNLOAD_URL, + long_description=open("README.txt").read(), + license=LICENSE, + classifiers=CLASSIFIERS, + author=AUTHOR, + author_email=AUTHOR_EMAIL, + platforms=PLATFORMS, + version=VERSION, + package_dir={"adodbapi": ""}, + packages=["adodbapi"], + ) + return + + +if __name__ == "__main__": + setup_package() diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitest.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a64f55aa97b0d078958404d287e01fe4a8ce439d Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitestconfig.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitestconfig.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bdae69f95888085e7114cf7f5c4ef20d48e95ad Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/adodbapitestconfig.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/dbapi20.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/dbapi20.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7451b99885f9f9844aaa26555832f09e42aa6618 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/dbapi20.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/is64bit.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/is64bit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..947301bd17755d49e43920aabb5e60bf02ad536e Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/is64bit.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/setuptestframework.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/setuptestframework.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..314e215def8a11e9eb369e72798fc015b2b8ee1f Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/setuptestframework.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/test_adodbapi_dbapi20.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/test_adodbapi_dbapi20.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..900e5785f372d54f43c4bde2704b06b172fe7c8a Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/test_adodbapi_dbapi20.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/__pycache__/tryconnection.cpython-39.pyc b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/tryconnection.cpython-39.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..e613b4f86a8634d139edfe9b3bc34ecbe4105a16 Binary files /dev/null and b/MLPY/Lib/site-packages/adodbapi/test/__pycache__/tryconnection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/adodbapi/test/adodbapitest.py b/MLPY/Lib/site-packages/adodbapi/test/adodbapitest.py new file mode 100644 index 0000000000000000000000000000000000000000..e5b3dc1946e549fe28559ccdf92b1786dd1aa9fb --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/adodbapitest.py @@ -0,0 +1,1692 @@ +""" Unit tests version 2.6.1.0 for adodbapi""" +""" + adodbapi - A python DB API 2.0 interface to Microsoft ADO + + Copyright (C) 2002 Henrik Ekelund + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Updates by Vernon Cole +""" + +import copy +import datetime +import decimal +import random +import string +import sys +import unittest + +try: + import win32com.client + + win32 = True +except ImportError: + win32 = False + +# run the configuration module. +import adodbapitestconfig as config # will set sys.path to find correct version of adodbapi + +# in our code below, all our switches are from config.whatever +import tryconnection + +import adodbapi +import adodbapi.apibase as api + +try: + import adodbapi.ado_consts as ado_consts +except ImportError: # we are doing a shortcut import as a module -- so + try: + import ado_consts + except ImportError: + from adodbapi import ado_consts + + +def str2bytes(sval): + return sval.encode("latin1") + + +long = int + + +def randomstring(length): + return "".join([random.choice(string.ascii_letters) for n in range(32)]) + + +class CommonDBTests(unittest.TestCase): + "Self contained super-simple tests in easy syntax, should work on everything between mySQL and Oracle" + + def setUp(self): + self.engine = "unknown" + + def getEngine(self): + return self.engine + + def getConnection(self): + raise NotImplementedError # "This method must be overriden by a subclass" + + def getCursor(self): + return self.getConnection().cursor() + + def testConnection(self): + crsr = self.getCursor() + assert crsr.__class__.__name__ == "Cursor" + + def testErrorHandlerInherits(self): + if not self.remote: + conn = self.getConnection() + mycallable = lambda connection, cursor, errorclass, errorvalue: 1 + conn.errorhandler = mycallable + crsr = conn.cursor() + assert ( + crsr.errorhandler == mycallable + ), "Error handler on crsr should be same as on connection" + + def testDefaultErrorHandlerConnection(self): + if not self.remote: + conn = self.getConnection() + del conn.messages[:] + try: + conn.close() + conn.commit() # Should not be able to use connection after it is closed + except: + assert len(conn.messages) == 1 + assert len(conn.messages[0]) == 2 + assert conn.messages[0][0] == api.ProgrammingError + + def testOwnErrorHandlerConnection(self): + if self.remote: # 
ToDo: use "skip" + return + mycallable = ( + lambda connection, cursor, errorclass, errorvalue: 1 + ) # does not raise anything + conn = self.getConnection() + conn.errorhandler = mycallable + conn.close() + conn.commit() # Should not be able to use connection after it is closed + assert len(conn.messages) == 0 + + conn.errorhandler = None # This should bring back the standard error handler + try: + conn.close() + conn.commit() # Should not be able to use connection after it is closed + except: + pass + # The Standard errorhandler appends error to messages attribute + assert ( + len(conn.messages) > 0 + ), "Setting errorhandler to none should bring back the standard error handler" + + def testDefaultErrorHandlerCursor(self): + crsr = self.getConnection().cursor() + if not self.remote: + del crsr.messages[:] + try: + crsr.execute("SELECT abbtytddrf FROM dasdasd") + except: + assert len(crsr.messages) == 1 + assert len(crsr.messages[0]) == 2 + assert crsr.messages[0][0] == api.DatabaseError + + def testOwnErrorHandlerCursor(self): + if self.remote: # ToDo: should be a "skip" + return + mycallable = ( + lambda connection, cursor, errorclass, errorvalue: 1 + ) # does not raise anything + crsr = self.getConnection().cursor() + crsr.errorhandler = mycallable + crsr.execute("SELECT abbtytddrf FROM dasdasd") + assert len(crsr.messages) == 0 + + crsr.errorhandler = None # This should bring back the standard error handler + try: + crsr.execute("SELECT abbtytddrf FROM dasdasd") + except: + pass + # The Standard errorhandler appends error to messages attribute + assert ( + len(crsr.messages) > 0 + ), "Setting errorhandler to none should bring back the standard error handler" + + def testUserDefinedConversions(self): + if self.remote: ## Todo: should be a "skip" + return + try: + duplicatingConverter = lambda aStringField: aStringField * 2 + assert duplicatingConverter("gabba") == "gabbagabba" + + self.helpForceDropOnTblTemp() + conn = self.getConnection() + # the variantConversions attribute should not exist on a normal connection object + self.assertRaises(AttributeError, lambda x: conn.variantConversions[x], [2]) + if not self.remote: + # create a variantConversions attribute on the connection + conn.variantConversions = copy.copy(api.variantConversions) + crsr = conn.cursor() + tabdef = ( + "CREATE TABLE xx_%s (fldData VARCHAR(100) NOT NULL, fld2 VARCHAR(20))" + % config.tmp + ) + crsr.execute(tabdef) + crsr.execute( + "INSERT INTO xx_%s(fldData,fld2) VALUES('gabba','booga')" + % config.tmp + ) + crsr.execute( + "INSERT INTO xx_%s(fldData,fld2) VALUES('hey','yo')" % config.tmp + ) + # change converter for ALL adoStringTypes columns + conn.variantConversions[api.adoStringTypes] = duplicatingConverter + crsr.execute( + "SELECT fldData,fld2 FROM xx_%s ORDER BY fldData" % config.tmp + ) + + rows = crsr.fetchall() + row = rows[0] + self.assertEqual(row[0], "gabbagabba") + row = rows[1] + self.assertEqual(row[0], "heyhey") + self.assertEqual(row[1], "yoyo") + + upcaseConverter = lambda aStringField: aStringField.upper() + assert upcaseConverter("upThis") == "UPTHIS" + + # now use a single column converter + rows.converters[1] = upcaseConverter # convert second column + self.assertEqual(row[0], "heyhey") # first will be unchanged + self.assertEqual(row[1], "YO") # second will convert to upper case + + finally: + try: + del conn.variantConversions # Restore the default + except: + pass + self.helpRollbackTblTemp() + + def testUserDefinedConversionForExactNumericTypes(self): + # variantConversions is a 
dictionary of conversion functions + # held internally in adodbapi.apibase + # + # !!! this test intentionally alters the value of what should be constant in the module + # !!! no new code should use this example, to is only a test to see that the + # !!! deprecated way of doing this still works. (use connection.variantConversions) + # + if not self.remote and sys.version_info < (3, 0): ### Py3 need different test + oldconverter = adodbapi.variantConversions[ + ado_consts.adNumeric + ] # keep old function to restore later + # By default decimal and "numbers" are returned as decimals. + # Instead, make numbers return as floats + try: + adodbapi.variantConversions[ado_consts.adNumeric] = adodbapi.cvtFloat + self.helpTestDataType( + "decimal(18,2)", "NUMBER", 3.45, compareAlmostEqual=1 + ) + self.helpTestDataType( + "numeric(18,2)", "NUMBER", 3.45, compareAlmostEqual=1 + ) + # now return strings + adodbapi.variantConversions[ado_consts.adNumeric] = adodbapi.cvtString + self.helpTestDataType("numeric(18,2)", "NUMBER", "3.45") + # now a completly weird user defined convertion + adodbapi.variantConversions[ado_consts.adNumeric] = ( + lambda x: "!!This function returns a funny unicode string %s!!" % x + ) + self.helpTestDataType( + "numeric(18,2)", + "NUMBER", + "3.45", + allowedReturnValues=[ + "!!This function returns a funny unicode string 3.45!!" + ], + ) + finally: + # now reset the converter to its original function + adodbapi.variantConversions[ + ado_consts.adNumeric + ] = oldconverter # Restore the original convertion function + + def helpTestDataType( + self, + sqlDataTypeString, + DBAPIDataTypeString, + pyData, + pyDataInputAlternatives=None, + compareAlmostEqual=None, + allowedReturnValues=None, + ): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData """ + % config.tmp + + sqlDataTypeString + + ")\n" + ) + + crsr.execute(tabdef) + + # Test Null values mapped to None + crsr.execute("INSERT INTO xx_%s (fldId) VALUES (1)" % config.tmp) + + crsr.execute("SELECT fldId,fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchone() + self.assertEqual(rs[1], None) # Null should be mapped to None + assert rs[0] == 1 + + # Test description related + descTuple = crsr.description[1] + assert descTuple[0] in ["fldData", "flddata"], 'was "%s" expected "%s"' % ( + descTuple[0], + "fldData", + ) + + if DBAPIDataTypeString == "STRING": + assert descTuple[1] == api.STRING, 'was "%s" expected "%s"' % ( + descTuple[1], + api.STRING.values, + ) + elif DBAPIDataTypeString == "NUMBER": + assert descTuple[1] == api.NUMBER, 'was "%s" expected "%s"' % ( + descTuple[1], + api.NUMBER.values, + ) + elif DBAPIDataTypeString == "BINARY": + assert descTuple[1] == api.BINARY, 'was "%s" expected "%s"' % ( + descTuple[1], + api.BINARY.values, + ) + elif DBAPIDataTypeString == "DATETIME": + assert descTuple[1] == api.DATETIME, 'was "%s" expected "%s"' % ( + descTuple[1], + api.DATETIME.values, + ) + elif DBAPIDataTypeString == "ROWID": + assert descTuple[1] == api.ROWID, 'was "%s" expected "%s"' % ( + descTuple[1], + api.ROWID.values, + ) + elif DBAPIDataTypeString == "UUID": + assert descTuple[1] == api.OTHER, 'was "%s" expected "%s"' % ( + descTuple[1], + api.OTHER.values, + ) + else: + raise NotImplementedError # "DBAPIDataTypeString not provided" + + # Test data binding + inputs = [pyData] + if pyDataInputAlternatives: + inputs.extend(pyDataInputAlternatives) + inputs = set(inputs) # removes redundant 
string==unicode tests + fldId = 1 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (?,?)" % config.tmp, + (fldId, inParam), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE ?=fldID" % config.tmp, [fldId] + ) + rs = crsr.fetchone() + if allowedReturnValues: + allowedTypes = tuple([type(aRV) for aRV in allowedReturnValues]) + assert isinstance( + rs[0], allowedTypes + ), 'result type "%s" must be one of %s' % (type(rs[0]), allowedTypes) + else: + assert isinstance( + rs[0], type(pyData) + ), 'result type "%s" must be instance of %s' % ( + type(rs[0]), + type(pyData), + ) + + if compareAlmostEqual and DBAPIDataTypeString == "DATETIME": + iso1 = adodbapi.dateconverter.DateObjectToIsoFormatString(rs[0]) + iso2 = adodbapi.dateconverter.DateObjectToIsoFormatString(pyData) + self.assertEqual(iso1, iso2) + elif compareAlmostEqual: + s = float(pyData) + v = float(rs[0]) + assert ( + abs(v - s) / s < 0.00001 + ), "Values not almost equal recvd=%s, expected=%f" % (rs[0], s) + else: + if allowedReturnValues: + ok = False + self.assertTrue( + rs[0] in allowedReturnValues, + 'Value "%s" not in %s' % (repr(rs[0]), allowedReturnValues), + ) + else: + self.assertEqual( + rs[0], + pyData, + 'Values are not equal recvd="%s", expected="%s"' + % (rs[0], pyData), + ) + + def testDataTypeFloat(self): + self.helpTestDataType("real", "NUMBER", 3.45, compareAlmostEqual=True) + self.helpTestDataType("float", "NUMBER", 1.79e37, compareAlmostEqual=True) + + def testDataTypeDecmal(self): + self.helpTestDataType( + "decimal(18,2)", + "NUMBER", + 3.45, + allowedReturnValues=["3.45", "3,45", decimal.Decimal("3.45")], + ) + self.helpTestDataType( + "numeric(18,2)", + "NUMBER", + 3.45, + allowedReturnValues=["3.45", "3,45", decimal.Decimal("3.45")], + ) + self.helpTestDataType( + "decimal(20,2)", + "NUMBER", + 444444444444444444, + allowedReturnValues=[ + "444444444444444444.00", + "444444444444444444,00", + decimal.Decimal("444444444444444444"), + ], + ) + if self.getEngine() == "MSSQL": + self.helpTestDataType( + "uniqueidentifier", + "UUID", + "{71A4F49E-39F3-42B1-A41E-48FF154996E6}", + allowedReturnValues=["{71A4F49E-39F3-42B1-A41E-48FF154996E6}"], + ) + + def testDataTypeMoney(self): # v2.1 Cole -- use decimal for money + if self.getEngine() == "MySQL": + self.helpTestDataType( + "DECIMAL(20,4)", "NUMBER", decimal.Decimal("-922337203685477.5808") + ) + elif self.getEngine() == "PostgreSQL": + self.helpTestDataType( + "money", + "NUMBER", + decimal.Decimal("-922337203685477.5808"), + compareAlmostEqual=True, + allowedReturnValues=[ + -922337203685477.5808, + decimal.Decimal("-922337203685477.5808"), + ], + ) + else: + self.helpTestDataType("smallmoney", "NUMBER", decimal.Decimal("214748.02")) + self.helpTestDataType( + "money", "NUMBER", decimal.Decimal("-922337203685477.5808") + ) + + def testDataTypeInt(self): + if self.getEngine() != "PostgreSQL": + self.helpTestDataType("tinyint", "NUMBER", 115) + self.helpTestDataType("smallint", "NUMBER", -32768) + if self.getEngine() not in ["ACCESS", "PostgreSQL"]: + self.helpTestDataType( + "bit", "NUMBER", 1 + ) # Does not work correctly with access + if self.getEngine() in ["MSSQL", "PostgreSQL"]: + self.helpTestDataType( + "bigint", + "NUMBER", + 3000000000, + allowedReturnValues=[3000000000, int(3000000000)], + ) + self.helpTestDataType("int", "NUMBER", 2147483647) + + def testDataTypeChar(self): + 
for sqlDataType in ("char(6)", "nchar(6)"): + self.helpTestDataType( + sqlDataType, + "STRING", + "spam ", + allowedReturnValues=["spam", "spam", "spam ", "spam "], + ) + + def testDataTypeVarChar(self): + if self.getEngine() == "MySQL": + stringKinds = ["varchar(10)", "text"] + elif self.getEngine() == "PostgreSQL": + stringKinds = ["varchar(10)", "text", "character varying"] + else: + stringKinds = [ + "varchar(10)", + "nvarchar(10)", + "text", + "ntext", + ] # ,"varchar(max)"] + + for sqlDataType in stringKinds: + self.helpTestDataType(sqlDataType, "STRING", "spam", ["spam"]) + + def testDataTypeDate(self): + if self.getEngine() == "PostgreSQL": + dt = "timestamp" + else: + dt = "datetime" + self.helpTestDataType( + dt, "DATETIME", adodbapi.Date(2002, 10, 28), compareAlmostEqual=True + ) + if self.getEngine() not in ["MySQL", "PostgreSQL"]: + self.helpTestDataType( + "smalldatetime", + "DATETIME", + adodbapi.Date(2002, 10, 28), + compareAlmostEqual=True, + ) + if tag != "pythontime" and self.getEngine() not in [ + "MySQL", + "PostgreSQL", + ]: # fails when using pythonTime + self.helpTestDataType( + dt, + "DATETIME", + adodbapi.Timestamp(2002, 10, 28, 12, 15, 1), + compareAlmostEqual=True, + ) + + def testDataTypeBinary(self): + binfld = str2bytes("\x07\x00\xE2\x40*") + arv = [binfld, adodbapi.Binary(binfld), bytes(binfld)] + if self.getEngine() == "PostgreSQL": + self.helpTestDataType( + "bytea", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + else: + self.helpTestDataType( + "binary(5)", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + self.helpTestDataType( + "varbinary(100)", + "BINARY", + adodbapi.Binary(binfld), + allowedReturnValues=arv, + ) + if self.getEngine() != "MySQL": + self.helpTestDataType( + "image", "BINARY", adodbapi.Binary(binfld), allowedReturnValues=arv + ) + + def helpRollbackTblTemp(self): + self.helpForceDropOnTblTemp() + + def helpForceDropOnTblTemp(self): + conn = self.getConnection() + with conn.cursor() as crsr: + try: + crsr.execute("DROP TABLE xx_%s" % config.tmp) + if not conn.autocommit: + conn.commit() + except: + pass + + def helpCreateAndPopulateTableTemp(self, crsr): + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldData INTEGER + ) + """ + % config.tmp + ) + try: # EAFP + crsr.execute(tabdef) + except api.DatabaseError: # was not dropped before + self.helpForceDropOnTblTemp() # so drop it now + crsr.execute(tabdef) + for i in range(9): # note: this poor SQL code, but a valid test + crsr.execute("INSERT INTO xx_%s (fldData) VALUES (%i)" % (config.tmp, i)) + # NOTE: building the test table without using parameter substitution + + def testFetchAll(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 9 + # test slice of rows + i = 3 + for row in rs[3:-2]: # should have rowid 3..6 + assert row[0] == i + i += 1 + self.helpRollbackTblTemp() + + def testPreparedStatement(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.prepare("SELECT fldData FROM xx_%s" % config.tmp) + crsr.execute(crsr.command) # remember the one that was prepared + rs = crsr.fetchall() + assert len(rs) == 9 + assert rs[2][0] == 2 + self.helpRollbackTblTemp() + + def testWrongPreparedStatement(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.prepare("SELECT * FROM nowhere") + crsr.execute( + "SELECT fldData FROM xx_%s" % config.tmp + ) # should execute this one, not 
the prepared one + rs = crsr.fetchall() + assert len(rs) == 9 + assert rs[2][0] == 2 + self.helpRollbackTblTemp() + + def testIterator(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + for i, row in enumerate( + crsr + ): # using cursor as an iterator, rather than fetchxxx + assert row[0] == i + self.helpRollbackTblTemp() + + def testExecuteMany(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + seq_of_values = [(111,), (222,)] + crsr.executemany( + "INSERT INTO xx_%s (fldData) VALUES (?)" % config.tmp, seq_of_values + ) + if crsr.rowcount == -1: + print( + self.getEngine() + + " Provider does not support rowcount (on .executemany())" + ) + else: + self.assertEqual(crsr.rowcount, 2) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 11 + self.helpRollbackTblTemp() + + def testRowCount(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + if crsr.rowcount == -1: + # print("provider does not support rowcount on select") + pass + else: + self.assertEqual(crsr.rowcount, 9) + self.helpRollbackTblTemp() + + def testRowCountNoRecordset(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("DELETE FROM xx_%s WHERE fldData >= 5" % config.tmp) + if crsr.rowcount == -1: + print(self.getEngine() + " Provider does not support rowcount (on DELETE)") + else: + self.assertEqual(crsr.rowcount, 4) + self.helpRollbackTblTemp() + + def testFetchMany(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchmany(3) + assert len(rs) == 3 + rs = crsr.fetchmany(5) + assert len(rs) == 5 + rs = crsr.fetchmany(5) + assert len(rs) == 1 # Asked for five, but there is only one left + self.helpRollbackTblTemp() + + def testFetchManyWithArraySize(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("SELECT fldData FROM xx_%s" % config.tmp) + rs = crsr.fetchmany() + assert len(rs) == 1 # arraysize Defaults to one + crsr.arraysize = 4 + rs = crsr.fetchmany() + assert len(rs) == 4 + rs = crsr.fetchmany() + assert len(rs) == 4 + rs = crsr.fetchmany() + assert len(rs) == 0 + self.helpRollbackTblTemp() + + def testErrorConnect(self): + conn = self.getConnection() + kw = {} + if "proxy_host" in conn.kwargs: + kw["proxy_host"] = conn.kwargs["proxy_host"] + conn.close() + self.assertRaises(api.DatabaseError, self.db, "not a valid connect string", kw) + + def testRowIterator(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldTwo integer, + fldThree integer, + fldFour integer) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = [(2, 3, 4), (102, 103, 104)] + fldId = 1 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldTwo,fldThree,fldFour) VALUES (?,?,?,?)" + % config.tmp, + (fldId, inParam[0], inParam[1], inParam[2]), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldTwo,fldThree,fldFour FROM xx_%s WHERE ?=fldID" % config.tmp, + [fldId], + ) + rec = crsr.fetchone() + # check that stepping through an emulated row works + for j in range(len(inParam)): + assert ( + 
rec[j] == inParam[j] + ), 'returned value:"%s" != test value:"%s"' % (rec[j], inParam[j]) + # check that we can get a complete tuple from a row + assert tuple(rec) == inParam, 'returned value:"%s" != test value:"%s"' % ( + repr(rec), + repr(inParam), + ) + # test that slices of rows work + slice1 = tuple(rec[:-1]) + slice2 = tuple(inParam[0:2]) + assert slice1 == slice2, 'returned value:"%s" != test value:"%s"' % ( + repr(slice1), + repr(slice2), + ) + # now test named column retrieval + assert rec["fldTwo"] == inParam[0] + assert rec.fldThree == inParam[1] + assert rec.fldFour == inParam[2] + # test array operation + # note that the fields vv vv vv are out of order + crsr.execute("select fldThree,fldFour,fldTwo from xx_%s" % config.tmp) + recs = crsr.fetchall() + assert recs[1][0] == 103 + assert recs[0][1] == 4 + assert recs[1]["fldFour"] == 104 + assert recs[0, 0] == 3 + assert recs[0, "fldTwo"] == 2 + assert recs[1, 2] == 102 + for i in range(1): + for j in range(2): + assert recs[i][j] == recs[i, j] + + def testFormatParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + conn.paramstyle = "format" # test nonstandard use of paramstyle + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10), + fldConst varchar(30)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = ["one", "two", "three"] + fldId = 2 + for inParam in inputs: + fldId += 1 + sql = ( + "INSERT INTO xx_" + + config.tmp + + " (fldId,fldConst,fldData) VALUES (%s,'thi%s :may cause? trouble', %s)" + ) + try: + crsr.execute(sql, (fldId, inParam)) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData, fldConst FROM xx_" + config.tmp + " WHERE %s=fldID", + [fldId], + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + self.assertEqual(rec[1], "thi%s :may cause? 
trouble") + + # now try an operation with a "%s" as part of a literal + sel = ( + "insert into xx_" + config.tmp + " (fldId,fldData) VALUES (%s,'four%sfive')" + ) + params = (20,) + crsr.execute(sel, params) + + # test the .query implementation + assert "(?," in crsr.query, 'expected:"%s" in "%s"' % ("(?,", crsr.query) + # test the .command attribute + assert crsr.command == sel, 'expected:"%s" but found "%s"' % (sel, crsr.command) + + # test the .parameters attribute + if not self.remote: # parameter list will be altered in transit + self.assertEqual(crsr.parameters, params) + # now make sure the data made it + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=20" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "four%sfive") + + def testNamedParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + crsr.paramstyle = "named" # test nonstandard use of paramstyle + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = ["four", "five", "six"] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (:Id,:f_Val)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE fldID=:Id" % config.tmp, {"Id": fldId} + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a ":" as part of a literal + crsr.execute( + "insert into xx_%s (fldId,fldData) VALUES (:xyz,'six:five')" % config.tmp, + {"xyz": 30}, + ) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "six:five") + + def testPyformatParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + crsr.paramstyle = "pyformat" # test nonstandard use of paramstyle + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10)) + """ + % config.tmp + ) + crsr.execute(tabdef) + + inputs = ["four", "five", "six"] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (%%(Id)s,%%(f_Val)s)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE fldID=%%(Id)s" % config.tmp, + {"Id": fldId}, + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a "%" as part of a literal + crsr.execute( + "insert into xx_%s (fldId,fldData) VALUES (%%(xyz)s,'six%%five')" + % config.tmp, + {"xyz": 30}, + ) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "six%five") + + def testAutomaticParamstyle(self): + self.helpForceDropOnTblTemp() + conn = self.getConnection() + conn.paramstyle = "dynamic" # test nonstandard use of paramstyle + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + fldId integer NOT NULL, + fldData varchar(10), + fldConst varchar(30)) + """ + % config.tmp + ) + crsr.execute(tabdef) + inputs = ["one", "two", "three"] + fldId = 
2 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_" + + config.tmp + + " (fldId,fldConst,fldData) VALUES (?,'thi%s :may cause? troub:1e', ?)", + (fldId, inParam), + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + trouble = "thi%s :may cause? troub:1e" + crsr.execute( + "SELECT fldData, fldConst FROM xx_" + config.tmp + " WHERE ?=fldID", + [fldId], + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + self.assertEqual(rec[1], trouble) + # inputs = [u'four',u'five',u'six'] + fldId = 10 + for inParam in inputs: + fldId += 1 + try: + crsr.execute( + "INSERT INTO xx_%s (fldId,fldData) VALUES (:Id,:f_Val)" + % config.tmp, + {"f_Val": inParam, "Id": fldId}, + ) + except: + if self.remote: + for message in crsr.messages: + print(message) + else: + conn.printADOerrors() + raise + crsr.execute( + "SELECT fldData FROM xx_%s WHERE :Id=fldID" % config.tmp, {"Id": fldId} + ) + rec = crsr.fetchone() + self.assertEqual( + rec[0], + inParam, + 'returned value:"%s" != test value:"%s"' % (rec[0], inParam), + ) + # now a test with a ":" as part of a literal -- and use a prepared query + ppdcmd = ( + "insert into xx_%s (fldId,fldData) VALUES (:xyz,'six:five')" % config.tmp + ) + crsr.prepare(ppdcmd) + crsr.execute(ppdcmd, {"xyz": 30}) + crsr.execute("SELECT fldData FROM xx_%s WHERE fldID=30" % config.tmp) + rec = crsr.fetchone() + self.assertEqual(rec[0], "six:five") + + def testRollBack(self): + conn = self.getConnection() + crsr = conn.cursor() + assert not crsr.connection.autocommit, "Unexpected beginning condition" + self.helpCreateAndPopulateTableTemp(crsr) + crsr.connection.commit() # commit the first bunch + + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % config.tmp + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + self.conn.rollback() + crsr.execute(selectSql) + assert ( + crsr.fetchone() == None + ), "cursor.fetchone should return None if a query retrieves no rows" + crsr.execute("SELECT fldData from xx_%s" % config.tmp) + rs = crsr.fetchall() + assert len(rs) == 9, "the original records should still be present" + self.helpRollbackTblTemp() + + def testCommit(self): + try: + con2 = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + assert not con2.autocommit, "default should be manual commit" + crsr = con2.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + con2.commit() + + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % config.tmp + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + crsr.close() + con2.close() + conn = self.getConnection() + crsr = self.getCursor() + with conn.cursor() as crsr: + crsr.execute(selectSql) + rs = crsr.fetchall() + assert len(rs) == 1 + assert rs[0][0] == 100 + self.helpRollbackTblTemp() + + def testAutoRollback(self): + try: + con2 = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + assert not con2.autocommit, "unexpected beginning condition" + crsr = con2.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + selectSql = "SELECT fldData FROM xx_%s WHERE fldData=100" % config.tmp + crsr.execute(selectSql) 
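+ # the row inserted above has not been committed on con2, so it is visible here + # only because this cursor shares con2's transaction; closing con2 below without + # committing should discard it.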
+ rs = crsr.fetchall() + assert len(rs) == 1 + crsr.close() + con2.close() + crsr = self.getCursor() + try: + crsr.execute( + selectSql + ) # closing the connection should have forced rollback + row = crsr.fetchone() + except api.DatabaseError: + row = None # if the entire table disappeared the rollback was perfect and the test passed + assert row == None, ( + "cursor.fetchone should return None if a query retrieves no rows. Got %s" + % repr(row) + ) + self.helpRollbackTblTemp() + + def testAutoCommit(self): + try: + ac_conn = self.getAnotherConnection({"autocommit": True}) + except NotImplementedError: + return # should be "SKIP" for ACCESS + crsr = ac_conn.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + crsr.close() + with self.getCursor() as crsr: + selectSql = "SELECT fldData from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + rs = crsr.fetchall() + assert len(rs) == 10, "all records should still be present" + ac_conn.close() + self.helpRollbackTblTemp() + + def testSwitchedAutoCommit(self): + try: + ac_conn = self.getAnotherConnection() + except NotImplementedError: + return # should be "SKIP" for ACCESS + ac_conn.autocommit = True + crsr = ac_conn.cursor() + self.helpCreateAndPopulateTableTemp(crsr) + crsr.execute("INSERT INTO xx_%s (fldData) VALUES(100)" % config.tmp) + crsr.close() + conn = self.getConnection() + ac_conn.close() + with self.getCursor() as crsr: + selectSql = "SELECT fldData from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + rs = crsr.fetchall() + assert len(rs) == 10, "all records should still be present" + self.helpRollbackTblTemp() + + def testExtendedTypeHandling(self): + class XtendString(str): + pass + + class XtendInt(int): + pass + + class XtendFloat(float): + pass + + xs = XtendString(randomstring(30)) + xi = XtendInt(random.randint(-100, 500)) + xf = XtendFloat(random.random()) + self.helpForceDropOnTblTemp() + conn = self.getConnection() + crsr = conn.cursor() + tabdef = ( + """ + CREATE TABLE xx_%s ( + s VARCHAR(40) NOT NULL, + i INTEGER NOT NULL, + f REAL NOT NULL)""" + % config.tmp + ) + crsr.execute(tabdef) + crsr.execute( + "INSERT INTO xx_%s (s, i, f) VALUES (?, ?, ?)" % config.tmp, (xs, xi, xf) + ) + crsr.close() + conn = self.getConnection() + with self.getCursor() as crsr: + selectSql = "SELECT s, i, f from xx_%s" % config.tmp + crsr.execute( + selectSql + ) # closing the connection should _not_ have forced rollback + row = crsr.fetchone() + self.assertEqual(row.s, xs) + self.assertEqual(row.i, xi) + self.assertAlmostEqual(row.f, xf) + self.helpRollbackTblTemp() + + +class TestADOwithSQLServer(CommonDBTests): + def setUp(self): + self.conn = config.dbSqlServerconnect( + *config.connStrSQLServer[0], **config.connStrSQLServer[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "MSSQL" + self.db = config.dbSqlServerconnect + self.remote = config.connStrSQLServer[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrSQLServer[1]) + if addkeys: + keys.update(addkeys) + return config.dbSqlServerconnect(*config.connStrSQLServer[0], **keys) + + def testVariableReturningStoredProcedure(self): + crsr = self.conn.cursor() + 
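+ # create a throw-away T-SQL procedure with an OUTPUT parameter; callproc() is + # expected to hand back the parameter list with @theOutput replaced by the + # concatenation of the two input strings.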
spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput varchar(50), + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput=@theInput+@theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + retvalues = crsr.callproc( + "sp_DeleteMeOnlyForTesting", ("Dodsworth", "Anne", " ") + ) + assert retvalues[0] == "Dodsworth", '%s is not "Dodsworth"' % repr(retvalues[0]) + assert retvalues[1] == "Anne", '%s is not "Anne"' % repr(retvalues[1]) + assert retvalues[2] == "DodsworthAnne", '%s is not "DodsworthAnne"' % repr( + retvalues[2] + ) + self.conn.rollback() + + def testMultipleSetReturn(self): + crsr = self.getCursor() + self.helpCreateAndPopulateTableTemp(crsr) + + spdef = """ + CREATE PROCEDURE sp_DeleteMe_OnlyForTesting + AS + SELECT fldData FROM xx_%s ORDER BY fldData ASC + SELECT fldData From xx_%s where fldData = -9999 + SELECT fldData FROM xx_%s ORDER BY fldData DESC + """ % ( + config.tmp, + config.tmp, + config.tmp, + ) + try: + crsr.execute("DROP PROCEDURE sp_DeleteMe_OnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + retvalues = crsr.callproc("sp_DeleteMe_OnlyForTesting") + row = crsr.fetchone() + self.assertEqual(row[0], 0) + assert crsr.nextset() == True, "Operation should succeed" + assert not crsr.fetchall(), "Should be an empty second set" + assert crsr.nextset() == True, "third set should be present" + rowdesc = crsr.fetchall() + self.assertEqual(rowdesc[0][0], 8) + assert crsr.nextset() == None, "No more return sets, should return None" + + self.helpRollbackTblTemp() + + def testDatetimeProcedureParameter(self): + crsr = self.conn.cursor() + spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput DATETIME, + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput = CONVERT(CHARACTER(20), @theInput, 0) + @theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + result = crsr.callproc( + "sp_DeleteMeOnlyForTesting", + [adodbapi.Timestamp(2014, 12, 25, 0, 1, 0), "Beep", " " * 30], + ) + + assert result[2] == "Dec 25 2014 12:01AM Beep", 'value was="%s"' % result[2] + self.conn.rollback() + + def testIncorrectStoredProcedureParameter(self): + crsr = self.conn.cursor() + spdef = """ + CREATE PROCEDURE sp_DeleteMeOnlyForTesting + @theInput DATETIME, + @theOtherInput varchar(50), + @theOutput varchar(100) OUTPUT + AS + SET @theOutput = CONVERT(CHARACTER(20), @theInput) + @theOtherInput + """ + try: + crsr.execute("DROP PROCEDURE sp_DeleteMeOnlyForTesting") + self.conn.commit() + except: # Make sure it is empty + pass + crsr.execute(spdef) + + # calling the sproc with a string for the first parameter where a DateTime is expected + result = tryconnection.try_operation_with_expected_exception( + (api.DataError, api.DatabaseError), + crsr.callproc, + ["sp_DeleteMeOnlyForTesting"], + {"parameters": ["this is wrong", "Anne", "not Alice"]}, + ) + if result[0]: # the expected exception was raised + assert "@theInput" in str(result[1]) or "DatabaseError" in str( + result + ), "Identifies the wrong erroneous parameter" + else: + assert result[0], result[1] # incorrect or no exception + self.conn.rollback() + + +class TestADOwithAccessDB(CommonDBTests): + def setUp(self): + self.conn = config.dbAccessconnect( + 
*config.connStrAccess[0], **config.connStrAccess[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "ACCESS" + self.db = config.dbAccessconnect + self.remote = config.connStrAccess[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + raise NotImplementedError("Jet cannot use a second connection to the database") + + def testOkConnect(self): + c = self.db(*config.connStrAccess[0], **config.connStrAccess[1]) + assert c != None + c.close() + + +class TestADOwithMySql(CommonDBTests): + def setUp(self): + self.conn = config.dbMySqlconnect( + *config.connStrMySql[0], **config.connStrMySql[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "MySQL" + self.db = config.dbMySqlconnect + self.remote = config.connStrMySql[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrMySql[1]) + if addkeys: + keys.update(addkeys) + return config.dbMySqlconnect(*config.connStrMySql[0], **keys) + + def testOkConnect(self): + c = self.db(*config.connStrMySql[0], **config.connStrMySql[1]) + assert c != None + + # def testStoredProcedure(self): + # crsr=self.conn.cursor() + # try: + # crsr.execute("DROP PROCEDURE DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + # spdef= """ + # DELIMITER $$ + # CREATE PROCEDURE DeleteMeOnlyForTesting (onein CHAR(10), twoin CHAR(10), OUT theout CHAR(20)) + # DETERMINISTIC + # BEGIN + # SET theout = onein //|| twoin; + # /* (SELECT 'a small string' as result; */ + # END $$ + # """ + # + # crsr.execute(spdef) + # + # retvalues=crsr.callproc('DeleteMeOnlyForTesting',('Dodsworth','Anne',' ')) + # print 'return value (mysql)=',repr(crsr.returnValue) ### + # assert retvalues[0]=='Dodsworth', '%s is not "Dodsworth"'%repr(retvalues[0]) + # assert retvalues[1]=='Anne','%s is not "Anne"'%repr(retvalues[1]) + # assert retvalues[2]=='DodsworthAnne','%s is not "DodsworthAnne"'%repr(retvalues[2]) + # + # try: + # crsr.execute("DROP PROCEDURE, DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + + +class TestADOwithPostgres(CommonDBTests): + def setUp(self): + self.conn = config.dbPostgresConnect( + *config.connStrPostgres[0], **config.connStrPostgres[1] + ) + self.conn.timeout = 30 # turn timeout back up + self.engine = "PostgreSQL" + self.db = config.dbPostgresConnect + self.remote = config.connStrPostgres[2] + + def tearDown(self): + try: + self.conn.rollback() + except: + pass + try: + self.conn.close() + except: + pass + self.conn = None + + def getConnection(self): + return self.conn + + def getAnotherConnection(self, addkeys=None): + keys = dict(config.connStrPostgres[1]) + if addkeys: + keys.update(addkeys) + return config.dbPostgresConnect(*config.connStrPostgres[0], **keys) + + def testOkConnect(self): + c = self.db(*config.connStrPostgres[0], **config.connStrPostgres[1]) + assert c != None + + # def testStoredProcedure(self): + # crsr=self.conn.cursor() + # spdef= """ + # CREATE OR REPLACE FUNCTION DeleteMeOnlyForTesting (text, text) + # RETURNS text AS $funk$ + # BEGIN + # RETURN $1 || $2; + # END; + # $funk$ + # LANGUAGE SQL; + # """ + # + # crsr.execute(spdef) + # retvalues = 
crsr.callproc('DeleteMeOnlyForTesting',('Dodsworth','Anne',' ')) + # ### print 'return value (pg)=',repr(crsr.returnValue) ### + # assert retvalues[0]=='Dodsworth', '%s is not "Dodsworth"'%repr(retvalues[0]) + # assert retvalues[1]=='Anne','%s is not "Anne"'%repr(retvalues[1]) + # assert retvalues[2]=='Dodsworth Anne','%s is not "Dodsworth Anne"'%repr(retvalues[2]) + # self.conn.rollback() + # try: + # crsr.execute("DROP PROCEDURE, DeleteMeOnlyForTesting") + # self.conn.commit() + # except: #Make sure it is empty + # pass + + +class TimeConverterInterfaceTest(unittest.TestCase): + def testIDate(self): + assert self.tc.Date(1990, 2, 2) + + def testITime(self): + assert self.tc.Time(13, 2, 2) + + def testITimestamp(self): + assert self.tc.Timestamp(1990, 2, 2, 13, 2, 1) + + def testIDateObjectFromCOMDate(self): + assert self.tc.DateObjectFromCOMDate(37435.7604282) + + def testICOMDate(self): + assert hasattr(self.tc, "COMDate") + + def testExactDate(self): + d = self.tc.Date(1994, 11, 15) + comDate = self.tc.COMDate(d) + correct = 34653.0 + assert comDate == correct, comDate + + def testExactTimestamp(self): + d = self.tc.Timestamp(1994, 11, 15, 12, 0, 0) + comDate = self.tc.COMDate(d) + correct = 34653.5 + self.assertEqual(comDate, correct) + + d = self.tc.Timestamp(2003, 5, 6, 14, 15, 17) + comDate = self.tc.COMDate(d) + correct = 37747.593946759262 + self.assertEqual(comDate, correct) + + def testIsoFormat(self): + d = self.tc.Timestamp(1994, 11, 15, 12, 3, 10) + iso = self.tc.DateObjectToIsoFormatString(d) + self.assertEqual(str(iso[:19]), "1994-11-15 12:03:10") + + dt = self.tc.Date(2003, 5, 2) + iso = self.tc.DateObjectToIsoFormatString(dt) + self.assertEqual(str(iso[:10]), "2003-05-02") + + +if config.doMxDateTimeTest: + import mx.DateTime + + +class TestMXDateTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.mxDateTimeConverter() + + def testCOMDate(self): + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 2) + cmd = self.tc.COMDate(t) + assert cmd == t.COMDate() + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 0) + t2 = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 2) + assert t2 > cmd > t + + def testDate(self): + assert mx.DateTime.Date(1980, 11, 4) == self.tc.Date(1980, 11, 4) + + def testTime(self): + assert mx.DateTime.Time(13, 11, 4) == self.tc.Time(13, 11, 4) + + def testTimestamp(self): + t = mx.DateTime.DateTime(2002, 6, 28, 18, 15, 1) + obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 1) + assert t == obj + + +import time + + +class TestPythonTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.pythonTimeConverter() + + def testCOMDate(self): + mk = time.mktime((2002, 6, 28, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + t = time.localtime(mk) + # Fri, 28 Jun 2002 18:15:01 +0000 + cmd = self.tc.COMDate(t) + assert abs(cmd - 37435.7604282) < 1.0 / 24, "%f more than an hour wrong" % cmd + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t1 = time.gmtime( + time.mktime((2002, 6, 28, 0, 14, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + # there are errors in the implementation of gmtime which we ignore + t2 = time.gmtime( + time.mktime((2002, 6, 29, 12, 14, 2, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + assert t1 < cmd < t2, '"%s" should be about 2002-6-28 12:15:01' % repr(cmd) + + def testDate(self): + t1 = time.mktime((2002, 6, 28, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 30, 0)) + t2 = 
time.mktime((2002, 6, 30, 18, 15, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, 0)) + obj = self.tc.Date(2002, 6, 29) + assert t1 < time.mktime(obj) < t2, obj + + def testTime(self): + self.assertEqual( + self.tc.Time(18, 15, 2), time.gmtime(18 * 60 * 60 + 15 * 60 + 2) + ) + + def testTimestamp(self): + t1 = time.localtime( + time.mktime((2002, 6, 28, 18, 14, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + t2 = time.localtime( + time.mktime((2002, 6, 28, 18, 16, 1, 4, 31 + 28 + 31 + 30 + 31 + 28, -1)) + ) + obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 2) + assert t1 < obj < t2, obj + + +class TestPythonDateTimeConverter(TimeConverterInterfaceTest): + def setUp(self): + self.tc = api.pythonDateTimeConverter() + + def testCOMDate(self): + t = datetime.datetime(2002, 6, 28, 18, 15, 1) + # Fri, 28 Jun 2002 18:15:01 +0000 + cmd = self.tc.COMDate(t) + assert abs(cmd - 37435.7604282) < 1.0 / 24, "more than an hour wrong" + + def testDateObjectFromCOMDate(self): + cmd = self.tc.DateObjectFromCOMDate(37435.7604282) + t1 = datetime.datetime(2002, 6, 28, 18, 14, 1) + t2 = datetime.datetime(2002, 6, 28, 18, 16, 1) + assert t1 < cmd < t2, cmd + + tx = datetime.datetime( + 2002, 6, 28, 18, 14, 1, 900000 + ) # testing that microseconds don't become milliseconds + c1 = self.tc.DateObjectFromCOMDate(self.tc.COMDate(tx)) + assert t1 < c1 < t2, c1 + + def testDate(self): + t1 = datetime.date(2002, 6, 28) + t2 = datetime.date(2002, 6, 30) + obj = self.tc.Date(2002, 6, 29) + assert t1 < obj < t2, obj + + def testTime(self): + self.assertEqual(self.tc.Time(18, 15, 2).isoformat()[:8], "18:15:02") + + def testTimestamp(self): + t1 = datetime.datetime(2002, 6, 28, 18, 14, 1) + t2 = datetime.datetime(2002, 6, 28, 18, 16, 1) + obj = self.tc.Timestamp(2002, 6, 28, 18, 15, 2) + assert t1 < obj < t2, obj + + +suites = [] +suites.append(unittest.makeSuite(TestPythonDateTimeConverter, "test")) +if config.doMxDateTimeTest: + suites.append(unittest.makeSuite(TestMXDateTimeConverter, "test")) +if config.doTimeTest: + suites.append(unittest.makeSuite(TestPythonTimeConverter, "test")) + +if config.doAccessTest: + suites.append(unittest.makeSuite(TestADOwithAccessDB, "test")) +if config.doSqlServerTest: + suites.append(unittest.makeSuite(TestADOwithSQLServer, "test")) +if config.doMySqlTest: + suites.append(unittest.makeSuite(TestADOwithMySql, "test")) +if config.doPostgresTest: + suites.append(unittest.makeSuite(TestADOwithPostgres, "test")) + + +class cleanup_manager(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + config.cleanup(config.testfolder, config.mdb_name) + + +suite = unittest.TestSuite(suites) +if __name__ == "__main__": + mysuite = copy.deepcopy(suite) + with cleanup_manager(): + defaultDateConverter = adodbapi.dateconverter + print(__doc__) + print("Default Date Converter is %s" % (defaultDateConverter,)) + dateconverter = defaultDateConverter + tag = "datetime" + unittest.TextTestRunner().run(mysuite) + + if config.iterateOverTimeTests: + for test, dateconverter, tag in ( + (config.doTimeTest, api.pythonTimeConverter, "pythontime"), + (config.doMxDateTimeTest, api.mxDateTimeConverter, "mx"), + ): + if test: + mysuite = copy.deepcopy( + suite + ) # work around a side effect of unittest.TextTestRunner + adodbapi.adodbapi.dateconverter = dateconverter() + print("Changed dateconverter to ") + print(adodbapi.adodbapi.dateconverter) + unittest.TextTestRunner().run(mysuite) diff --git a/MLPY/Lib/site-packages/adodbapi/test/adodbapitestconfig.py 
b/MLPY/Lib/site-packages/adodbapi/test/adodbapitestconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..98f254440e17d8227d8c0c665d435761d849249d --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/adodbapitestconfig.py @@ -0,0 +1,221 @@ +# Configure this to _YOUR_ environment in order to run the testcases. +"testADOdbapiConfig.py v 2.6.2.B00" + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# # +# # TESTERS: +# # +# # You will need to make numerous modifications to this file +# # to adapt it to your own testing environment. +# # +# # Skip down to the next "# #" line -- +# # -- the things you need to change are below it. +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +import platform +import random +import sys + +import is64bit +import setuptestframework +import tryconnection + +print("\nPython", sys.version) +node = platform.node() +try: + print( + "node=%s, is64bit.os()= %s, is64bit.Python()= %s" + % (node, is64bit.os(), is64bit.Python()) + ) +except: + pass + +if "--help" in sys.argv: + print( + """Valid command-line switches are: + --package - create a temporary test package, run 2to3 if needed. + --all - run all possible tests + --time - loop over time format tests (including mxdatetime if present) + --nojet - do not test against an ACCESS database file + --mssql - test against Microsoft SQL server + --pg - test against PostgreSQL + --mysql - test against MariaDB + --remote= - test unsing remote server at= (experimental) + """ + ) + exit() +try: + onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python +except: + onWindows = False + +# create a random name for temporary table names +_alphabet = ( + "PYFGCRLAOEUIDHTNSQJKXBMWVZ" # why, yes, I do happen to use a dvorak keyboard +) +tmp = "".join([random.choice(_alphabet) for x in range(9)]) +mdb_name = "xx_" + tmp + ".mdb" # generate a non-colliding name for the temporary .mdb +testfolder = setuptestframework.maketemp() + +if "--package" in sys.argv: + # create a new adodbapi module -- running 2to3 if needed. + pth = setuptestframework.makeadopackage(testfolder) +else: + # use the adodbapi module in which this file appears + pth = setuptestframework.find_ado_path() +if pth not in sys.path: + # look here _first_ to find modules + sys.path.insert(1, pth) + +proxy_host = None +for arg in sys.argv: + if arg.startswith("--remote="): + proxy_host = arg.split("=")[1] + import adodbapi.remote as remote + + break + + +# function to clean up the temporary folder -- calling program must run this function before exit. +cleanup = setuptestframework.getcleanupfunction() +try: + import adodbapi # will (hopefully) be imported using the "pth" discovered above +except SyntaxError: + print( + '\n* * * Are you trying to run Python2 code using Python3? Re-run this test using the "--package" switch.' 
+ ) + sys.exit(11) +try: + print(adodbapi.version) # show version +except: + print('"adodbapi.version" not present or not working.') +print(__doc__) + +verbose = False +for a in sys.argv: + if a.startswith("--verbose"): + arg = True + try: + arg = int(a.split("=")[1]) + except IndexError: + pass + adodbapi.adodbapi.verbose = arg + verbose = arg + +doAllTests = "--all" in sys.argv +doAccessTest = not ("--nojet" in sys.argv) +doSqlServerTest = "--mssql" in sys.argv or doAllTests +doMySqlTest = "--mysql" in sys.argv or doAllTests +doPostgresTest = "--pg" in sys.argv or doAllTests +iterateOverTimeTests = ("--time" in sys.argv or doAllTests) and onWindows + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# # start your environment setup here v v v +SQL_HOST_NODE = "testsql.2txt.us,1430" + +try: # If mx extensions are installed, use mxDateTime + import mx.DateTime + + doMxDateTimeTest = True +except: + doMxDateTimeTest = False # Requires eGenixMXExtensions + +doTimeTest = True # obsolete python time format + +if doAccessTest: + if proxy_host: # determine the (probably remote) database file folder + c = {"macro_find_temp_test_path": ["mdb", mdb_name], "proxy_host": proxy_host} + else: + c = {"mdb": setuptestframework.makemdb(testfolder, mdb_name)} + + # macro definition for keyword "provider" using macro "is64bit" -- see documentation + # is64bit will return true for 64 bit versions of Python, so the macro will select the ACE provider + # (If running a remote ADO service, this will test the 64-bitedness of the ADO server.) + c["macro_is64bit"] = [ + "provider", + "Microsoft.ACE.OLEDB.12.0", # 64 bit provider + "Microsoft.Jet.OLEDB.4.0", + ] # 32 bit provider + connStrAccess = "Provider=%(provider)s;Data Source=%(mdb)s" # ;Mode=ReadWrite;Persist Security Info=False;Jet OLEDB:Bypass UserInfo Validation=True" + print( + " ...Testing ACCESS connection to {} file...".format( + c.get("mdb", "remote .mdb") + ) + ) + doAccessTest, connStrAccess, dbAccessconnect = tryconnection.try_connection( + verbose, connStrAccess, 10, **c + ) + +if doSqlServerTest: + c = { + "host": SQL_HOST_NODE, # name of computer with SQL Server + "database": "adotest", + "user": "adotestuser", # None implies Windows security + "password": "Sq1234567", + # macro definition for keyword "security" using macro "auto_security" + "macro_auto_security": "security", + "provider": "MSOLEDBSQL; MARS Connection=True", + } + if proxy_host: + c["proxy_host"] = proxy_host + connStr = "Provider=%(provider)s; Initial Catalog=%(database)s; Data Source=%(host)s; %(security)s;" + print(" ...Testing MS-SQL login to {}...".format(c["host"])) + ( + doSqlServerTest, + connStrSQLServer, + dbSqlServerconnect, + ) = tryconnection.try_connection(verbose, connStr, 30, **c) + +if doMySqlTest: + c = { + "host": "testmysql.2txt.us", + "database": "adodbapitest", + "user": "adotest", + "password": "12345678", + "port": "3330", # note the nonstandard port for obfuscation + "driver": "MySQL ODBC 5.1 Driver", + } # or _driver="MySQL ODBC 3.51 Driver + if proxy_host: + c["proxy_host"] = proxy_host + c["macro_is64bit"] = [ + "provider", + "Provider=MSDASQL;", + ] # turn on the 64 bit ODBC adapter only if needed + cs = ( + "%(provider)sDriver={%(driver)s};Server=%(host)s;Port=3330;" + + "Database=%(database)s;user=%(user)s;password=%(password)s;Option=3;" + ) + print(" ...Testing MySql login to {}...".format(c["host"])) + doMySqlTest, connStrMySql, dbMySqlconnect = tryconnection.try_connection( + verbose, cs, 5, **c 
+ ) + + +if doPostgresTest: + _computername = "testpg.2txt.us" + _databasename = "adotest" + _username = "adotestuser" + _password = "12345678" + kws = {"timeout": 4} + kws["macro_is64bit"] = [ + "prov_drv", + "Provider=MSDASQL;Driver={PostgreSQL Unicode(x64)}", + "Driver=PostgreSQL Unicode", + ] + # get driver from http://www.postgresql.org/ftp/odbc/versions/ + # test using positional and keyword arguments (bad example for real code) + if proxy_host: + kws["proxy_host"] = proxy_host + print(" ...Testing PostgreSQL login to {}...".format(_computername)) + doPostgresTest, connStrPostgres, dbPostgresConnect = tryconnection.try_connection( + verbose, + "%(prov_drv)s;Server=%(host)s;Database=%(database)s;uid=%(user)s;pwd=%(password)s;port=5430;", # note nonstandard port + _username, + _password, + _computername, + _databasename, + **kws + ) + +assert ( + doAccessTest or doSqlServerTest or doMySqlTest or doPostgresTest +), "No database engine found for testing" diff --git a/MLPY/Lib/site-packages/adodbapi/test/dbapi20.py b/MLPY/Lib/site-packages/adodbapi/test/dbapi20.py new file mode 100644 index 0000000000000000000000000000000000000000..e378b1941d6f0343a13ff60c90747b6c96697888 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/dbapi20.py @@ -0,0 +1,939 @@ +#!/usr/bin/env python +""" Python DB API 2.0 driver compliance unit test suite. + + This software is Public Domain and may be used without restrictions. + + "Now we have booze and barflies entering the discussion, plus rumours of + DBAs on drugs... and I won't tell you what flashes through my mind each + time I read the subject line with 'Anal Compliance' in it. All around + this is turning out to be a thoroughly unwholesome unit test." + + -- Ian Bicking +""" + +__version__ = "$Revision: 1.15.0 $"[11:-2] +__author__ = "Stuart Bishop " + +import sys +import time +import unittest + +if sys.version[0] >= "3": # python 3.x + _BaseException = Exception + + def _failUnless(self, expr, msg=None): + self.assertTrue(expr, msg) + +else: # python 2.x + from exceptions import Exception as _BaseException + + def _failUnless(self, expr, msg=None): + self.failUnless(expr, msg) ## deprecated since Python 2.6 + + +# set this to "True" to follow API 2.0 to the letter +TEST_FOR_NON_IDEMPOTENT_CLOSE = False + +# Revision 1.15 2019/11/22 00:50:00 kf7xm +# Make Turn off IDEMPOTENT_CLOSE a proper skipTest + +# Revision 1.14 2013/05/20 11:02:05 kf7xm +# Add a literal string to the format insertion test to catch trivial re-format algorithms + +# Revision 1.13 2013/05/08 14:31:50 kf7xm +# Quick switch to Turn off IDEMPOTENT_CLOSE test. Also: Silence teardown failure + + +# Revision 1.12 2009/02/06 03:35:11 kf7xm +# Tested okay with Python 3.0, includes last minute patches from Mark H. +# +# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole +# Include latest changes from main branch +# Updates for py3k +# +# Revision 1.11 2005/01/02 02:41:01 zenzen +# Update author email address +# +# Revision 1.10 2003/10/09 03:14:14 zenzen +# Add test for DB API 2.0 optional extension, where database exceptions +# are exposed as attributes on the Connection object. +# +# Revision 1.9 2003/08/13 01:16:36 zenzen +# Minor tweak from Stefan Fleiter +# +# Revision 1.8 2003/04/10 00:13:25 zenzen +# Changes, as per suggestions by M.-A. 
Lemburg +# - Add a table prefix, to ensure namespace collisions can always be avoided +# +# Revision 1.7 2003/02/26 23:33:37 zenzen +# Break out DDL into helper functions, as per request by David Rushby +# +# Revision 1.6 2003/02/21 03:04:33 zenzen +# Stuff from Henrik Ekelund: +# added test_None +# added test_nextset & hooks +# +# Revision 1.5 2003/02/17 22:08:43 zenzen +# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize +# defaults to 1 & generic cursor.callproc test added +# +# Revision 1.4 2003/02/15 00:16:33 zenzen +# Changes, as per suggestions and bug reports by M.-A. Lemburg, +# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar +# - Class renamed +# - Now a subclass of TestCase, to avoid requiring the driver stub +# to use multiple inheritance +# - Reversed the polarity of buggy test in test_description +# - Test exception heirarchy correctly +# - self.populate is now self._populate(), so if a driver stub +# overrides self.ddl1 this change propogates +# - VARCHAR columns now have a width, which will hopefully make the +# DDL even more portible (this will be reversed if it causes more problems) +# - cursor.rowcount being checked after various execute and fetchXXX methods +# - Check for fetchall and fetchmany returning empty lists after results +# are exhausted (already checking for empty lists if select retrieved +# nothing +# - Fix bugs in test_setoutputsize_basic and test_setinputsizes +# +def str2bytes(sval): + if sys.version_info < (3, 0) and isinstance(sval, str): + sval = sval.decode("latin1") + return sval.encode("latin1") # python 3 make unicode into bytes + + +class DatabaseAPI20Test(unittest.TestCase): + """Test a database self.driver for DB API 2.0 compatibility. + This implementation tests Gadfly, but the TestCase + is structured so that other self.drivers can subclass this + test case to ensure compiliance with the DB-API. It is + expected that this TestCase may be expanded in the future + if ambiguities or edge conditions are discovered. + + The 'Optional Extensions' are not yet being tested. + + self.drivers should subclass this test, overriding setUp, tearDown, + self.driver, connect_args and connect_kw_args. Class specification + should be as follows: + + import dbapi20 + class mytest(dbapi20.DatabaseAPI20Test): + [...] + + Don't 'import DatabaseAPI20Test from dbapi20', or you will + confuse the unit tester - just 'import dbapi20'. + """ + + # The self.driver module. This should be the module where the 'connect' + # method is to be found + driver = None + connect_args = () # List of arguments to pass to connect + connect_kw_args = {} # Keyword arguments for connect + table_prefix = "dbapi20test_" # If you need to specify a prefix for tables + + ddl1 = "create table %sbooze (name varchar(20))" % table_prefix + ddl2 = "create table %sbarflys (name varchar(20), drink varchar(30))" % table_prefix + xddl1 = "drop table %sbooze" % table_prefix + xddl2 = "drop table %sbarflys" % table_prefix + + lowerfunc = "lower" # Name of stored procedure to convert string->lowercase + + # Some drivers may need to override these helpers, for example adding + # a 'commit' after the execute. + def executeDDL1(self, cursor): + cursor.execute(self.ddl1) + + def executeDDL2(self, cursor): + cursor.execute(self.ddl2) + + def setUp(self): + """self.drivers should override this method to perform required setup + if any is necessary, such as creating the database. 
+ """ + pass + + def tearDown(self): + """self.drivers should override this method to perform required cleanup + if any is necessary, such as deleting the test database. + The default drops the tables that may be created. + """ + try: + con = self._connect() + try: + cur = con.cursor() + for ddl in (self.xddl1, self.xddl2): + try: + cur.execute(ddl) + con.commit() + except self.driver.Error: + # Assume table didn't exist. Other tests will check if + # execute is busted. + pass + finally: + con.close() + except _BaseException: + pass + + def _connect(self): + try: + r = self.driver.connect(*self.connect_args, **self.connect_kw_args) + except AttributeError: + self.fail("No connect method found in self.driver module") + return r + + def test_connect(self): + con = self._connect() + con.close() + + def test_apilevel(self): + try: + # Must exist + apilevel = self.driver.apilevel + # Must equal 2.0 + self.assertEqual(apilevel, "2.0") + except AttributeError: + self.fail("Driver doesn't define apilevel") + + def test_threadsafety(self): + try: + # Must exist + threadsafety = self.driver.threadsafety + # Must be a valid value + _failUnless(self, threadsafety in (0, 1, 2, 3)) + except AttributeError: + self.fail("Driver doesn't define threadsafety") + + def test_paramstyle(self): + try: + # Must exist + paramstyle = self.driver.paramstyle + # Must be a valid value + _failUnless( + self, paramstyle in ("qmark", "numeric", "named", "format", "pyformat") + ) + except AttributeError: + self.fail("Driver doesn't define paramstyle") + + def test_Exceptions(self): + # Make sure required exceptions exist, and are in the + # defined heirarchy. + if sys.version[0] == "3": # under Python 3 StardardError no longer exists + self.assertTrue(issubclass(self.driver.Warning, Exception)) + self.assertTrue(issubclass(self.driver.Error, Exception)) + else: + self.failUnless(issubclass(self.driver.Warning, Exception)) + self.failUnless(issubclass(self.driver.Error, Exception)) + + _failUnless(self, issubclass(self.driver.InterfaceError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.DatabaseError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.OperationalError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.IntegrityError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.InternalError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.ProgrammingError, self.driver.Error)) + _failUnless(self, issubclass(self.driver.NotSupportedError, self.driver.Error)) + + def test_ExceptionsAsConnectionAttributes(self): + # OPTIONAL EXTENSION + # Test for the optional DB API 2.0 extension, where the exceptions + # are exposed as attributes on the Connection object + # I figure this optional extension will be implemented by any + # driver author who is using this test suite, so it is enabled + # by default. 
+ con = self._connect() + drv = self.driver + _failUnless(self, con.Warning is drv.Warning) + _failUnless(self, con.Error is drv.Error) + _failUnless(self, con.InterfaceError is drv.InterfaceError) + _failUnless(self, con.DatabaseError is drv.DatabaseError) + _failUnless(self, con.OperationalError is drv.OperationalError) + _failUnless(self, con.IntegrityError is drv.IntegrityError) + _failUnless(self, con.InternalError is drv.InternalError) + _failUnless(self, con.ProgrammingError is drv.ProgrammingError) + _failUnless(self, con.NotSupportedError is drv.NotSupportedError) + + def test_commit(self): + con = self._connect() + try: + # Commit must work, even if it doesn't do anything + con.commit() + finally: + con.close() + + def test_rollback(self): + con = self._connect() + # If rollback is defined, it should either work or throw + # the documented exception + if hasattr(con, "rollback"): + try: + con.rollback() + except self.driver.NotSupportedError: + pass + + def test_cursor(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + def test_cursor_isolation(self): + con = self._connect() + try: + # Make sure cursors created from the same connection have + # the documented transaction isolation level + cur1 = con.cursor() + cur2 = con.cursor() + self.executeDDL1(cur1) + cur1.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + cur2.execute("select name from %sbooze" % self.table_prefix) + booze = cur2.fetchall() + self.assertEqual(len(booze), 1) + self.assertEqual(len(booze[0]), 1) + self.assertEqual(booze[0][0], "Victoria Bitter") + finally: + con.close() + + def test_description(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual( + cur.description, + None, + "cursor.description should be none after executing a " + "statement that can return no rows (such as DDL)", + ) + cur.execute("select name from %sbooze" % self.table_prefix) + self.assertEqual( + len(cur.description), 1, "cursor.description describes too many columns" + ) + self.assertEqual( + len(cur.description[0]), + 7, + "cursor.description[x] tuples must have 7 elements", + ) + self.assertEqual( + cur.description[0][0].lower(), + "name", + "cursor.description[x][0] must return column name", + ) + self.assertEqual( + cur.description[0][1], + self.driver.STRING, + "cursor.description[x][1] must return column type. Got %r" + % cur.description[0][1], + ) + + # Make sure self.description gets reset + self.executeDDL2(cur) + self.assertEqual( + cur.description, + None, + "cursor.description not being set to None when executing " + "no-result statements (eg. 
DDL)", + ) + finally: + con.close() + + def test_rowcount(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + _failUnless( + self, + cur.rowcount in (-1, 0), # Bug #543885 + "cursor.rowcount should be -1 or 0 after executing no-result " + "statements", + ) + cur.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + _failUnless( + self, + cur.rowcount in (-1, 1), + "cursor.rowcount should == number or rows inserted, or " + "set to -1 after executing an insert statement", + ) + cur.execute("select name from %sbooze" % self.table_prefix) + _failUnless( + self, + cur.rowcount in (-1, 1), + "cursor.rowcount should == number of rows returned, or " + "set to -1 after executing a select statement", + ) + self.executeDDL2(cur) + self.assertEqual( + cur.rowcount, + -1, + "cursor.rowcount not being reset to -1 after executing " + "no-result statements", + ) + finally: + con.close() + + lower_func = "lower" + + def test_callproc(self): + con = self._connect() + try: + cur = con.cursor() + if self.lower_func and hasattr(cur, "callproc"): + r = cur.callproc(self.lower_func, ("FOO",)) + self.assertEqual(len(r), 1) + self.assertEqual(r[0], "FOO") + r = cur.fetchall() + self.assertEqual(len(r), 1, "callproc produced no result set") + self.assertEqual(len(r[0]), 1, "callproc produced invalid result set") + self.assertEqual(r[0][0], "foo", "callproc produced invalid results") + finally: + con.close() + + def test_close(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + # cursor.execute should raise an Error if called after connection + # closed + self.assertRaises(self.driver.Error, self.executeDDL1, cur) + + # connection.commit should raise an Error if called after connection' + # closed.' + self.assertRaises(self.driver.Error, con.commit) + + # connection.close should raise an Error if called more than once + #!!! reasonable persons differ about the usefulness of this test and this feature !!! + if TEST_FOR_NON_IDEMPOTENT_CLOSE: + self.assertRaises(self.driver.Error, con.close) + else: + self.skipTest( + "Non-idempotent close is considered a bad thing by some people." + ) + + def test_execute(self): + con = self._connect() + try: + cur = con.cursor() + self._paraminsert(cur) + finally: + con.close() + + def _paraminsert(self, cur): + self.executeDDL2(cur) + cur.execute( + "insert into %sbarflys values ('Victoria Bitter', 'thi%%s :may ca%%(u)se? troub:1e')" + % (self.table_prefix) + ) + _failUnless(self, cur.rowcount in (-1, 1)) + + if self.driver.paramstyle == "qmark": + cur.execute( + "insert into %sbarflys values (?, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "numeric": + cur.execute( + "insert into %sbarflys values (:1, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "named": + cur.execute( + "insert into %sbarflys values (:beer, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + {"beer": "Cooper's"}, + ) + elif self.driver.paramstyle == "format": + cur.execute( + "insert into %sbarflys values (%%s, 'thi%%s :may ca%%(u)se? troub:1e')" + % self.table_prefix, + ("Cooper's",), + ) + elif self.driver.paramstyle == "pyformat": + cur.execute( + "insert into %sbarflys values (%%(beer)s, 'thi%%s :may ca%%(u)se? 
troub:1e')" + % self.table_prefix, + {"beer": "Cooper's"}, + ) + else: + self.fail("Invalid paramstyle") + _failUnless(self, cur.rowcount in (-1, 1)) + + cur.execute("select name, drink from %sbarflys" % self.table_prefix) + res = cur.fetchall() + self.assertEqual(len(res), 2, "cursor.fetchall returned too few rows") + beers = [res[0][0], res[1][0]] + beers.sort() + self.assertEqual( + beers[0], + "Cooper's", + "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly", + ) + self.assertEqual( + beers[1], + "Victoria Bitter", + "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly", + ) + trouble = "thi%s :may ca%(u)se? troub:1e" + self.assertEqual( + res[0][1], + trouble, + "cursor.fetchall retrieved incorrect data, or data inserted " + "incorrectly. Got=%s, Expected=%s" % (repr(res[0][1]), repr(trouble)), + ) + self.assertEqual( + res[1][1], + trouble, + "cursor.fetchall retrieved incorrect data, or data inserted " + "incorrectly. Got=%s, Expected=%s" % (repr(res[1][1]), repr(trouble)), + ) + + def test_executemany(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + largs = [("Cooper's",), ("Boag's",)] + margs = [{"beer": "Cooper's"}, {"beer": "Boag's"}] + if self.driver.paramstyle == "qmark": + cur.executemany( + "insert into %sbooze values (?)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "numeric": + cur.executemany( + "insert into %sbooze values (:1)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "named": + cur.executemany( + "insert into %sbooze values (:beer)" % self.table_prefix, margs + ) + elif self.driver.paramstyle == "format": + cur.executemany( + "insert into %sbooze values (%%s)" % self.table_prefix, largs + ) + elif self.driver.paramstyle == "pyformat": + cur.executemany( + "insert into %sbooze values (%%(beer)s)" % (self.table_prefix), + margs, + ) + else: + self.fail("Unknown paramstyle") + _failUnless( + self, + cur.rowcount in (-1, 2), + "insert using cursor.executemany set cursor.rowcount to " + "incorrect value %r" % cur.rowcount, + ) + cur.execute("select name from %sbooze" % self.table_prefix) + res = cur.fetchall() + self.assertEqual( + len(res), 2, "cursor.fetchall retrieved incorrect number of rows" + ) + beers = [res[0][0], res[1][0]] + beers.sort() + self.assertEqual( + beers[0], "Boag's", 'incorrect data "%s" retrieved' % beers[0] + ) + self.assertEqual(beers[1], "Cooper's", "incorrect data retrieved") + finally: + con.close() + + def test_fetchone(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchone should raise an Error if called before + # executing a select-type query + self.assertRaises(self.driver.Error, cur.fetchone) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + self.executeDDL1(cur) + self.assertRaises(self.driver.Error, cur.fetchone) + + cur.execute("select name from %sbooze" % self.table_prefix) + self.assertEqual( + cur.fetchone(), + None, + "cursor.fetchone should return None if a query retrieves " "no rows", + ) + _failUnless(self, cur.rowcount in (-1, 0)) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + cur.execute( + "insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix) + ) + self.assertRaises(self.driver.Error, cur.fetchone) + + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchone() + self.assertEqual( + len(r), 1, "cursor.fetchone should have 
retrieved a single row" + ) + self.assertEqual( + r[0], "Victoria Bitter", "cursor.fetchone retrieved incorrect data" + ) + self.assertEqual( + cur.fetchone(), + None, + "cursor.fetchone should return None if no more rows available", + ) + _failUnless(self, cur.rowcount in (-1, 1)) + finally: + con.close() + + samples = [ + "Carlton Cold", + "Carlton Draft", + "Mountain Goat", + "Redback", + "Victoria Bitter", + "XXXX", + ] + + def _populate(self): + """Return a list of sql commands to setup the DB for the fetch + tests. + """ + populate = [ + "insert into %sbooze values ('%s')" % (self.table_prefix, s) + for s in self.samples + ] + return populate + + def test_fetchmany(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchmany should raise an Error if called without + # issuing a query + self.assertRaises(self.driver.Error, cur.fetchmany, 4) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchmany() + self.assertEqual( + len(r), + 1, + "cursor.fetchmany retrieved incorrect number of rows, " + "default of arraysize is one.", + ) + cur.arraysize = 10 + r = cur.fetchmany(3) # Should get 3 rows + self.assertEqual( + len(r), 3, "cursor.fetchmany retrieved incorrect number of rows" + ) + r = cur.fetchmany(4) # Should get 2 more + self.assertEqual( + len(r), 2, "cursor.fetchmany retrieved incorrect number of rows" + ) + r = cur.fetchmany(4) # Should be an empty sequence + self.assertEqual( + len(r), + 0, + "cursor.fetchmany should return an empty sequence after " + "results are exhausted", + ) + _failUnless(self, cur.rowcount in (-1, 6)) + + # Same as above, using cursor.arraysize + cur.arraysize = 4 + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchmany() # Should get 4 rows + self.assertEqual( + len(r), 4, "cursor.arraysize not being honoured by fetchmany" + ) + r = cur.fetchmany() # Should get 2 more + self.assertEqual(len(r), 2) + r = cur.fetchmany() # Should be an empty sequence + self.assertEqual(len(r), 0) + _failUnless(self, cur.rowcount in (-1, 6)) + + cur.arraysize = 6 + cur.execute("select name from %sbooze" % self.table_prefix) + rows = cur.fetchmany() # Should get all rows + _failUnless(self, cur.rowcount in (-1, 6)) + self.assertEqual(len(rows), 6) + self.assertEqual(len(rows), 6) + rows = [r[0] for r in rows] + rows.sort() + + # Make sure we get the right data back out + for i in range(0, 6): + self.assertEqual( + rows[i], + self.samples[i], + "incorrect data retrieved by cursor.fetchmany", + ) + + rows = cur.fetchmany() # Should return an empty list + self.assertEqual( + len(rows), + 0, + "cursor.fetchmany should return an empty sequence if " + "called after the whole result set has been fetched", + ) + _failUnless(self, cur.rowcount in (-1, 6)) + + self.executeDDL2(cur) + cur.execute("select name from %sbarflys" % self.table_prefix) + r = cur.fetchmany() # Should get empty sequence + self.assertEqual( + len(r), + 0, + "cursor.fetchmany should return an empty sequence if " + "query retrieved no rows", + ) + _failUnless(self, cur.rowcount in (-1, 0)) + + finally: + con.close() + + def test_fetchall(self): + con = self._connect() + try: + cur = con.cursor() + # cursor.fetchall should raise an Error if called + # without executing a query that may return rows (such + # as a select) + self.assertRaises(self.driver.Error, cur.fetchall) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + # cursor.fetchall 
should raise an Error if called + # after executing a a statement that cannot return rows + self.assertRaises(self.driver.Error, cur.fetchall) + + cur.execute("select name from %sbooze" % self.table_prefix) + rows = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, len(self.samples))) + self.assertEqual( + len(rows), + len(self.samples), + "cursor.fetchall did not retrieve all rows", + ) + rows = [r[0] for r in rows] + rows.sort() + for i in range(0, len(self.samples)): + self.assertEqual( + rows[i], self.samples[i], "cursor.fetchall retrieved incorrect rows" + ) + rows = cur.fetchall() + self.assertEqual( + len(rows), + 0, + "cursor.fetchall should return an empty list if called " + "after the whole result set has been fetched", + ) + _failUnless(self, cur.rowcount in (-1, len(self.samples))) + + self.executeDDL2(cur) + cur.execute("select name from %sbarflys" % self.table_prefix) + rows = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, 0)) + self.assertEqual( + len(rows), + 0, + "cursor.fetchall should return an empty list if " + "a select query returns no rows", + ) + + finally: + con.close() + + def test_mixedfetch(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute("select name from %sbooze" % self.table_prefix) + rows1 = cur.fetchone() + rows23 = cur.fetchmany(2) + rows4 = cur.fetchone() + rows56 = cur.fetchall() + _failUnless(self, cur.rowcount in (-1, 6)) + self.assertEqual( + len(rows23), 2, "fetchmany returned incorrect number of rows" + ) + self.assertEqual( + len(rows56), 2, "fetchall returned incorrect number of rows" + ) + + rows = [rows1[0]] + rows.extend([rows23[0][0], rows23[1][0]]) + rows.append(rows4[0]) + rows.extend([rows56[0][0], rows56[1][0]]) + rows.sort() + for i in range(0, len(self.samples)): + self.assertEqual( + rows[i], self.samples[i], "incorrect data retrieved or inserted" + ) + finally: + con.close() + + def help_nextset_setUp(self, cur): + """Should create a procedure called deleteme + that returns two result sets, first the + number of rows in booze then "name from booze" + """ + raise NotImplementedError("Helper not implemented") + # sql=""" + # create procedure deleteme as + # begin + # select count(*) from booze + # select name from booze + # end + # """ + # cur.execute(sql) + + def help_nextset_tearDown(self, cur): + "If cleaning up is needed after nextSetTest" + raise NotImplementedError("Helper not implemented") + # cur.execute("drop procedure deleteme") + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + if not hasattr(cur, "nextset"): + return + + try: + self.executeDDL1(cur) + sql = self._populate() + for sql in self._populate(): + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc("deleteme") + numberofrows = cur.fetchone() + assert numberofrows[0] == len(self.samples) + assert cur.nextset() + names = cur.fetchall() + assert len(names) == len(self.samples) + s = cur.nextset() + assert s == None, "No more return sets, should return None" + finally: + self.help_nextset_tearDown(cur) + + finally: + con.close() + + def test_nextset(self): + raise NotImplementedError("Drivers need to override this test") + + def test_arraysize(self): + # Not much here - rest of the tests for this are in test_fetchmany + con = self._connect() + try: + cur = con.cursor() + _failUnless( + self, hasattr(cur, "arraysize"), "cursor.arraysize must be defined" + ) + finally: + con.close() + + def 
test_setinputsizes(self): + con = self._connect() + try: + cur = con.cursor() + cur.setinputsizes((25,)) + self._paraminsert(cur) # Make sure cursor still works + finally: + con.close() + + def test_setoutputsize_basic(self): + # Basic test is to make sure setoutputsize doesn't blow up + con = self._connect() + try: + cur = con.cursor() + cur.setoutputsize(1000) + cur.setoutputsize(2000, 0) + self._paraminsert(cur) # Make sure the cursor still works + finally: + con.close() + + def test_setoutputsize(self): + # Real test for setoutputsize is driver dependant + raise NotImplementedError("Driver needed to override this test") + + def test_None(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + cur.execute("insert into %sbooze values (NULL)" % self.table_prefix) + cur.execute("select name from %sbooze" % self.table_prefix) + r = cur.fetchall() + self.assertEqual(len(r), 1) + self.assertEqual(len(r[0]), 1) + self.assertEqual(r[0][0], None, "NULL value not returned as None") + finally: + con.close() + + def test_Date(self): + d1 = self.driver.Date(2002, 12, 25) + d2 = self.driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(d1),str(d2)) + + def test_Time(self): + t1 = self.driver.Time(13, 45, 30) + t2 = self.driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Timestamp(self): + t1 = self.driver.Timestamp(2002, 12, 25, 13, 45, 30) + t2 = self.driver.TimestampFromTicks( + time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)) + ) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Binary(self): + b = self.driver.Binary(str2bytes("Something")) + b = self.driver.Binary(str2bytes("")) + + def test_STRING(self): + _failUnless( + self, hasattr(self.driver, "STRING"), "module.STRING must be defined" + ) + + def test_BINARY(self): + _failUnless( + self, hasattr(self.driver, "BINARY"), "module.BINARY must be defined." + ) + + def test_NUMBER(self): + _failUnless( + self, hasattr(self.driver, "NUMBER"), "module.NUMBER must be defined." + ) + + def test_DATETIME(self): + _failUnless( + self, hasattr(self.driver, "DATETIME"), "module.DATETIME must be defined." + ) + + def test_ROWID(self): + _failUnless( + self, hasattr(self.driver, "ROWID"), "module.ROWID must be defined." + ) diff --git a/MLPY/Lib/site-packages/adodbapi/test/is64bit.py b/MLPY/Lib/site-packages/adodbapi/test/is64bit.py new file mode 100644 index 0000000000000000000000000000000000000000..ed390fad0090e54d613e03b5df202cc45ce30892 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/is64bit.py @@ -0,0 +1,41 @@ +"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version""" +import sys + + +def Python(): + if sys.platform == "cli": # IronPython + import System + + return System.IntPtr.Size == 8 + else: + try: + return sys.maxsize > 2147483647 + except AttributeError: + return sys.maxint > 2147483647 + + +def os(): + import platform + + pm = platform.machine() + if pm != ".." 
and pm.endswith("64"): # recent Python (not Iron) + return True + else: + import os + + if "PROCESSOR_ARCHITEW6432" in os.environ: + return True # 32 bit program running on 64 bit Windows + try: + return os.environ["PROCESSOR_ARCHITECTURE"].endswith( + "64" + ) # 64 bit Windows 64 bit program + except IndexError: + pass # not Windows + try: + return "64" in platform.architecture()[0] # this often works in Linux + except: + return False # is an older version of Python, assume also an older os (best we can guess) + + +if __name__ == "__main__": + print("is64bit.Python() =", Python(), "is64bit.os() =", os()) diff --git a/MLPY/Lib/site-packages/adodbapi/test/setuptestframework.py b/MLPY/Lib/site-packages/adodbapi/test/setuptestframework.py new file mode 100644 index 0000000000000000000000000000000000000000..83fe5ca9db4951693ec098245f17e6d2be7507ce --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/setuptestframework.py @@ -0,0 +1,134 @@ +#!/usr/bin/python2 +# Configure this in order to run the testcases. +"setuptestframework.py v 2.6.0.8" +import os +import shutil +import sys +import tempfile + +try: + OSErrors = (WindowsError, OSError) +except NameError: # not running on Windows + OSErrors = OSError + + +def maketemp(): + temphome = tempfile.gettempdir() + tempdir = os.path.join(temphome, "adodbapi_test") + try: + os.mkdir(tempdir) + except: + pass + return tempdir + + +def _cleanup_function(testfolder, mdb_name): + try: + os.unlink(os.path.join(testfolder, mdb_name)) + except: + pass # mdb database not present + try: + shutil.rmtree(testfolder) + print(" cleaned up folder", testfolder) + except: + pass # test package not present + + +def getcleanupfunction(): + return _cleanup_function + + +def find_ado_path(): + adoName = os.path.normpath(os.getcwd() + "/../../adodbapi.py") + adoPackage = os.path.dirname(adoName) + return adoPackage + + +# make a new package directory for the test copy of ado +def makeadopackage(testfolder): + adoName = os.path.normpath(os.getcwd() + "/../adodbapi.py") + adoPath = os.path.dirname(adoName) + if os.path.exists(adoName): + newpackage = os.path.join(testfolder, "adodbapi") + try: + os.mkdir(newpackage) + except OSErrors: + print( + "*Note: temporary adodbapi package already exists: may be two versions running?" + ) + for f in os.listdir(adoPath): + if f.endswith(".py"): + shutil.copy(os.path.join(adoPath, f), newpackage) + if sys.version_info >= (3, 0): # only when running Py3.n + save = sys.stdout + sys.stdout = None + from lib2to3.main import main # use 2to3 to make test package + + main("lib2to3.fixes", args=["-n", "-w", newpackage]) + sys.stdout = save + return testfolder + else: + raise EnvironmentError("Connot find source of adodbapi to test.") + + +def makemdb(testfolder, mdb_name): + # following setup code borrowed from pywin32 odbc test suite + # kindly contributed by Frank Millman. + import os + + _accessdatasource = os.path.join(testfolder, mdb_name) + if os.path.isfile(_accessdatasource): + print("using JET database=", _accessdatasource) + else: + try: + from win32com.client import constants + from win32com.client.gencache import EnsureDispatch + + win32 = True + except ImportError: # perhaps we are running IronPython + win32 = False # iron Python + try: + from System import Activator, Type + except: + pass + + # Create a brand-new database - what is the story with these? 
+ dbe = None + for suffix in (".36", ".35", ".30"): + try: + if win32: + dbe = EnsureDispatch("DAO.DBEngine" + suffix) + else: + type = Type.GetTypeFromProgID("DAO.DBEngine" + suffix) + dbe = Activator.CreateInstance(type) + break + except: + pass + if dbe: + print(" ...Creating ACCESS db at " + _accessdatasource) + if win32: + workspace = dbe.Workspaces(0) + newdb = workspace.CreateDatabase( + _accessdatasource, constants.dbLangGeneral, constants.dbVersion40 + ) + else: + newdb = dbe.CreateDatabase( + _accessdatasource, ";LANGID=0x0409;CP=1252;COUNTRY=0" + ) + newdb.Close() + else: + print(" ...copying test ACCESS db to " + _accessdatasource) + mdbName = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "examples", "test.mdb") + ) + import shutil + + shutil.copy(mdbName, _accessdatasource) + + return _accessdatasource + + +if __name__ == "__main__": + print("Setting up a Jet database for server to use for remote testing...") + temp = maketemp() + makemdb(temp, "server_test.mdb") diff --git a/MLPY/Lib/site-packages/adodbapi/test/test_adodbapi_dbapi20.py b/MLPY/Lib/site-packages/adodbapi/test/test_adodbapi_dbapi20.py new file mode 100644 index 0000000000000000000000000000000000000000..31bbae48274b3871bcd0dcfc7a608a16082a6d5f --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/test_adodbapi_dbapi20.py @@ -0,0 +1,200 @@ +print("This module depends on the dbapi20 compliance tests created by Stuart Bishop") +print("(see db-sig mailing list history for info)") +import platform +import sys +import unittest + +import dbapi20 +import setuptestframework + +testfolder = setuptestframework.maketemp() +if "--package" in sys.argv: + pth = setuptestframework.makeadopackage(testfolder) + sys.argv.remove("--package") +else: + pth = setuptestframework.find_ado_path() +if pth not in sys.path: + sys.path.insert(1, pth) +# function to clean up the temporary folder -- calling program must run this function before exit. 
+cleanup = setuptestframework.getcleanupfunction() + +import adodbapi +import adodbapi.is64bit as is64bit + +db = adodbapi + +if "--verbose" in sys.argv: + db.adodbapi.verbose = 3 + +print(adodbapi.version) +print("Tested with dbapi20 %s" % dbapi20.__version__) + +try: + onWindows = bool(sys.getwindowsversion()) # seems to work on all versions of Python +except: + onWindows = False + +node = platform.node() + +conn_kws = {} +host = "testsql.2txt.us,1430" # if None, will use macro to fill in node name +instance = r"%s\SQLEXPRESS" +conn_kws["name"] = "adotest" + +conn_kws["user"] = "adotestuser" # None implies Windows security +conn_kws["password"] = "Sq1234567" +# macro definition for keyword "security" using macro "auto_security" +conn_kws["macro_auto_security"] = "security" + +if host is None: + conn_kws["macro_getnode"] = ["host", instance] +else: + conn_kws["host"] = host + +conn_kws[ + "provider" +] = "Provider=MSOLEDBSQL;DataTypeCompatibility=80;MARS Connection=True;" +connStr = "%(provider)s; %(security)s; Initial Catalog=%(name)s;Data Source=%(host)s" + +if onWindows and node != "z-PC": + pass # default should make a local SQL Server connection +elif node == "xxx": # try Postgres database + _computername = "25.223.161.222" + _databasename = "adotest" + _username = "adotestuser" + _password = "12345678" + _driver = "PostgreSQL Unicode" + _provider = "" + connStr = "%sDriver={%s};Server=%s;Database=%s;uid=%s;pwd=%s;" % ( + _provider, + _driver, + _computername, + _databasename, + _username, + _password, + ) +elif node == "yyy": # ACCESS data base is known to fail some tests. + if is64bit.Python(): + driver = "Microsoft.ACE.OLEDB.12.0" + else: + driver = "Microsoft.Jet.OLEDB.4.0" + testmdb = setuptestframework.makemdb(testfolder) + connStr = r"Provider=%s;Data Source=%s" % (driver, testmdb) +else: # try a remote connection to an SQL server + conn_kws["proxy_host"] = "25.44.77.176" + import adodbapi.remote + + db = adodbapi.remote + +print("Using Connection String like=%s" % connStr) +print("Keywords=%s" % repr(conn_kws)) + + +class test_adodbapi(dbapi20.DatabaseAPI20Test): + driver = db + connect_args = (connStr,) + connect_kw_args = conn_kws + + def __init__(self, arg): + dbapi20.DatabaseAPI20Test.__init__(self, arg) + + def getTestMethodName(self): + return self.id().split(".")[-1] + + def setUp(self): + # Call superclass setUp In case this does something in the + # future + dbapi20.DatabaseAPI20Test.setUp(self) + if self.getTestMethodName() == "test_callproc": + con = self._connect() + engine = con.dbms_name + ## print('Using database Engine=%s' % engine) ## + if engine != "MS Jet": + sql = """ + create procedure templower + @theData varchar(50) + as + select lower(@theData) + """ + else: # Jet + sql = """ + create procedure templower + (theData varchar(50)) + as + select lower(theData); + """ + cur = con.cursor() + try: + cur.execute(sql) + con.commit() + except: + pass + cur.close() + con.close() + self.lower_func = "templower" + + def tearDown(self): + if self.getTestMethodName() == "test_callproc": + con = self._connect() + cur = con.cursor() + try: + cur.execute("drop procedure templower") + except: + pass + con.commit() + dbapi20.DatabaseAPI20Test.tearDown(self) + + def help_nextset_setUp(self, cur): + "Should create a procedure called deleteme" + 'that returns two result sets, first the number of rows in booze then "name from booze"' + sql = """ + create procedure deleteme as + begin + select count(*) from %sbooze + select name from %sbooze + end + """ % ( + 
self.table_prefix, + self.table_prefix, + ) + cur.execute(sql) + + def help_nextset_tearDown(self, cur): + "If cleaning up is needed after nextSetTest" + try: + cur.execute("drop procedure deleteme") + except: + pass + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + + stmts = [self.ddl1] + self._populate() + for sql in stmts: + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc("deleteme") + numberofrows = cur.fetchone() + assert numberofrows[0] == 6 + assert cur.nextset() + names = cur.fetchall() + assert len(names) == len(self.samples) + s = cur.nextset() + assert s == None, "No more return sets, should return None" + finally: + try: + self.help_nextset_tearDown(cur) + finally: + con.close() + + def test_setoutputsize(self): + pass + + +if __name__ == "__main__": + unittest.main() + cleanup(testfolder, None) diff --git a/MLPY/Lib/site-packages/adodbapi/test/tryconnection.py b/MLPY/Lib/site-packages/adodbapi/test/tryconnection.py new file mode 100644 index 0000000000000000000000000000000000000000..9d3901a8c0449fcb3a2e560d7917643db25e0f31 --- /dev/null +++ b/MLPY/Lib/site-packages/adodbapi/test/tryconnection.py @@ -0,0 +1,33 @@ +remote = False # automatic testing of remote access has been removed here + + +def try_connection(verbose, *args, **kwargs): + import adodbapi + + dbconnect = adodbapi.connect + try: + s = dbconnect(*args, **kwargs) # connect to server + if verbose: + print("Connected to:", s.connection_string) + print("which has tables:", s.get_table_names()) + s.close() # thanks, it worked, goodbye + except adodbapi.DatabaseError as inst: + print(inst.args[0]) # should be the error message + print("***Failed getting connection using=", repr(args), repr(kwargs)) + return False, (args, kwargs), None + + print(" (successful)") + + return True, (args, kwargs, remote), dbconnect + + +def try_operation_with_expected_exception( + expected_exception_list, some_function, *args, **kwargs +): + try: + some_function(*args, **kwargs) + except expected_exception_list as e: + return True, e + except: + raise # an exception other than the expected occurred + return False, "The expected exception did not occur" diff --git a/MLPY/Lib/site-packages/attr/__init__.py b/MLPY/Lib/site-packages/attr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9226258a2d58777f1d5536c5695bbf1b4a635991 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/__init__.py @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: MIT + +""" +Classes Without Boilerplate +""" + +from functools import partial +from typing import Callable + +from . 
import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._compat import Protocol +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +__all__ = [ + "Attribute", + "AttrsInstance", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. + """ + + def __getattr__(name: str) -> str: + dunder_to_metadata = { + "__title__": "Name", + "__copyright__": "", + "__version__": "version", + "__version_info__": "version", + "__description__": "summary", + "__uri__": "", + "__url__": "", + "__author__": "", + "__email__": "", + "__license__": "license", + } + if name not in dunder_to_metadata: + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + import sys + import warnings + + if sys.version_info < (3, 8): + from importlib_metadata import metadata + else: + from importlib.metadata import metadata + + if name not in ("__version__", "__version_info__"): + warnings.warn( + f"Accessing {mod_name}.{name} is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for attrs's packaging metadata.", + DeprecationWarning, + stacklevel=2, + ) + + meta = metadata("attrs") + if name == "__license__": + return "MIT" + if name == "__copyright__": + return "Copyright (c) 2015 Hynek Schlawack" + if name in ("__uri__", "__url__"): + return meta["Project-URL"].split(" ", 1)[-1] + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + if name == "__author__": + return meta["Author-email"].rsplit(" ", 1)[0] + if name == "__email__": + return meta["Author-email"].rsplit("<", 1)[1][:-1] + + return meta[dunder_to_metadata[name]] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/MLPY/Lib/site-packages/attr/__init__.pyi b/MLPY/Lib/site-packages/attr/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..37a208732acf774ed369811d51f1393798f22148 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/__init__.pyi @@ -0,0 +1,555 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Mapping, + Optional, + Protocol, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . 
import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[["Attribute[_T]", _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List["Attribute[Any]"]], List["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# We subclass this here to keep the protocol's qualified name clean. +class AttrsInstance(AttrsInstance_, Protocol): + pass + +_A = TypeVar("_A", bound=type[AttrsInstance]) + +class _Nothing(enum.Enum): + NOTHING = enum.auto() + +NOTHING = _Nothing.NOTHING + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... + +class Attribute(Generic[_T]): + name: str + default: Optional[_T] + validator: Optional[_ValidatorType[_T]] + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: Optional[bool] + init: bool + converter: Optional[_ConverterType] + metadata: Dict[Any, Any] + type: Optional[Type[_T]] + kw_only: bool + on_setattr: _OnSetAttrType + alias: Optional[str] + + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. 
+# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. 
+@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + alias: Optional[str] = ..., + type: Optional[type] = ..., +) -> Any: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> _C: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., + unsafe_hash: Optional[bool] = ..., +) -> Callable[[_C], _C]: ... 
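A minimal, illustrative sketch of the define/field API that the stub overloads above and below describe (assuming the attrs package itself, not just this .pyi file, is importable; the Point class and its fields are hypothetical examples, not part of the vendored stubs):

import attr

@attr.define                         # next-gen API: __init__, __eq__ and slots are generated
class Point:
    x: int = attr.field(default=0)
    y: int = attr.field(default=0)

assert Point() == Point(0, 0)        # generated __eq__ compares field by field
assert Point(1, 2) != Point(2, 1)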
+@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + unsafe_hash: Optional[bool] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: Type[AttrsInstance]) -> Any: ... +def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. 
`make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + class_body: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... 
+ +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/MLPY/Lib/site-packages/attr/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f83177f08ba81acf6d20f232d5a584dfe771abce Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_cmp.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_cmp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..391fba974e6010ae967016ecc4af64fcdd24e626 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_cmp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_compat.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_compat.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da9279218a579bc7b15e54202447152e1b221000 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_compat.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_config.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8810992422461c3878f80a958d9ef72e225ba7cd Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_funcs.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_funcs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bac2ed8a30ee0e6c946520006e4d6d7f01d580a Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_funcs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_make.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_make.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a99143fbfec302ead1d0309e82a49b85f82c5cf7 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_make.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_next_gen.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_next_gen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb02301c0b61c89a7a8850abca2dbcae30eb5703 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_next_gen.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/_version_info.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/_version_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ffd97d992f3c3977627a135589d4bb704bdabe0 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/_version_info.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/converters.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/converters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac00104c59a80a8bd1291f2b475660b3485205b0 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/converters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/exceptions.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d3c82b147d86d633831a16358f795955ca36fcc9 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/filters.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6ade317cc46fabe278626085fe56d2b6aac301 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/setters.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/setters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c293150894fbed69fa967eaa2745fcd04a086f5 Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/setters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/__pycache__/validators.cpython-39.pyc b/MLPY/Lib/site-packages/attr/__pycache__/validators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d04c9fa061fed172e7fff6f04c57401314b2da6b Binary files /dev/null and b/MLPY/Lib/site-packages/attr/__pycache__/validators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attr/_cmp.py b/MLPY/Lib/site-packages/attr/_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..a4a35e08fc9d9b078a11edc3236d7e27027cd28e --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_cmp.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attrs.field`'s ``eq``, ``order``, + and ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if at least + one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality of two + objects. + :param Optional[callable] lt: `callable` used to evaluate whether one + object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether one + object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether one + object is greater than another object. + :param Optional[callable] ge: `callable` used to evaluate whether one + object is greater than or equal to another object. + + :param bool require_same_type: When `True`, equality and ordering methods + will return `NotImplemented` if objects are not of the same type. + + :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. 
+ num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = _make_ne() + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = types.new_class( + class_name, (object,), {}, lambda ns: ns.update(body) + ) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. + if 0 < num_order_functions < 4: + if not has_eq_function: + # functools.total_ordering requires __eq__ to be defined, + # so raise early error here to keep a nice stack. + msg = "eq must be define is order to complete ordering from lt, le, gt, ge." + raise ValueError(msg) + type_ = functools.total_ordering(type_) + + return type_ + + +def _make_init(): + """ + Create __init__ method. + """ + + def __init__(self, value): + """ + Initialize object with *value*. + """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. + """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = f"__{name}__" + method.__doc__ = ( + f"Return a {_operation_names[name]} b. Computed by attrs." + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + return all(func(self, other) for func in self._requirements) + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/MLPY/Lib/site-packages/attr/_cmp.pyi b/MLPY/Lib/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f3dcdc1a754146303b28009cbff9ec9bf960e450 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Any, Callable, Optional, Type + +_CompareWithType = Callable[[Any, Any], bool] + +def cmp_using( + eq: Optional[_CompareWithType] = ..., + lt: Optional[_CompareWithType] = ..., + le: Optional[_CompareWithType] = ..., + gt: Optional[_CompareWithType] = ..., + ge: Optional[_CompareWithType] = ..., + require_same_type: bool = ..., + class_name: str = ..., +) -> Type: ... 
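A minimal, illustrative sketch of cmp_using as described in its docstring above (assuming attrs is importable; the Name class and the case-insensitive comparator are hypothetical examples, not part of the vendored files):

import attr

# cmp_using builds a comparator class; passing it as a field's eq makes that
# field compare using the supplied callable instead of the default ==.
case_insensitive = attr.cmp_using(eq=lambda a, b: a.lower() == b.lower())

@attr.s
class Name:
    value = attr.ib(eq=case_insensitive)

assert Name("Alice") == Name("ALICE")   # equality goes through the custom eq callable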
diff --git a/MLPY/Lib/site-packages/attr/_compat.py b/MLPY/Lib/site-packages/attr/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..46b05ca453773da7f2972f023c4a4f447b44e824 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_compat.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: MIT + +import inspect +import platform +import sys +import threading + +from collections.abc import Mapping, Sequence # noqa: F401 +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_8_PLUS = sys.version_info[:2] >= (3, 8) +PY_3_9_PLUS = sys.version_info[:2] >= (3, 9) +PY310 = sys.version_info[:2] >= (3, 10) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) + + +if sys.version_info < (3, 8): + try: + from typing_extensions import Protocol + except ImportError: # pragma: no cover + Protocol = object +else: + from typing import Protocol # noqa: F401 + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/MLPY/Lib/site-packages/attr/_config.py b/MLPY/Lib/site-packages/attr/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..9c245b1461abd5dc5143f69bc74c75ae50fabdc5 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." + raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. 
deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/MLPY/Lib/site-packages/attr/_funcs.py b/MLPY/Lib/site-packages/attr/_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..a888991d98fdac72abb6e5ce8ac6d620a8f0e54b --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_funcs.py @@ -0,0 +1,483 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import PY_3_9_PLUS, get_generic_base +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + :param inst: Instance of an *attrs*-decorated class. + :param bool recurse: Recurse into classes that are also + *attrs*-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + :param Optional[callable] value_serializer: A hook that is called for every + attribute or dict key/value. It receives the current instance, field + and value and must return the (updated) value. The hook is run *after* + the optional *filter* has been applied. + + :rtype: return type of *dict_factory* + + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 If a dict has a collection for a key, it is + serialized as a tuple. 
+ """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + items = [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + try: + rv[a.name] = cf(items) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which appears, for a namedturle) + rv[a.name] = cf(*items) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the *attrs* attribute values of *inst* as a tuple. + + Optionally recurse into other *attrs*-decorated classes. + + :param inst: Instance of an *attrs*-decorated class. + :param bool recurse: Recurse into classes that are also + *attrs*-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. 
+ :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. + :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + items = [ + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + for j in v + ] + try: + rv.append(cf(items)) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which appears, for a namedturle) + rv.append(cf(*items)) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv, + ) + for kk, vv in v.items() + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with *attrs* attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. + + :rtype: bool + """ + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is not None: + return True + + # No attrs, maybe it's a specialized generic (A[str])? + generic_base = get_generic_base(cls) + if generic_base is not None: + generic_attrs = getattr(generic_base, "__attrs_attrs__", None) + if generic_attrs is not None: + # Stick it on here for speed next time. + cls.__attrs_attrs__ = generic_attrs + return generic_attrs is not None + return False + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + This is different from `evolve` that applies the changes to the arguments + that create the new instance. + + `evolve`'s behavior is preferable, but there are `edge cases`_ where it + doesn't work. Therefore `assoc` is deprecated, but will not be removed. + + .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251 + + :param inst: Instance of a class with *attrs* attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name* + couldn't be found on *cls*. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. deprecated:: 17.1.0 + Use `attrs.evolve` instead if you can. + This function will not be removed du to the slightly different approach + compared to `attrs.evolve`. 
+ """ + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in changes.items(): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + msg = f"{k} is not an attrs attribute on {new.__class__}." + raise AttrsAttributeNotFoundError(msg) + _obj_setattr(new, k, v) + return new + + +def evolve(*args, **changes): + """ + Create a new instance, based on the first positional argument with + *changes* applied. + + :param inst: Instance of a class with *attrs* attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise TypeError: If *attr_name* couldn't be found in the class + ``__init__``. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. + """ + # Try to get instance by positional argument first. + # Use changes otherwise and warn it'll break. + if args: + try: + (inst,) = args + except ValueError: + msg = f"evolve() takes 1 positional argument, but {len(args)} were given" + raise TypeError(msg) from None + else: + try: + inst = changes.pop("inst") + except KeyError: + msg = "evolve() missing 1 required positional argument: 'inst'" + raise TypeError(msg) from None + + import warnings + + warnings.warn( + "Passing the instance per keyword argument is deprecated and " + "will stop working in, or after, April 2024.", + DeprecationWarning, + stacklevel=2, + ) + + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = a.alias + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +def resolve_types( + cls, globalns=None, localns=None, attribs=None, include_extras=True +): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in `Attribute`'s *type* + field. In other words, you don't need to resolve your types if you only + use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, e.g. if the name only exists + inside a method, you may pass *globalns* or *localns* to specify other + dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + :param type cls: Class to resolve. + :param Optional[dict] globalns: Dictionary containing global variables. + :param Optional[dict] localns: Dictionary containing local variables. + :param Optional[list] attribs: List of attribs for the given class. + This is necessary when calling from inside a ``field_transformer`` + since *cls* is not an *attrs* class yet. + :param bool include_extras: Resolve more accurately, if possible. + Pass ``include_extras`` to ``typing.get_hints``, if supported by the + typing module. On supported Python versions (3.9+), this resolves the + types more accurately. + + :raise TypeError: If *cls* is not a class. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class and you didn't pass any attribs. + :raise NameError: If types cannot be resolved because of missing variables. + + :returns: *cls* so you can use this function also as a class decorator. 
+ Please note that you have to apply it **after** `attrs.define`. That + means the decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + .. versionadded:: 23.1.0 *include_extras* + + """ + # Since calling get_type_hints is expensive we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + kwargs = {"globalns": globalns, "localns": localns} + + if PY_3_9_PLUS: + kwargs["include_extras"] = include_extras + + hints = typing.get_type_hints(cls, **kwargs) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _obj_setattr(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/MLPY/Lib/site-packages/attr/_make.py b/MLPY/Lib/site-packages/attr/_make.py new file mode 100644 index 0000000000000000000000000000000000000000..10b4eca779621c819060c4564379fa2c098c36d5 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_make.py @@ -0,0 +1,3119 @@ +# SPDX-License-Identifier: MIT + +import contextlib +import copy +import enum +import functools +import inspect +import itertools +import linecache +import sys +import types +import typing + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + PY310, + PY_3_8_PLUS, + _AnnotationExtractor, + get_generic_base, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. +_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_%s" +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when ``None`` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. 
+ + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with `attr.s` + / `attrs.define` / and so on! + + Please consider using `attrs.field` in new code (``attr.ib`` will *never* + go away, though). + + :param default: A value that is used if an *attrs*-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` will be + raised. + + The default can also be set using decorator notation as shown below. + + .. seealso:: `defaults` + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by *attrs*-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must all + pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown below. + + .. seealso:: :ref:`validators` + + :type validator: `callable` or a `list` of `callable`\\ s. + + :param repr: Include this attribute in the generated ``__repr__`` method. + If ``True``, include the attribute; if ``False``, omit it. By default, + the built-in ``repr()`` function is used. To override how the attribute + value is formatted, pass a ``callable`` that takes a single value and + returns a string. Note that the resulting string is used as-is, i.e. it + will be used directly *instead* of calling ``repr()`` (the default). + :type repr: a `bool` or a `callable` to use a custom function. + + :param eq: If ``True`` (default), include this attribute in the generated + ``__eq__`` and ``__ne__`` methods that check two instances for + equality. To override how the attribute value is compared, pass a + ``callable`` that takes a single value and returns the value to be + compared. + + .. seealso:: `comparison` + :type eq: a `bool` or a `callable`. + + :param order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To + override how the attribute value is ordered, pass a ``callable`` that + takes a single value and returns the value to be ordered. + + .. seealso:: `comparison` + :type order: a `bool` or a `callable`. + + :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the + same value. Must not be mixed with *eq* or *order*. + + .. seealso:: `comparison` + :type cmp: a `bool` or a `callable`. 
+ + :param bool | None hash: Include this attribute in the generated + ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This + is the correct behavior according the Python spec. Setting this value + to anything else than ``None`` is *discouraged*. + + .. seealso:: `hashing` + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + + .. seealso:: `init` + :param callable converter: `callable` that is called by *attrs*-generated + ``__init__`` methods to convert attribute's value to the desired + format. It is given the passed-in value, and the returned value will + be used as the new value of the attribute. The value is converted + before being passed to the validator, if any. + + .. seealso:: :ref:`converters` + :param dict | None metadata: An arbitrary mapping, to be used by + third-party components. See `extending-metadata`. + + :param type: The type of the attribute. Nowadays, the preferred method to + specify the type is using a variable annotation (see :pep:`526`). This + argument is provided for backward compatibility. Regardless of the + approach used, the type will be stored on ``Attribute.type``. + + Please note that *attrs* doesn't do anything with this metadata by + itself. You can use it as part of your own code or for `static type + checking `. + :param bool kw_only: Make this attribute keyword-only in the generated + ``__init__`` (if ``init`` is ``False``, this parameter is ignored). + :param on_setattr: Allows to overwrite the *on_setattr* setting from + `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. + Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `attr.s`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + :param str | None alias: Override this attribute's parameter name in the + generated ``__init__`` method. If left `None`, default to ``name`` + stripped of leading underscores. See `private-attributes`. + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 22.2.0 *alias* + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + msg = "Invalid value for hash. Must be True, False, or None." 
+ raise TypeError(msg) + + if factory is not None: + if default is not NOTHING: + msg = ( + "The `default` and `factory` arguments are mutually exclusive." + ) + raise ValueError(msg) + if not callable(factory): + msg = "The `factory` argument must be a callable." + raise ValueError(msg) + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + alias=alias, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + "Exec" the script with the given global (globs) and local (locs) variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs): + """ + Create the method with the script given and return the method object. + """ + locs = {} + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + + filename = f"{base_filename[:-1]}-{count}>" + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = f"{cls_name}Attributes" + attr_class_template = [ + f"class {attr_class_name}(tuple):", + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))" + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. 
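+    # If so, strip one level of surrounding quotes before the prefix check
+    # below, so the bare string form is matched as well.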
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + *collect_by_mro* is True, collect them in the correct MRO order, otherwise + use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = list(these.items()) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + a = attrib() if a is NOTHING else attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." 
+ ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + attrs = [ + a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a + for a in attrs + ] + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +def _make_cached_property_getattr( + cached_properties, + original_getattr, + cls, +): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). + "def wrapper():", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " if hasattr(super(), '__getattr__'):", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper()", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _obj_setattr.__get__, + "_cls": cls, + "original_getattr": original_getattr, + } + + return _make_method( + "__getattr__", + "\n".join(lines), + unique_filename, + glob, + ) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. 
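+
+    Raises `FrozenInstanceError` on any attempt to set an attribute, except
+    for the ``__cause__``, ``__context__``, and ``__traceback__`` dunders on
+    `BaseException` subclasses, which the interpreter itself needs to set
+    when exceptions are raised and chained.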
+ """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_pre_init_has_args", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + if PY310: + import abc + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. 
+ """ + if self._slots is True: + return self._create_slots_class() + + return self.abc.update_abstractmethods( + self._patch_original_class() + ) + + else: + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _obj_setattr + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _obj_setattr + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = {} + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + if PY_3_8_PLUS: + cached_properties = { + name: cached_property.func + for name, cached_property in cd.items() + if isinstance(cached_property, functools.cached_property) + } + else: + # `functools.cached_property` was introduced in 3.8. + # So can't be used before this. + cached_properties = {} + + # Collect methods with a `__class__` reference that are shadowed in the new class. + # To know to update them. 
+ additional_closure_functions_to_update = [] + if cached_properties: + # Add cached properties to names for slotting. + names += tuple(cached_properties.keys()) + + for name in cached_properties: + # Clear out function from class to avoid clashing. + del cd[name] + + class_annotations = _get_annotations(self._cls) + for name, func in cached_properties.items(): + annotation = inspect.signature(func).return_annotation + if annotation is not inspect.Parameter.empty: + class_annotations[name] = annotation + + original_getattr = cd.get("__getattr__") + if original_getattr is not None: + additional_closure_functions_to_update.append(original_getattr) + + cd["__getattr__"] = _make_cached_property_getattr( + cached_properties, original_getattr, self._cls + ) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . + # If a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in itertools.chain( + cls.__dict__.values(), additional_closure_functions_to_update + ): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. 
+ state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. + if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." 
+ raise ValueError(msg) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _obj_setattr(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + with contextlib.suppress(AttributeError): + method.__module__ = self._cls.__module__ + + with contextlib.suppress(AttributeError): + method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}" + + with contextlib.suppress(AttributeError): + method.__doc__ = ( + "Method generated by attrs for class " + f"{self._cls.__qualname__}." + ) + + return method + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq' and `order`." + raise ValueError(msg) + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq' and `order`." + raise ValueError(msg) + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signal that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. 
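+    # Implement the methods unless the user has defined one of the listed
+    # dunders on the class themselves.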
+ for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, + unsafe_hash=None, +): + r""" + A class decorator that adds :term:`dunder methods` according to the + specified attributes using `attr.ib` or the *these* argument. + + Please consider using `attrs.define` / `attrs.frozen` in new code + (``attr.s`` will *never* go away, though). + + :param these: A dictionary of name to `attr.ib` mappings. This is useful + to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. + + If *these* is not ``None``, *attrs* will *not* search the class body + for attributes and will *not* remove any attributes from it. + + The order is deduced from the order of the attributes inside *these*. + + :type these: `dict` of `str` to `attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. + :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, + *order*, and *hash* arguments explicitly, assume they are set to + ``True`` **unless any** of the involved methods for one of the + arguments is implemented in the *current* class (i.e. it is *not* + inherited from some base class). + + So for example by implementing ``__eq__`` on a class yourself, *attrs* + will deduce ``eq=False`` and will create *neither* ``__eq__`` *nor* + ``__ne__`` (but Python classes come with a sensible ``__ne__`` by + default, so it *should* be enough to only implement ``__eq__`` in most + cases). + + .. warning:: + + If you prevent *attrs* from creating the ordering methods for you + (``order=False``, e.g. by implementing ``__le__``), it becomes + *your* responsibility to make sure its ordering is sound. The best + way is to use the `functools.total_ordering` decorator. + + + Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, *cmp*, + or *hash* overrides whatever *auto_detect* would determine. + + :param bool repr: Create a ``__repr__`` method with a human readable + representation of *attrs* attributes.. + :param bool str: Create a ``__str__`` method that is identical to + ``__repr__``. This is usually not necessary except for `Exception`\ s. + :param bool | None eq: If ``True`` or ``None`` (default), add ``__eq__`` + and ``__ne__`` methods that check two instances for equality. + + They compare the instances as if they were tuples of their *attrs* + attributes if and only if the types of both classes are *identical*! + + .. seealso:: `comparison` + :param bool | None order: If ``True``, add ``__lt__``, ``__le__``, + ``__gt__``, and ``__ge__`` methods that behave like *eq* above and + allow instances to be ordered. If ``None`` (default) mirror value of + *eq*. + + .. seealso:: `comparison` + :param bool | None cmp: Setting *cmp* is equivalent to setting *eq* and + *order* to the same value. Must not be mixed with *eq* or *order*. + + .. 
seealso:: `comparison` + :param bool | None unsafe_hash: If ``None`` (default), the ``__hash__`` + method is generated according how *eq* and *frozen* are set. + + 1. If *both* are True, *attrs* will generate a ``__hash__`` for you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to + None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning the + ``__hash__`` method of the base class will be used (if base class is + ``object``, this means it will fall back to id-based hashing.). + + Although not recommended, you can decide for yourself and force *attrs* + to create one (e.g. if the class is immutable even though you didn't + freeze it programmatically) by passing ``True`` or not. Both of these + cases are rather special and should be used carefully. + + .. seealso:: + + - Our documentation on `hashing`, + - Python's documentation on `object.__hash__`, + - and the `GitHub issue that led to the default \ + behavior `_ for + more details. + + :param bool | None hash: Alias for *unsafe_hash*. *unsafe_hash* takes + precedence. + :param bool init: Create a ``__init__`` method that initializes the *attrs* + attributes. Leading underscores are stripped for the argument name. If + a ``__attrs_pre_init__`` method exists on the class, it will be called + before the class is initialized. If a ``__attrs_post_init__`` method + exists on the class, it will be called after the class is fully + initialized. + + If ``init`` is ``False``, an ``__attrs_init__`` method will be injected + instead. This allows you to define a custom ``__init__`` method that + can do pre-init work such as ``super().__init__()``, and then call + ``__attrs_init__()`` and ``__attrs_post_init__()``. + + .. seealso:: `init` + :param bool slots: Create a :term:`slotted class ` that's + more memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, so + we encourage you to read the :term:`glossary entry `. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + `attrs.exceptions.FrozenInstanceError` is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` method + on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor a runtime performance `impact + ` when initializing new instances. In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using ``object.__setattr__(self, + "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + :param bool weakref_slot: Make instances weak-referenceable. This has no + effect unless ``slots`` is also enabled. + :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated + attributes from the class body. + + In this case, you **must** annotate every field. If *attrs* encounters + a field that is set to an `attr.ib` but lacks a type annotation, an + `attr.exceptions.UnannotatedAttributeError` is raised. Use + ``field_name: typing.Any = attr.ib(...)`` if you don't want to set a + type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. 
Passing an instance of `attrs.Factory` also + works as expected in most cases (see warning below). + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `attr.ib` are **ignored**. + + .. warning:: + For features that use the attribute name to create decorators (e.g. + :ref:`validators `), you still *must* assign `attr.ib` + to them. Otherwise Python will either not find the name or try to + use the default value to call e.g. ``validator`` on it. + + These errors can be quite confusing and probably the most common bug + report on our bug tracker. + + :param bool kw_only: Make all attributes keyword-only in the generated + ``__init__`` (if ``init`` is ``False``, this parameter is ignored). + :param bool cache_hash: Ensure that the object's hash code is computed only + once and stored on the object. If this is set to ``True``, hashing + must be either explicitly or implicitly enabled for this class. If the + hash code is cached, avoid any reassignments of fields involved in hash + code computation or mutations of the objects those fields point to + after object creation. If such changes occur, the behavior of the + object's hash code is undefined. + :param bool auto_exc: If the class subclasses `BaseException` (which + implicitly includes any subclass of any exception), the following + happens to behave like a well-behaved Python exceptions class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids (N.B. *attrs* will + *not* remove existing implementations of ``__hash__`` or the equality + methods. It just won't add own ones.), + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the ``args`` + attribute, + - the value of *str* is ignored leaving ``__str__`` to base classes. + :param bool collect_by_mro: Setting this to `True` fixes the way *attrs* + collects attributes from base classes. The default behavior is + incorrect in certain cases of multiple inheritance. It should be on by + default but is kept off for backward-compatibility. + + .. seealso:: + Issue `#428 `_ + + :param bool | None getstate_setstate: + .. note:: + This is usually only interesting for slotted classes and you should + probably just set *auto_detect* to `True`. + + If `True`, ``__getstate__`` and ``__setstate__`` are generated and + attached to the class. This is necessary for slotted classes to be + pickleable. If left `None`, it's `True` by default for slotted classes + and ``False`` for dict classes. + + If *auto_detect* is `True`, and *getstate_setstate* is left `None`, and + **either** ``__getstate__`` or ``__setstate__`` is detected directly on + the class (i.e. not inherited), it is set to `False` (this is usually + what you want). + + :param on_setattr: A callable that is run whenever the user attempts to set + an attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments + as validators: the instance, the attribute that is being modified, and + the new value. + + If no exception is raised, the attribute is set to the return value of + the callable. + + If a list of callables is passed, they're automatically wrapped in an + `attrs.setters.pipe`. 
+ :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + + :param callable | None field_transformer: + A function that is called with the original class object and all fields + right before *attrs* finalizes the class. You can use this, e.g., to + automatically add converters or validators to fields based on their + types. + + .. seealso:: `transform-fields` + + :param bool match_args: + If `True` (default), set ``__match_args__`` on the class to support + :pep:`634` (Structural Pattern Matching). It is a tuple of all + non-keyword-only ``__init__`` parameter names on Python 3.10 and later. + Ignored on older Python versions. + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + `DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq`` and ``__ne__`` never tried to compared subclasses + to each other. + .. versionchanged:: 19.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider + subclasses comparable anymore. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + .. versionadded:: 19.1.0 *auto_exc* + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + """ + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + msg = "Can't freeze a class with a custom __setattr__." 
+ raise ValueError(msg) + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + nonlocal hash + if ( + hash is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, init must be True." + raise TypeError(msg) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. 
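+
+    The result is used as the *filename* when compiling generated methods and
+    as the key for the fake `linecache` entry added in `_make_method`, so
+    debuggers like PDB can step through the generated code.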
+ """ + return ( + f"" + ) + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + # If eq is custom generated, we need to include the functions in globs + globs = {} + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + hash_def += ", *" + + hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):" + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_hash_cache_field} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_hash_cache_field} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_hash_cache_field}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename, globs) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append(f" {cmp_name}(self.{a.name}),") + others.append(f" {cmp_name}(other.{a.name}),") + else: + lines.append(f" self.{a.name},") + others.append(f" other.{a.name},") + + lines += [*others, " )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. 
+ """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. 
+ + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. + cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* + class. + + :rtype: dict + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with *attrs* attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. 
+ globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _obj_setattr.__get__ + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr('%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. + # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr_get(self)" + ) + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. 
+ # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _init_factory_pat % (a.name,) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None: + # Try to get the type from the converter. + t = _AnnotationExtractor(a.converter).get_first_param_type() + if t: + annotations[arg_name] = t + + if attrs_to_validate: # we can skip this if there are no validators. 
+ names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: # noqa: SIM108 + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr('%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join(f"self.{a.name}" for a in attrs if a.init) + + lines.append(f"BaseException.__init__(self, {vals})") + + args = ", ".join(args) + pre_init_args = args + if kw_only_args: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + pre_init_kw_only_args = ", ".join( + ["%s=%s" % (kw_arg, kw_arg) for kw_arg in kw_only_args] + ) + pre_init_args += ( + ", " if pre_init_args else "" + ) # handle only kwargs and no regular args + pre_init_args += pre_init_kw_only_args + + if pre_init and pre_init_has_args: + # If pre init method has arguments, pass same arguments as `__init__` + lines[0] = "self.__attrs_pre_init__(%s)" % pre_init_args + + return ( + "def %s(self, %s):\n %s\n" + % ( + ("__attrs_init__" if attrs_init else "__init__"), + args, + "\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +def _default_init_alias_for(name: str) -> str: + """ + The default __init__ parameter name for a field. + + This performs private-name adjustment via leading-unscore stripping, + and is the default value of Attribute.alias if not provided. + """ + + return name.lstrip("_") + + +class Attribute: + """ + *Read-only* representation of an attribute. + + .. warning:: + + You should never instantiate this class yourself. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``alias`` (`str`): The __init__ parameter name of the attribute, after + any explicit overrides and default private-attribute-name handling. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. 
+ - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. versionadded:: 22.2.0 *alias* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + "alias", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + alias=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + msg = "Type annotation and type argument cannot both be present" + raise ValueError(msg) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict, + ) + + # Don't use attrs.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attrs.evolve` but that function does not work + with `Attribute`. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. 
+ """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + types.MappingProxyType(dict(value)) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + "alias", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. 
+ + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + `attr.ib`\ s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes inside + *attrs*. Otherwise the order of the definition of the attributes is + used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param dict class_body: An optional dictionary of class attributes for the new class. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." + raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. 
+ cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + # Get return type from last converter. + rt = _AnnotationExtractor(converters[-1]).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + return pipe_converter diff --git a/MLPY/Lib/site-packages/attr/_next_gen.py b/MLPY/Lib/site-packages/attr/_next_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb9f259b53b851336c3135f06cfa377ab3240d7 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_next_gen.py @@ -0,0 +1,229 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + NOTHING, + _frozen_setattrs, + _ng_default_on_setattr, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + Define an *attrs* class. + + Differences to the classic `attr.s` that it uses underneath: + + - Automatically detect whether or not *auto_attribs* should be `True` (c.f. + *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- if + *frozen* is `False`. + - *slots=True* + + .. 
caution:: + + Usually this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so please + make sure to read :term:`slotted classes`. + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around for + backwards-compatibility have been removed. + + Please note that these are all defaults and you can change them as you + wish. + + :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves + exactly like `attr.s`. If left `None`, `attr.s` will try to guess: + + 1. If any attributes are annotated and no unannotated `attrs.fields`\ s + are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.fields`\ s. + + For now, please refer to `attr.s` for the rest of the parameters. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." + raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + .. 
versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/MLPY/Lib/site-packages/attr/_typing_compat.pyi b/MLPY/Lib/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ca7b71e906a28f88726bbd342fdfe636af0281e7 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/MLPY/Lib/site-packages/attr/_version_info.py b/MLPY/Lib/site-packages/attr/_version_info.py new file mode 100644 index 0000000000000000000000000000000000000000..51a1312f9759f21063caea779a62882d7f7c86ae --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. 
+ """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/MLPY/Lib/site-packages/attr/_version_info.pyi b/MLPY/Lib/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000000000000000000000000000000000000..45ced086337783c4b73b26cd17d2c1c260e24029 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/MLPY/Lib/site-packages/attr/converters.py b/MLPY/Lib/site-packages/attr/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf4c902a66faeeda4cbae89d75f063df99c5039 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/converters.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + msg = "Must pass either `default` or `factory`." + raise TypeError(msg) + + if default is not NOTHING and factory is not None: + msg = "Must pass either `default` or `factory` but not both." 
+ raise TypeError(msg) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + msg = "`takes_self` is not supported by default_if_none." + raise ValueError(msg) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + msg = f"Cannot convert value to bool: {val}" + raise ValueError(msg) diff --git a/MLPY/Lib/site-packages/attr/converters.pyi b/MLPY/Lib/site-packages/attr/converters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5abb49f6d5a8c3447d0f223a308e2278ad027416 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, TypeVar, overload + +from . import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... diff --git a/MLPY/Lib/site-packages/attr/exceptions.py b/MLPY/Lib/site-packages/attr/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3b7abb8154108aa1d0ae52fa9ee8e489f05b5563 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/exceptions.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +from typing import ClassVar + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args: ClassVar[tuple[str]] = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An *attrs* function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-*attrs* class has been passed into an *attrs* function. + + .. 
versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set when defining the field and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has a field without a type annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an *attrs* feature that requires a newer Python + version. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A field requiring a callable has been set with a value that is not + callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/MLPY/Lib/site-packages/attr/exceptions.pyi b/MLPY/Lib/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f2680118b404db8f5227d04d27e8439331341c4d --- /dev/null +++ b/MLPY/Lib/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/MLPY/Lib/site-packages/attr/filters.py b/MLPY/Lib/site-packages/attr/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..a1e40c98db853aa375ab0b24559e0559f91e6152 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/filters.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s + + :rtype: `callable` + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s. + + :rtype: `callable` + + .. 
versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/MLPY/Lib/site-packages/attr/filters.pyi b/MLPY/Lib/site-packages/attr/filters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8a02fa0fc0631dde0b4501c8d1c168b467c0d1a9 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any, Union + +from . import Attribute, _FilterType + +def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/MLPY/Lib/site-packages/attr/py.typed b/MLPY/Lib/site-packages/attr/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/attr/setters.py b/MLPY/Lib/site-packages/attr/setters.py new file mode 100644 index 0000000000000000000000000000000000000000..12ed6750df35b96e2ccde24a9752dca22929188d --- /dev/null +++ b/MLPY/Lib/site-packages/attr/setters.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. +NO_OP = object() diff --git a/MLPY/Lib/site-packages/attr/setters.pyi b/MLPY/Lib/site-packages/attr/setters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..72f7ce4761c343860d8b230dd50dcdeba10b03fb --- /dev/null +++ b/MLPY/Lib/site-packages/attr/setters.pyi @@ -0,0 +1,19 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute, _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... 
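The setter hooks defined above (`convert`, `validate`, `pipe`, `NO_OP`) are meant to be passed as `on_setattr` hooks; a small usage sketch (illustrative only, using the public `attr` API):

    import attr
    from attr import setters, validators

    @attr.s(on_setattr=setters.pipe(setters.convert, setters.validate))
    class C:
        x = attr.ib(converter=int, validator=validators.instance_of(int))

    c = C("1")   # converter runs in __init__: c.x == 1
    c.x = "2"    # on_setattr re-runs the converter, then the validator: c.x == 2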
+ +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/MLPY/Lib/site-packages/attr/validators.py b/MLPY/Lib/site-packages/attr/validators.py new file mode 100644 index 0000000000000000000000000000000000000000..34d6b761d37857e876a7d0fd1970a758f8f71981 --- /dev/null +++ b/MLPY/Lib/site-packages/attr/validators.py @@ -0,0 +1,681 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + + +import operator +import re + +from contextlib import contextmanager +from re import Pattern + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none +from .exceptions import NotCallableError + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "not_", + "optional", + "provides", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + :param disabled: If ``True``, disable running all validators. + :type disabled: bool + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + :return: ``True`` if validators are currently disabled. + :rtype: bool + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, hash=True) +class _InstanceOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + msg = "'{name}' must be {type!r} (got {value!r} that is a {actual!r}).".format( + name=attr.name, + type=self.type, + actual=value.__class__, + value=value, + ) + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"" + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + :param type: The type to check for. + :type type: type or tuple of type + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected type, and the value it + got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator: + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if not self.match_func(value): + msg = "'{name}' must match regex {pattern!r} ({value!r} doesn't)".format( + name=attr.name, pattern=self.pattern.pattern, value=value + ) + raise ValueError( + msg, + attr, + self.pattern, + value, + ) + + def __repr__(self): + return f"" + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called + with a string that doesn't match *regex*. + + :param regex: a regex string or precompiled pattern to match against + :param int flags: flags that will be passed to the underlying re function + (default 0) + :param callable func: which underlying `re` function to call. Valid options + are `re.fullmatch`, `re.search`, and `re.match`; the default ``None`` + means `re.fullmatch`. For performance reasons, the pattern is always + precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. + """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + msg = "'func' must be one of {}.".format( + ", ".join( + sorted(e and e.__name__ or "None" for e in set(valid_funcs)) + ) + ) + raise ValueError(msg) + + if isinstance(regex, Pattern): + if flags: + msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead" + raise TypeError(msg) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, hash=True) +class _ProvidesValidator: + interface = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.interface.providedBy(value): + msg = "'{name}' must provide {interface!r} which {value!r} doesn't.".format( + name=attr.name, interface=self.interface, value=value + ) + raise TypeError( + msg, + attr, + self.interface, + value, + ) + + def __repr__(self): + return f"" + + +def provides(interface): + """ + A validator that raises a `TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param interface: The interface to check for. + :type interface: ``zope.interface.Interface`` + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected interface, and the + value it got. + + .. deprecated:: 23.1.0 + """ + import warnings + + warnings.warn( + "attrs's zope-interface support is deprecated and will be removed in, " + "or after, April 2024.", + DeprecationWarning, + stacklevel=2, + ) + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return f"" + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. + + :param Callable | tuple[Callable] | list[Callable] validator: A validator + (or validators) that is used for non-``None`` values. + + .. versionadded:: 15.1.0 + .. 
versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. + """ + if isinstance(validator, (list, tuple)): + return _OptionalValidator(_AndValidator(validator)) + + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator: + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + msg = f"'{attr.name}' must be in {self.options!r} (got {value!r})" + raise ValueError( + msg, + attr, + self.options, + value, + ) + + def __repr__(self): + return f"" + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, `enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type `attrs.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + """ + return _InValidator(options) + + +@attrs(repr=False, slots=False, hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "" + + +def is_callable(): + """ + A validator that raises a `attrs.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. + + .. versionadded:: 19.1.0 + + :raises attrs.exceptions.NotCallableError: With a human readable error + message containing the attribute (`attrs.Attribute`) name, + and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else f" {self.iterable_validator!r}" + ) + return ( + f"" + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + :param member_validator: Validator(s) to apply to iterable members + :param iterable_validator: Validator to apply to iterable itself + (optional) + + .. 
versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + if isinstance(member_validator, (list, tuple)): + member_validator = and_(*member_validator) + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, hash=True) +class _DeepMapping: + key_validator = attrib(validator=is_callable()) + value_validator = attrib(validator=is_callable()) + mapping_validator = attrib(default=None, validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + self.key_validator(inst, attr, key) + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return ( + "" + ).format(key=self.key_validator, value=self.value_validator) + + +def deep_mapping(key_validator, value_validator, mapping_validator=None): + """ + A validator that performs deep validation of a dictionary. + + :param key_validator: Validator to apply to dictionary keys + :param value_validator: Validator to apply to dictionary values + :param mapping_validator: Validator to apply to top-level mapping + attribute (optional) + + .. versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator: + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.compare_func(value, self.bound): + msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number larger or equal to *val*. + + :param val: Exclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number greater than *val*. + + :param val: Inclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller than *val*. + + :param val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller or equal to *val*. + + :param val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator: + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) > self.max_length: + msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + :param int length: Maximum length of the string or iterable + + .. 
versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}" + raise ValueError(msg) + + def __repr__(self): + return f"" + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + :param int length: Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) + + +@attrs(repr=False, slots=True, hash=True) +class _SubclassOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not issubclass(value, self.type): + msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})." + raise TypeError( + msg, + attr, + self.type, + value, + ) + + def __repr__(self): + return f"" + + +def _subclass_of(type): + """ + A validator that raises a `TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + `issubclass` therefore it's also valid to pass a tuple of types). + + :param type: The type to check for. + :type type: type or tuple of types + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected type, and the value it + got. + """ + return _SubclassOfValidator(type) + + +@attrs(repr=False, slots=True, hash=True) +class _NotValidator: + validator = attrib() + msg = attrib( + converter=default_if_none( + "not_ validator child '{validator!r}' " + "did not raise a captured error" + ) + ) + exc_types = attrib( + validator=deep_iterable( + member_validator=_subclass_of(Exception), + iterable_validator=instance_of(tuple), + ), + ) + + def __call__(self, inst, attr, value): + try: + self.validator(inst, attr, value) + except self.exc_types: + pass # suppress error to invert validity + else: + raise ValueError( + self.msg.format( + validator=self.validator, + exc_types=self.exc_types, + ), + attr, + self.validator, + value, + self.exc_types, + ) + + def __repr__(self): + return ( + "" + ).format( + what=self.validator, + exc_types=self.exc_types, + ) + + +def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)): + """ + A validator that wraps and logically 'inverts' the validator passed to it. + It will raise a `ValueError` if the provided validator *doesn't* raise a + `ValueError` or `TypeError` (by default), and will suppress the exception + if the provided validator *does*. + + Intended to be used with existing validators to compose logic without + needing to create inverted variants, for example, ``not_(in_(...))``. + + :param validator: A validator to be logically inverted. + :param msg: Message to raise if validator fails. + Formatted with keys ``exc_types`` and ``validator``. + :type msg: str + :param exc_types: Exception type(s) to capture. + Other types raised by child validators will not be intercepted and + pass through. + + :raises ValueError: With a human readable error message, + the attribute (of type `attrs.Attribute`), + the validator that failed to raise an exception, + the value it got, + and the expected exception types. + + .. 
versionadded:: 22.2.0 + """ + try: + exc_types = tuple(exc_types) + except TypeError: + exc_types = (exc_types,) + return _NotValidator(validator, msg, exc_types) diff --git a/MLPY/Lib/site-packages/attr/validators.pyi b/MLPY/Lib/site-packages/attr/validators.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d194a75abcacfa434f2445e66ea25975236dffcf --- /dev/null +++ b/MLPY/Lib/site-packages/attr/validators.pyi @@ -0,0 +1,88 @@ +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + List, + Mapping, + Match, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +from . import _ValidatorType +from . import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2]] +) -> _ValidatorType[Union[_T1, _T2]]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... +@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[ + _ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]] + ] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Union[Pattern[AnyStr], AnyStr], + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... +def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: Optional[str] = None, + exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ..., +) -> _ValidatorType[_T]: ... 
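For orientation, a short hypothetical session with the validators defined above; the `Job` class and all values are invented and are not part of the vendored files:

```python
from typing import List, Optional

from attrs import define, field
from attrs import validators as v

@define
class Job:
    # matches_re uses re.fullmatch by default, so the whole string must match.
    name: str = field(validator=v.matches_re(r"[a-z_]+"))
    # and_ chains validators; ge/le give an inclusive 0..9 range.
    priority: int = field(validator=v.and_(v.instance_of(int), v.ge(0), v.le(9)))
    # deep_iterable validates both the container and every member.
    tags: List[str] = field(
        factory=list,
        validator=v.deep_iterable(
            member_validator=v.instance_of(str),
            iterable_validator=v.instance_of(list),
        ),
    )
    # optional() allows None in addition to whatever the wrapped validator accepts.
    owner: Optional[str] = field(default=None, validator=v.optional(v.instance_of(str)))

Job(name="backup", priority=3, tags=["nightly"])
# Job(name="Backup!", priority=3) would raise ValueError from matches_re
```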
diff --git a/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/METADATA b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c20be76c7476ea998872511afa4449f41d8bb832 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/METADATA @@ -0,0 +1,202 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 23.2.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Requires-Dist: importlib-metadata; python_version < '3.8' +Provides-Extra: cov +Requires-Dist: attrs[tests]; extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Provides-Extra: dev +Requires-Dist: attrs[tests]; extra == 'dev' +Requires-Dist: pre-commit; extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier; extra == 'docs' +Requires-Dist: zope-interface; extra == 'docs' +Provides-Extra: tests +Requires-Dist: attrs[tests-no-zope]; extra == 'tests' +Requires-Dist: zope-interface; extra == 'tests' +Provides-Extra: tests-mypy +Requires-Dist: mypy>=1.6; (platform_python_implementation == 'CPython' and python_version >= '3.8') and extra == 'tests-mypy' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.8') and extra == 'tests-mypy' +Provides-Extra: tests-no-zope +Requires-Dist: attrs[tests-mypy]; extra == 'tests-no-zope' +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests-no-zope' +Requires-Dist: hypothesis; extra == 'tests-no-zope' +Requires-Dist: pympler; extra == 'tests-no-zope' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests-no-zope' +Requires-Dist: pytest>=4.3.0; extra == 'tests-no-zope' +Description-Content-Type: text/markdown + +


+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + +



+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +**Hate type annotations**!? +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Please check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for a more in-depth explanation. + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes). + + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **Get Help**: please use the `python-attrs` tag on [StackOverflow](https://stackoverflow.com/questions/tagged/python-attrs) + + +### *attrs* for Enterprise + +Available as part of the Tidelift Subscription. + +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. 
+[Learn more.](https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo) + +## Release Information + +### Changes + +- The type annotation for `attrs.resolve_types()` is now correct. + [#1141](https://github.com/python-attrs/attrs/issues/1141) +- Type stubs now use `typing.dataclass_transform` to decorate dataclass-like decorators, instead of the non-standard `__dataclass_transform__` special form, which is only supported by Pyright. + [#1158](https://github.com/python-attrs/attrs/issues/1158) +- Fixed serialization of namedtuple fields using `attrs.asdict/astuple()` with `retain_collection_types=True`. + [#1165](https://github.com/python-attrs/attrs/issues/1165) +- `attrs.AttrsInstance` is now a `typing.Protocol` in both type hints and code. + This allows you to subclass it along with another `Protocol`. + [#1172](https://github.com/python-attrs/attrs/issues/1172) +- If *attrs* detects that `__attrs_pre_init__` accepts more than just `self`, it will call it with the same arguments as `__init__` was called. + This allows you to, for example, pass arguments to `super().__init__()`. + [#1187](https://github.com/python-attrs/attrs/issues/1187) +- Slotted classes now transform `functools.cached_property` decorated methods to support equivalent semantics. + [#1200](https://github.com/python-attrs/attrs/issues/1200) +- Added *class_body* argument to `attrs.make_class()` to provide additional attributes for newly created classes. + It is, for example, now possible to attach methods. + [#1203](https://github.com/python-attrs/attrs/issues/1203) + + +--- + +[Full changelog](https://www.attrs.org/en/stable/changelog.html) diff --git a/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/RECORD b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c2fb2747c812c55a8d42454771f364649907c812 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=WlXJN6ICB0Y_HZ0lmuTUgia0kuSdn2p67d4N6cYxNZM,3307 +attr/__init__.pyi,sha256=u08EujYHy_rSyebNn-I9Xv2S_cXmtA9xWGc0cBsyl18,16976 +attr/__pycache__/__init__.cpython-39.pyc,, +attr/__pycache__/_cmp.cpython-39.pyc,, +attr/__pycache__/_compat.cpython-39.pyc,, +attr/__pycache__/_config.cpython-39.pyc,, +attr/__pycache__/_funcs.cpython-39.pyc,, +attr/__pycache__/_make.cpython-39.pyc,, +attr/__pycache__/_next_gen.cpython-39.pyc,, +attr/__pycache__/_version_info.cpython-39.pyc,, +attr/__pycache__/converters.cpython-39.pyc,, +attr/__pycache__/exceptions.cpython-39.pyc,, +attr/__pycache__/filters.cpython-39.pyc,, +attr/__pycache__/setters.cpython-39.pyc,, +attr/__pycache__/validators.cpython-39.pyc,, +attr/_cmp.py,sha256=OQZlWdFX74z18adGEUp40Ojqm0NNu1Flqnv2JE8B2ng,4025 +attr/_cmp.pyi,sha256=sGQmOM0w3_K4-X8cTXR7g0Hqr290E8PTObA9JQxWQqc,399 +attr/_compat.py,sha256=QmRyxii295wcQfaugWqxuIumAPsNQ2-RUF82QZPqMKw,2540 +attr/_config.py,sha256=z81Vt-GeT_2taxs1XZfmHx9TWlSxjPb6eZH1LTGsS54,843 +attr/_funcs.py,sha256=VBTUFKLklsmqxys3qWSTK_Ac9Z4s0mAJWwgW9nA7Llk,17173 +attr/_make.py,sha256=LnVy2e0HygoqaZknhC19z7JmOt7qGkAadf2LZgWVJWI,101923 +attr/_next_gen.py,sha256=as1voi8siAI_o2OQG8YIiZvmn0G7-S3_j_774rnoZ_g,6203 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 
+attr/converters.py,sha256=Kyw5MY0yfnUR_RwN1Vydf0EiE---htDxOgSc_-NYL6A,3622 +attr/converters.pyi,sha256=jKlpHBEt6HVKJvgrMFJRrHq8p61GXg4-Nd5RZWKJX7M,406 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=9pYvXqdg6mtLvKIIb56oALRMoHFnQTcGCO4EXTc1qyM,1470 +attr/filters.pyi,sha256=0mRCjLKxdcvAo0vD-Cr81HfRXXCp9j_cAXjOoAHtPGM,225 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400 +attr/setters.pyi,sha256=pyY8TVNBu8TWhOldv_RxHzmGvdgFQH981db70r0fn5I,567 +attr/validators.py,sha256=LGVpbiNg_KGzYrKUD5JPiZkx8TMfynDZGoQoLJNCIMo,19676 +attr/validators.pyi,sha256=167Dl9nt7NUhE9wht1I-buo039qyUT1nEUT_nKjSWr4,2580 +attrs-23.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-23.2.0.dist-info/METADATA,sha256=WwvG7OHyKjEPpyFUZCCYt1n0E_CcqdRb7bliGEdcm-A,9531 +attrs-23.2.0.dist-info/RECORD,, +attrs-23.2.0.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87 +attrs-23.2.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=9_5waVbFs7rLqtXZ73tNDrxhezyZ8VZeX4BbvQ3EeJw,1039 +attrs/__init__.pyi,sha256=s_ajQ_U14DOsOz0JbmAKDOi46B3v2PcdO0UAV1MY6Ek,2168 +attrs/__pycache__/__init__.cpython-39.pyc,, +attrs/__pycache__/converters.cpython-39.pyc,, +attrs/__pycache__/exceptions.cpython-39.pyc,, +attrs/__pycache__/filters.cpython-39.pyc,, +attrs/__pycache__/setters.cpython-39.pyc,, +attrs/__pycache__/validators.cpython-39.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/WHEEL b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..2860816abecb3c5ad7da6d018a4334b87e6b6cfc --- /dev/null +++ b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.21.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2bd6453d255e19b973f19b128596a8b6dd65b2c3 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MLPY/Lib/site-packages/attrs/__init__.py b/MLPY/Lib/site-packages/attrs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0c2481561a93a912503754396782e987fcdd9629 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/__init__.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Factory, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "AttrsInstance", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/MLPY/Lib/site-packages/attrs/__init__.pyi b/MLPY/Lib/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9372cfea16e89790cb0f515b1d9d48d8f1897151 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/__init__.pyi @@ -0,0 +1,67 @@ +from typing import ( + Any, + Callable, + Dict, + Mapping, + Optional, + Sequence, + Tuple, + Type, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. 
+from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import _FilterType +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import define as define +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import field as field +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import frozen as frozen +from attr import has as has +from attr import make_class as make_class +from attr import mutable as mutable +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators + +# TODO: see definition of attr.asdict/astuple +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: bool = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... 
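To make the `asdict`/`astuple` stubs above concrete, a small illustrative round trip (the `User` class is invented; it also exercises the `attr.filters.exclude` helper vendored earlier in this diff):

```python
from attrs import asdict, astuple, define, field, fields, filters

@define
class User:
    name: str
    password: str = field(repr=False)

u = User("alice", "s3cret")

# Drop the password attribute when serializing to a plain dict.
public = asdict(u, filter=filters.exclude(fields(User).password))
assert public == {"name": "alice"}

assert astuple(u) == ("alice", "s3cret")
```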
diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a72f49f80e26b1321ca5891e3f14d76ff60376e2 Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/converters.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/converters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f4b6774e3e72c622041297261a7ac591e4cc0bc Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/converters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b35d751fc9ae819f79ca34a6d0014dd435b1cb21 Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/filters.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aa248581e4269db7d339d1e5d203b2f209dc67f Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/setters.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/setters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d122a69e4d422d427d8d128a2cf19833693c72d1 Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/setters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/__pycache__/validators.cpython-39.pyc b/MLPY/Lib/site-packages/attrs/__pycache__/validators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ae694c98f89d1fb03c01b80dc5f4069807de90 Binary files /dev/null and b/MLPY/Lib/site-packages/attrs/__pycache__/validators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/attrs/converters.py b/MLPY/Lib/site-packages/attrs/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..7821f6c02cca81277d1ecc87b6bdafad886d8b70 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa: F403 diff --git a/MLPY/Lib/site-packages/attrs/exceptions.py b/MLPY/Lib/site-packages/attrs/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3323f9d2112c54b203763d45b455bd5abbe020f6 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/MLPY/Lib/site-packages/attrs/filters.py b/MLPY/Lib/site-packages/attrs/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..3080f48398e5ed8d3428ca3efeb7500633b0cb0f --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa: F403 diff --git a/MLPY/Lib/site-packages/attrs/py.typed b/MLPY/Lib/site-packages/attrs/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/attrs/setters.py b/MLPY/Lib/site-packages/attrs/setters.py new file mode 
100644 index 0000000000000000000000000000000000000000..f3d73bb793dd49c138950961f41943bb26c57fde --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa: F403 diff --git a/MLPY/Lib/site-packages/attrs/validators.py b/MLPY/Lib/site-packages/attrs/validators.py new file mode 100644 index 0000000000000000000000000000000000000000..037e124f29f32d37c1642d159bf828de44f7c349 --- /dev/null +++ b/MLPY/Lib/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa: F403 diff --git a/MLPY/Lib/site-packages/cattr/__init__.py b/MLPY/Lib/site-packages/cattr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6703ddc09b233579250451ee36bf68246ea0b726 --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/__init__.py @@ -0,0 +1,31 @@ +from .converters import Converter, GenConverter, UnstructureStrategy +from .gen import override + +__all__ = ( + "global_converter", + "unstructure", + "structure", + "structure_attrs_fromtuple", + "structure_attrs_fromdict", + "UnstructureStrategy", + "Converter", + "GenConverter", + "override", +) + +__author__ = "Tin Tvrtković" +__email__ = "tinchester@gmail.com" + + +global_converter = Converter() + +unstructure = global_converter.unstructure +structure = global_converter.structure +structure_attrs_fromtuple = global_converter.structure_attrs_fromtuple +structure_attrs_fromdict = global_converter.structure_attrs_fromdict +register_structure_hook = global_converter.register_structure_hook +register_structure_hook_func = global_converter.register_structure_hook_func +register_unstructure_hook = global_converter.register_unstructure_hook +register_unstructure_hook_func = ( + global_converter.register_unstructure_hook_func +) diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d32b6a11f1cdd87691d9298264264d57b41b06a2 Binary files /dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/_compat.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/_compat.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44b07a5b6690d68bff0eab1ac19943a57f872f51 Binary files /dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/_compat.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/converters.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/converters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff90cf152e04b2408245209815066515e97f937c Binary files /dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/converters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/disambiguators.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/disambiguators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e8ffd8cbe7409e49e223d74afe04ae40047db3f Binary files /dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/disambiguators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/dispatch.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/dispatch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f5140d5c7a8beff8b6f7cce2e947282977a0f27 Binary files 
/dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/dispatch.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/__pycache__/gen.cpython-39.pyc b/MLPY/Lib/site-packages/cattr/__pycache__/gen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb4d36a32aea9315685f45a7e946325209476a76 Binary files /dev/null and b/MLPY/Lib/site-packages/cattr/__pycache__/gen.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cattr/_compat.py b/MLPY/Lib/site-packages/cattr/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..7547f0e2e632d012c1f460b0698e58780e5d50a6 --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/_compat.py @@ -0,0 +1,285 @@ +import sys +from attr import ( + Factory, + NOTHING, + fields as attrs_fields, + Attribute, +) +from dataclasses import ( + MISSING, + is_dataclass, + fields as dataclass_fields, +) +from typing import ( + Any, + Dict, + FrozenSet, + List, + Mapping as TypingMapping, + MutableMapping as TypingMutableMapping, + MutableSequence as TypingMutableSequence, + MutableSet as TypingMutableSet, + Sequence as TypingSequence, + Set as TypingSet, + Tuple, +) + +version_info = sys.version_info[0:3] +is_py37 = version_info[:2] == (3, 7) +is_py38 = version_info[:2] == (3, 8) +is_py39_plus = version_info[:2] >= (3, 9) + +if is_py37: + + def get_args(cl): + return cl.__args__ + + def get_origin(cl): + return getattr(cl, "__origin__", None) + + +else: + from typing import get_args, get_origin # NOQA + + +def has(cls): + return hasattr(cls, "__attrs_attrs__") or hasattr( + cls, "__dataclass_fields__" + ) + + +def has_with_generic(cls): + """Test whether the class if a normal or generic attrs or dataclass.""" + return has(cls) or has(get_origin(cls)) + + +def fields(type): + try: + return type.__attrs_attrs__ + except AttributeError: + try: + return dataclass_fields(type) + except AttributeError: + raise Exception("Not an attrs or dataclass class.") + + +def adapted_fields(type) -> List[Attribute]: + """Return the attrs format of `fields()` for attrs and dataclasses.""" + if is_dataclass(type): + return [ + Attribute( + attr.name, + attr.default + if attr.default is not MISSING + else ( + Factory(attr.default_factory) + if attr.default_factory is not MISSING + else NOTHING + ), + None, + True, + None, + True, + attr.init, + True, + type=attr.type, + ) + for attr in dataclass_fields(type) + ] + else: + return attrs_fields(type) + + +if is_py37 or is_py38: + Set = TypingSet + MutableSet = TypingMutableSet + Sequence = TypingSequence + MutableSequence = TypingMutableSequence + MutableMapping = TypingMutableMapping + Mapping = TypingMapping + + from typing import Union, _GenericAlias + + def is_annotated(_): + return False + + def is_tuple(type): + return type in (Tuple, tuple) or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, Tuple) + ) + + def is_union_type(obj): + return ( + obj is Union + or isinstance(obj, _GenericAlias) + and obj.__origin__ is Union + ) + + def is_sequence(type: Any) -> bool: + return type in (List, list, Tuple, tuple) or ( + type.__class__ is _GenericAlias + and type.__origin__ is not Union + and issubclass(type.__origin__, TypingSequence) + ) + + def is_mutable_set(type): + return type is set or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, MutableSet) + ) + + def is_frozenset(type): + return type is frozenset or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, FrozenSet) + ) + + def is_mapping(type): + return type in 
(TypingMapping, dict) or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, TypingMapping) + ) + + bare_list_args = List.__args__ + bare_seq_args = TypingSequence.__args__ + bare_mapping_args = TypingMapping.__args__ + bare_dict_args = Dict.__args__ + bare_mutable_seq_args = TypingMutableSequence.__args__ + + def is_bare(type): + args = type.__args__ + return ( + args == bare_list_args + or args == bare_seq_args + or args == bare_mapping_args + or args == bare_dict_args + or args == bare_mutable_seq_args + ) + + +else: + # 3.9+ + from typing import ( + Union, + _GenericAlias, + _SpecialGenericAlias, + _UnionGenericAlias, + _AnnotatedAlias, + ) + from collections.abc import ( + MutableSequence as AbcMutableSequence, + Sequence as AbcSequence, + MutableSet as AbcMutableSet, + Set as AbcSet, + MutableMapping as AbcMutableMapping, + Mapping as AbcMapping, + ) + + Set = AbcSet + MutableSet = AbcMutableSet + Sequence = AbcSequence + MutableSequence = AbcMutableSequence + MutableMapping = AbcMutableMapping + Mapping = AbcMapping + + def is_annotated(type) -> bool: + return getattr(type, "__class__", None) is _AnnotatedAlias + + def is_tuple(type): + return ( + type in (Tuple, tuple) + or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, Tuple) + ) + or (getattr(type, "__origin__", None) is tuple) + ) + + def is_union_type(obj): + return ( + obj is Union + or isinstance(obj, _UnionGenericAlias) + and obj.__origin__ is Union + ) + + def is_sequence(type: Any) -> bool: + origin = getattr(type, "__origin__", None) + return ( + type + in ( + List, + list, + TypingSequence, + TypingMutableSequence, + AbcMutableSequence, + tuple, + ) + or ( + type.__class__ is _GenericAlias + and ( + (origin is not tuple) + and issubclass( + origin, + TypingSequence, + ) + or origin is tuple + and type.__args__[1] is ... + ) + ) + or (origin in (list, AbcMutableSequence, AbcSequence)) + or (origin is tuple and type.__args__[1] is ...) 
+ ) + + def is_mutable_set(type): + return ( + type in (TypingSet, TypingMutableSet, set) + or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, TypingMutableSet) + ) + or ( + getattr(type, "__origin__", None) + in (set, AbcMutableSet, AbcSet) + ) + ) + + def is_frozenset(type): + return ( + type in (FrozenSet, frozenset) + or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, FrozenSet) + ) + or (getattr(type, "__origin__", None) is frozenset) + ) + + def is_bare(type): + return isinstance(type, _SpecialGenericAlias) or ( + not hasattr(type, "__origin__") and not hasattr(type, "__args__") + ) + + def is_mapping(type): + return ( + type + in ( + TypingMapping, + Dict, + TypingMutableMapping, + dict, + AbcMutableMapping, + ) + or ( + type.__class__ is _GenericAlias + and issubclass(type.__origin__, TypingMapping) + ) + or ( + getattr(type, "__origin__", None) + in (dict, AbcMutableMapping, AbcMapping) + ) + or issubclass(type, dict) + ) + + +def is_generic(obj): + return isinstance(obj, _GenericAlias) diff --git a/MLPY/Lib/site-packages/cattr/converters.py b/MLPY/Lib/site-packages/cattr/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..1160533fb56b779601cf79b774e857830e6e1f93 --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/converters.py @@ -0,0 +1,669 @@ +from enum import Enum +from functools import lru_cache +from typing import ( + Any, + Callable, + Dict, + Optional, + Tuple, + Type, + TypeVar, +) + +from attr import resolve_types, has as attrs_has + +from ._compat import ( + get_origin, + is_bare, + is_frozenset, + is_generic, + is_mapping, + is_mutable_set, + is_sequence, + is_tuple, + is_union_type, + is_annotated, + has, + fields, + has_with_generic, + Set, + MutableSet, + Sequence, + MutableSequence, + Mapping, + MutableMapping, +) +from .disambiguators import create_uniq_field_dis_func +from .dispatch import MultiStrategyDispatch +from .gen import ( + AttributeOverride, + make_dict_structure_fn, + make_dict_unstructure_fn, + make_iterable_unstructure_fn, + make_mapping_unstructure_fn, +) +from collections import Counter + +NoneType = type(None) +T = TypeVar("T") +V = TypeVar("V") + + +class UnstructureStrategy(Enum): + """`attrs` classes unstructuring strategies.""" + + AS_DICT = "asdict" + AS_TUPLE = "astuple" + + +def _subclass(typ): + """ a shortcut """ + return lambda cls: issubclass(cls, typ) + + +class Converter(object): + """Converts between structured and unstructured data.""" + + __slots__ = ( + "_dis_func_cache", + "_unstructure_func", + "_unstructure_attrs", + "_structure_attrs", + "_dict_factory", + "_union_struct_registry", + "_structure_func", + ) + + def __init__( + self, + dict_factory: Callable[[], Any] = dict, + unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT, + ) -> None: + unstruct_strat = UnstructureStrategy(unstruct_strat) + + # Create a per-instance cache. 
+ if unstruct_strat is UnstructureStrategy.AS_DICT: + self._unstructure_attrs = self.unstructure_attrs_asdict + self._structure_attrs = self.structure_attrs_fromdict + else: + self._unstructure_attrs = self.unstructure_attrs_astuple + self._structure_attrs = self.structure_attrs_fromtuple + + self._dis_func_cache = lru_cache()(self._get_dis_func) + + self._unstructure_func = MultiStrategyDispatch( + self._unstructure_identity + ) + self._unstructure_func.register_cls_list( + [ + (bytes, self._unstructure_identity), + (str, self._unstructure_identity), + ] + ) + self._unstructure_func.register_func_list( + [ + (is_mapping, self._unstructure_mapping), + (is_sequence, self._unstructure_seq), + (is_mutable_set, self._unstructure_seq), + (is_frozenset, self._unstructure_seq), + (_subclass(Enum), self._unstructure_enum), + (has, self._unstructure_attrs), + (is_union_type, self._unstructure_union), + ] + ) + + # Per-instance register of to-attrs converters. + # Singledispatch dispatches based on the first argument, so we + # store the function and switch the arguments in self.loads. + self._structure_func = MultiStrategyDispatch(self._structure_default) + self._structure_func.register_func_list( + [ + (is_sequence, self._structure_list), + (is_mutable_set, self._structure_set), + (is_frozenset, self._structure_frozenset), + (is_tuple, self._structure_tuple), + (is_mapping, self._structure_dict), + (is_union_type, self._structure_union), + (has, self._structure_attrs), + ] + ) + # Strings are sequences. + self._structure_func.register_cls_list( + [ + ( + str, + self._structure_call, + ), + (bytes, self._structure_call), + (int, self._structure_call), + (float, self._structure_call), + (Enum, self._structure_call), + ] + ) + + self._dict_factory = dict_factory + + # Unions are instances now, not classes. We use different registries. + self._union_struct_registry: Dict[ + Any, Callable[[Any, Type[T]], T] + ] = {} + + def unstructure(self, obj: Any, unstructure_as=None) -> Any: + return self._unstructure_func.dispatch( + obj.__class__ if unstructure_as is None else unstructure_as + )(obj) + + @property + def unstruct_strat(self) -> UnstructureStrategy: + """The default way of unstructuring ``attrs`` classes.""" + return ( + UnstructureStrategy.AS_DICT + if self._unstructure_attrs == self.unstructure_attrs_asdict + else UnstructureStrategy.AS_TUPLE + ) + + def register_unstructure_hook( + self, cls: Any, func: Callable[[T], Any] + ) -> None: + """Register a class-to-primitive converter function for a class. + + The converter function should take an instance of the class and return + its Python equivalent. + """ + if attrs_has(cls): + resolve_types(cls) + if is_union_type(cls): + self._unstructure_func.register_func_list( + [(lambda t: t == cls, func)] + ) + else: + self._unstructure_func.register_cls_list([(cls, func)]) + + def register_unstructure_hook_func( + self, check_func: Callable[[Any], bool], func: Callable[[T], Any] + ): + """Register a class-to-primitive converter function for a class, using + a function to check if it's a match. + """ + self._unstructure_func.register_func_list([(check_func, func)]) + + def register_structure_hook( + self, cl: Any, func: Callable[[Any, Type[T]], T] + ): + """Register a primitive-to-class converter function for a type. + + The converter function should take two arguments: + * a Python object to be converted, + * the type to convert to + + and return the instance of the class. 
The type may seem redundant, but + is sometimes needed (for example, when dealing with generic classes). + """ + if attrs_has(cl): + resolve_types(cl) + if is_union_type(cl): + self._union_struct_registry[cl] = func + else: + self._structure_func.register_cls_list([(cl, func)]) + + def register_structure_hook_func( + self, + check_func: Callable[[Type[T]], bool], + func: Callable[[Any, Type[T]], T], + ): + """Register a class-to-primitive converter function for a class, using + a function to check if it's a match. + """ + self._structure_func.register_func_list([(check_func, func)]) + + def structure(self, obj: Any, cl: Type[T]) -> T: + """Convert unstructured Python data structures to structured data.""" + + return self._structure_func.dispatch(cl)(obj, cl) + + # Classes to Python primitives. + def unstructure_attrs_asdict(self, obj) -> Dict[str, Any]: + """Our version of `attrs.asdict`, so we can call back to us.""" + attrs = fields(obj.__class__) + dispatch = self._unstructure_func.dispatch + rv = self._dict_factory() + for a in attrs: + name = a.name + v = getattr(obj, name) + rv[name] = dispatch(a.type or v.__class__)(v) + return rv + + def unstructure_attrs_astuple(self, obj) -> Tuple[Any, ...]: + """Our version of `attrs.astuple`, so we can call back to us.""" + attrs = fields(obj.__class__) + dispatch = self._unstructure_func.dispatch + res = list() + for a in attrs: + name = a.name + v = getattr(obj, name) + res.append(dispatch(a.type or v.__class__)(v)) + return tuple(res) + + def _unstructure_enum(self, obj): + """Convert an enum to its value.""" + return obj.value + + def _unstructure_identity(self, obj): + """Just pass it through.""" + return obj + + def _unstructure_seq(self, seq): + """Convert a sequence to primitive equivalents.""" + # We can reuse the sequence class, so tuples stay tuples. + dispatch = self._unstructure_func.dispatch + return seq.__class__(dispatch(e.__class__)(e) for e in seq) + + def _unstructure_mapping(self, mapping): + """Convert a mapping of attr classes to primitive equivalents.""" + + # We can reuse the mapping class, so dicts stay dicts and OrderedDicts + # stay OrderedDicts. + dispatch = self._unstructure_func.dispatch + return mapping.__class__( + (dispatch(k.__class__)(k), dispatch(v.__class__)(v)) + for k, v in mapping.items() + ) + + def _unstructure_union(self, obj): + """ + Unstructure an object as a union. + + By default, just unstructures the instance. + """ + return self._unstructure_func.dispatch(obj.__class__)(obj) + + # Python primitives to classes. + + def _structure_default(self, obj, cl): + """This is the fallthrough case. Everything is a subclass of `Any`. + + A special condition here handles ``attrs`` classes. + + Bare optionals end here too (optionals with arguments are unions.) We + treat bare optionals as Any. + """ + if cl is Any or cl is Optional or cl is None: + return obj + + if is_generic(cl): + fn = make_dict_structure_fn(cl, self) + self.register_structure_hook(cl, fn) + return fn(obj) + + # We don't know what this is, so we complain loudly. + msg = ( + "Unsupported type: {0}. Register a structure hook for " + "it.".format(cl) + ) + raise ValueError(msg) + + @staticmethod + def _structure_call(obj, cl): + """Just call ``cl`` with the given ``obj``. + + This is just an optimization on the ``_structure_default`` case, when + we know we can skip the ``if`` s. Use for ``str``, ``bytes``, ``enum``, + etc. + """ + return cl(obj) + + # Attrs classes. 
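A usage sketch for the hook-registration methods above; the `Event` class and the ISO-format hooks are invented for illustration and are not part of the vendored module:

```python
from datetime import datetime

from attrs import define
from cattr import Converter

@define
class Event:
    name: str
    when: datetime

conv = Converter()

# datetime is not handled out of the box, so register both directions.
conv.register_unstructure_hook(datetime, lambda dt: dt.isoformat())
conv.register_structure_hook(datetime, lambda value, _type: datetime.fromisoformat(value))

raw = conv.unstructure(Event("deploy", datetime(2024, 1, 1, 12, 0)))
# raw == {"name": "deploy", "when": "2024-01-01T12:00:00"}
assert conv.structure(raw, Event) == Event("deploy", datetime(2024, 1, 1, 12, 0))
```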
+ + def structure_attrs_fromtuple( + self, obj: Tuple[Any, ...], cl: Type[T] + ) -> T: + """Load an attrs class from a sequence (tuple).""" + conv_obj = [] # A list of converter parameters. + for a, value in zip(fields(cl), obj): # type: ignore + # We detect the type by the metadata. + converted = self._structure_attr_from_tuple(a, a.name, value) + conv_obj.append(converted) + + return cl(*conv_obj) # type: ignore + + def _structure_attr_from_tuple(self, a, _, value): + """Handle an individual attrs attribute.""" + type_ = a.type + if type_ is None: + # No type metadata. + return value + return self._structure_func.dispatch(type_)(value, type_) + + def structure_attrs_fromdict( + self, obj: Mapping[str, Any], cl: Type[T] + ) -> T: + """Instantiate an attrs class from a mapping (dict).""" + # For public use. + + conv_obj = {} # Start with a fresh dict, to ignore extra keys. + dispatch = self._structure_func.dispatch + for a in fields(cl): # type: ignore + # We detect the type by metadata. + type_ = a.type + name = a.name + + try: + val = obj[name] + except KeyError: + continue + + if name[0] == "_": + name = name[1:] + + conv_obj[name] = ( + dispatch(type_)(val, type_) if type_ is not None else val + ) + + return cl(**conv_obj) # type: ignore + + def _structure_list(self, obj, cl): + """Convert an iterable to a potentially generic list.""" + if is_bare(cl) or cl.__args__[0] is Any: + return [e for e in obj] + else: + elem_type = cl.__args__[0] + return [ + self._structure_func.dispatch(elem_type)(e, elem_type) + for e in obj + ] + + def _structure_set(self, obj, cl): + """Convert an iterable into a potentially generic set.""" + if is_bare(cl) or cl.__args__[0] is Any: + return set(obj) + else: + elem_type = cl.__args__[0] + return { + self._structure_func.dispatch(elem_type)(e, elem_type) + for e in obj + } + + def _structure_frozenset(self, obj, cl): + """Convert an iterable into a potentially generic frozenset.""" + if is_bare(cl) or cl.__args__[0] is Any: + return frozenset(obj) + else: + elem_type = cl.__args__[0] + dispatch = self._structure_func.dispatch + return frozenset(dispatch(elem_type)(e, elem_type) for e in obj) + + def _structure_dict(self, obj, cl): + """Convert a mapping into a potentially generic dict.""" + if is_bare(cl) or cl.__args__ == (Any, Any): + return dict(obj) + else: + key_type, val_type = cl.__args__ + if key_type is Any: + val_conv = self._structure_func.dispatch(val_type) + return {k: val_conv(v, val_type) for k, v in obj.items()} + elif val_type is Any: + key_conv = self._structure_func.dispatch(key_type) + return {key_conv(k, key_type): v for k, v in obj.items()} + else: + key_conv = self._structure_func.dispatch(key_type) + val_conv = self._structure_func.dispatch(val_type) + return { + key_conv(k, key_type): val_conv(v, val_type) + for k, v in obj.items() + } + + def _structure_union(self, obj, union): + """Deal with converting a union.""" + # Unions with NoneType in them are basically optionals. + # We check for NoneType early and handle the case of obj being None, + # so disambiguation functions don't need to handle NoneType. + union_params = union.__args__ + if NoneType in union_params: # type: ignore + if obj is None: + return None + if len(union_params) == 2: + # This is just a NoneType and something else. + other = ( + union_params[0] + if union_params[1] is NoneType # type: ignore + else union_params[1] + ) + # We can't actually have a Union of a Union, so this is safe. 
+ return self._structure_func.dispatch(other)(obj, other) + + # Check the union registry first. + handler = self._union_struct_registry.get(union) + if handler is not None: + return handler(obj, union) + + # Getting here means either this is not an optional, or it's an + # optional with more than one parameter. + # Let's support only unions of attr classes for now. + cl = self._dis_func_cache(union)(obj) + return self._structure_func.dispatch(cl)(obj, cl) + + def _structure_tuple(self, obj, tup: Type[T]): + """Deal with converting to a tuple.""" + if tup in (Tuple, tuple): + tup_params = None + else: + tup_params = tup.__args__ + has_ellipsis = tup_params and tup_params[-1] is Ellipsis + if tup_params is None or (has_ellipsis and tup_params[0] is Any): + # Just a Tuple. (No generic information.) + return tuple(obj) + if has_ellipsis: + # We're dealing with a homogenous tuple, Tuple[int, ...] + tup_type = tup_params[0] + conv = self._structure_func.dispatch(tup_type) + return tuple(conv(e, tup_type) for e in obj) + else: + # We're dealing with a heterogenous tuple. + return tuple( + self._structure_func.dispatch(t)(e, t) + for t, e in zip(tup_params, obj) + ) + + @staticmethod + def _get_dis_func(union): + # type: (Type) -> Callable[..., Type] + """Fetch or try creating a disambiguation function for a union.""" + union_types = union.__args__ + if NoneType in union_types: # type: ignore + # We support unions of attrs classes and NoneType higher in the + # logic. + union_types = tuple( + e for e in union_types if e is not NoneType # type: ignore + ) + + if not all(has(get_origin(e) or e) for e in union_types): + raise ValueError( + "Only unions of attr classes supported " + "currently. Register a loads hook manually." + ) + return create_uniq_field_dis_func(*union_types) + + +class GenConverter(Converter): + """A converter which generates specialized un/structuring functions.""" + + __slots__ = ( + "omit_if_default", + "forbid_extra_keys", + "type_overrides", + "_unstruct_collection_overrides", + ) + + def __init__( + self, + dict_factory: Callable[[], Any] = dict, + unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT, + omit_if_default: bool = False, + forbid_extra_keys: bool = False, + type_overrides: Mapping[Type, AttributeOverride] = {}, + unstruct_collection_overrides: Mapping[Type, Callable] = {}, + ): + super().__init__( + dict_factory=dict_factory, unstruct_strat=unstruct_strat + ) + self.omit_if_default = omit_if_default + self.forbid_extra_keys = forbid_extra_keys + self.type_overrides = dict(type_overrides) + + self._unstruct_collection_overrides = unstruct_collection_overrides + + # Do a little post-processing magic to make things easier for users. 
+ co = unstruct_collection_overrides + + # abc.Set overrides, if defined, apply to abc.MutableSets and sets + if Set in co: + if MutableSet not in co: + co[MutableSet] = co[Set] + + # abc.MutableSet overrrides, if defined, apply to sets + if MutableSet in co: + if set not in co: + co[set] = co[MutableSet] + + # abc.Sequence overrides, if defined, can apply to MutableSequences, lists and tuples + if Sequence in co: + if MutableSequence not in co: + co[MutableSequence] = co[Sequence] + if tuple not in co: + co[tuple] = co[Sequence] + + # abc.MutableSequence overrides, if defined, can apply to lists + if MutableSequence in co: + if list not in co: + co[list] = co[MutableSequence] + + # abc.Mapping overrides, if defined, can apply to MutableMappings + if Mapping in co: + if MutableMapping not in co: + co[MutableMapping] = co[Mapping] + + # abc.MutableMapping overrides, if defined, can apply to dicts + if MutableMapping in co: + if dict not in co: + co[dict] = co[MutableMapping] + + # builtins.dict overrides, if defined, can apply to counters + if dict in co: + if Counter not in co: + co[Counter] = co[dict] + + if unstruct_strat is UnstructureStrategy.AS_DICT: + # Override the attrs handler. + self._unstructure_func.register_func_list( + [ + ( + has_with_generic, + self.gen_unstructure_attrs_fromdict, + True, + ), + ] + ) + self._structure_func.register_func_list( + [ + (has, self.gen_structure_attrs_fromdict, True), + ] + ) + + self._unstructure_func.register_func_list( + [ + (is_annotated, self.gen_unstructure_annotated, True), + ( + is_sequence, + self.gen_unstructure_iterable, + True, + ), + (is_mapping, self.gen_unstructure_mapping, True), + ( + is_mutable_set, + lambda cl: self.gen_unstructure_iterable( + cl, unstructure_to=set + ), + True, + ), + ( + is_frozenset, + lambda cl: self.gen_unstructure_iterable( + cl, unstructure_to=frozenset + ), + True, + ), + ] + ) + self._structure_func.register_func_list( + [(is_annotated, self.gen_structure_annotated, True)] + ) + + def gen_unstructure_annotated(self, type): + origin = type.__origin__ + h = self._unstructure_func.dispatch(origin) + return h + + def gen_structure_annotated(self, type): + origin = type.__origin__ + h = self._structure_func.dispatch(origin) + return h + + def gen_unstructure_attrs_fromdict(self, cl: Type[T]) -> Dict[str, Any]: + origin = get_origin(cl) + if origin is not None: + cl = origin + attribs = fields(cl) + if any(isinstance(a.type, str) for a in attribs): + # PEP 563 annotations - need to be resolved. + resolve_types(cl) + attrib_overrides = { + a.name: self.type_overrides[a.type] + for a in attribs + if a.type in self.type_overrides + } + + h = make_dict_unstructure_fn( + cl, self, omit_if_default=self.omit_if_default, **attrib_overrides + ) + self._unstructure_func.register_cls_list([(cl, h)], direct=True) + return h + + def gen_structure_attrs_fromdict(self, cl: Type[T]) -> T: + attribs = fields(cl) + if any(isinstance(a.type, str) for a in attribs): + # PEP 563 annotations - need to be resolved. 
+ resolve_types(cl) + attrib_overrides = { + a.name: self.type_overrides[a.type] + for a in attribs + if a.type in self.type_overrides + } + h = make_dict_structure_fn( + cl, + self, + _cattrs_forbid_extra_keys=self.forbid_extra_keys, + **attrib_overrides + ) + self._structure_func.register_cls_list([(cl, h)], direct=True) + # only direct dispatch so that subclasses get separately generated + return h + + def gen_unstructure_iterable(self, cl: Any, unstructure_to=None): + unstructure_to = self._unstruct_collection_overrides.get( + get_origin(cl) or cl, unstructure_to or list + ) + h = make_iterable_unstructure_fn( + cl, self, unstructure_to=unstructure_to + ) + self._unstructure_func.register_cls_list([(cl, h)], direct=True) + return h + + def gen_unstructure_mapping(self, cl: Any, unstructure_to=None): + unstructure_to = self._unstruct_collection_overrides.get( + get_origin(cl) or cl, unstructure_to or dict + ) + h = make_mapping_unstructure_fn( + cl, self, unstructure_to=unstructure_to + ) + self._unstructure_func.register_cls_list([(cl, h)], direct=True) + return h diff --git a/MLPY/Lib/site-packages/cattr/disambiguators.py b/MLPY/Lib/site-packages/cattr/disambiguators.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc2e233bf3ec5530e46b9165d58ca73208f5aa2 --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/disambiguators.py @@ -0,0 +1,65 @@ +"""Utilities for union (sum type) disambiguation.""" +from collections import OrderedDict +from functools import reduce +from operator import or_ +from typing import ( # noqa: F401, imported for Mypy. + Callable, + Dict, + Mapping, + Optional, + Type, +) + +from attr import fields, NOTHING + +from cattr._compat import get_origin + + +def create_uniq_field_dis_func(*classes: Type) -> Callable: + """Given attr classes, generate a disambiguation function. + + The function is based on unique fields.""" + if len(classes) < 2: + raise ValueError("At least two classes required.") + cls_and_attrs = [ + (cl, set(at.name for at in fields(get_origin(cl) or cl))) + for cl in classes + ] + if len([attrs for _, attrs in cls_and_attrs if len(attrs) == 0]) > 1: + raise ValueError("At least two classes have no attributes.") + # TODO: Deal with a single class having no required attrs. + # For each class, attempt to generate a single unique required field. + uniq_attrs_dict = OrderedDict() # type: Dict[str, Type] + cls_and_attrs.sort(key=lambda c_a: -len(c_a[1])) + + fallback = None # If none match, try this. + + for i, (cl, cl_reqs) in enumerate(cls_and_attrs): + other_classes = cls_and_attrs[i + 1 :] + if other_classes: + other_reqs = reduce(or_, (c_a[1] for c_a in other_classes)) + uniq = cl_reqs - other_reqs + if not uniq: + m = "{} has no usable unique attributes.".format(cl) + raise ValueError(m) + # We need a unique attribute with no default. 
+ cl_fields = fields(get_origin(cl) or cl) + for attr_name in uniq: + if getattr(cl_fields, attr_name).default is NOTHING: + break + else: + raise ValueError(f"{cl} has no usable non-default attributes.") + uniq_attrs_dict[attr_name] = cl + else: + fallback = cl + + def dis_func(data): + # type: (Mapping) -> Optional[Type] + if not isinstance(data, Mapping): + raise ValueError("Only input mappings are supported.") + for k, v in uniq_attrs_dict.items(): + if k in data: + return v + return fallback + + return dis_func diff --git a/MLPY/Lib/site-packages/cattr/dispatch.py b/MLPY/Lib/site-packages/cattr/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..359c3f7e441ac18cc08803ffa1fd5f39e291e38d --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/dispatch.py @@ -0,0 +1,124 @@ +from functools import lru_cache, singledispatch +from typing import Any, Callable, List, Tuple, Union + +import attr + + +@attr.s +class _DispatchNotFound: + """A dummy object to help signify a dispatch not found.""" + + pass + + +class MultiStrategyDispatch: + """ + MultiStrategyDispatch uses a combination of exact-match dispatch, + singledispatch, and FunctionDispatch. + """ + + __slots__ = ( + "_direct_dispatch", + "_function_dispatch", + "_single_dispatch", + "_generators", + "dispatch", + ) + + def __init__(self, fallback_func): + self._direct_dispatch = {} + self._function_dispatch = FunctionDispatch() + self._function_dispatch.register(lambda _: True, fallback_func) + self._single_dispatch = singledispatch(_DispatchNotFound) + self.dispatch = lru_cache(maxsize=None)(self._dispatch) + + def _dispatch(self, cl): + try: + dispatch = self._single_dispatch.dispatch(cl) + if dispatch is not _DispatchNotFound: + return dispatch + except Exception: + pass + + direct_dispatch = self._direct_dispatch.get(cl) + if direct_dispatch is not None: + return direct_dispatch + + return self._function_dispatch.dispatch(cl) + + def register_cls_list(self, cls_and_handler, direct: bool = False): + """ register a class to direct or singledispatch """ + for cls, handler in cls_and_handler: + if direct: + self._direct_dispatch[cls] = handler + else: + self._single_dispatch.register(cls, handler) + self.clear_direct() + self.dispatch.cache_clear() + + def register_func_list( + self, + func_and_handler: List[ + Union[ + Tuple[Callable[[Any], bool], Any], + Tuple[Callable[[Any], bool], Any, bool], + ] + ], + ): + """register a function to determine if the handle + should be used for the type + """ + for tup in func_and_handler: + if len(tup) == 2: + func, handler = tup + self._function_dispatch.register(func, handler) + else: + func, handler, is_gen = tup + self._function_dispatch.register( + func, handler, is_generator=is_gen + ) + self.clear_direct() + self.dispatch.cache_clear() + + def clear_direct(self): + """Clear the direct dispatch.""" + self._direct_dispatch.clear() + + +class FunctionDispatch: + """ + FunctionDispatch is similar to functools.singledispatch, but + instead dispatches based on functions that take the type of the + first argument in the method, and return True or False. + + objects that help determine dispatch should be instantiated objects. + """ + + __slots__ = ("_handler_pairs",) + + def __init__(self): + self._handler_pairs = [] + + def register( + self, can_handle: Callable[[Any], bool], func, is_generator=False + ): + self._handler_pairs.insert(0, (can_handle, func, is_generator)) + + def dispatch(self, typ): + """ + returns the appropriate handler, for the object passed. 
+ """ + for can_handle, handler, is_generator in self._handler_pairs: + # can handle could raise an exception here + # such as issubclass being called on an instance. + # it's easier to just ignore that case. + try: + ch = can_handle(typ) + except Exception: + continue + if ch: + if is_generator: + return handler(typ) + else: + return handler + raise KeyError("unable to find handler for {0}".format(typ)) diff --git a/MLPY/Lib/site-packages/cattr/gen.py b/MLPY/Lib/site-packages/cattr/gen.py new file mode 100644 index 0000000000000000000000000000000000000000..9b332a0bc52dbe646e8ee2eea561ef4c98f93595 --- /dev/null +++ b/MLPY/Lib/site-packages/cattr/gen.py @@ -0,0 +1,281 @@ +import re +from typing import Any, Optional, Type, TypeVar +from dataclasses import is_dataclass + +import attr +from attr import NOTHING, resolve_types + +from ._compat import get_args, get_origin, is_generic, adapted_fields + + +@attr.s(slots=True, frozen=True) +class AttributeOverride(object): + omit_if_default: Optional[bool] = attr.ib(default=None) + rename: Optional[str] = attr.ib(default=None) + + +def override(omit_if_default=None, rename=None): + return AttributeOverride(omit_if_default=omit_if_default, rename=rename) + + +_neutral = AttributeOverride() + + +def make_dict_unstructure_fn(cl, converter, omit_if_default=False, **kwargs): + """Generate a specialized dict unstructuring function for an attrs class.""" + cl_name = cl.__name__ + fn_name = "unstructure_" + cl_name + globs = {} + lines = [] + post_lines = [] + + attrs = adapted_fields(cl) # type: ignore + + lines.append(f"def {fn_name}(i):") + lines.append(" res = {") + for a in attrs: + attr_name = a.name + override = kwargs.pop(attr_name, _neutral) + kn = attr_name if override.rename is None else override.rename + d = a.default + + # For each attribute, we try resolving the type here and now. + # If a type is manually overwritten, this function should be + # regenerated. + if a.type is not None: + handler = converter._unstructure_func.dispatch(a.type) + else: + handler = converter.unstructure + + is_identity = handler == converter._unstructure_identity + + if not is_identity: + unstruct_handler_name = f"__cattr_unstruct_handler_{attr_name}" + globs[unstruct_handler_name] = handler + invoke = f"{unstruct_handler_name}(i.{attr_name})" + else: + invoke = f"i.{attr_name}" + + if d is not attr.NOTHING and ( + (omit_if_default and override.omit_if_default is not False) + or override.omit_if_default + ): + def_name = f"__cattr_def_{attr_name}" + + if isinstance(d, attr.Factory): + globs[def_name] = d.factory + if d.takes_self: + post_lines.append( + f" if i.{attr_name} != {def_name}(i):" + ) + else: + post_lines.append(f" if i.{attr_name} != {def_name}():") + post_lines.append(f" res['{kn}'] = {invoke}") + else: + globs[def_name] = d + post_lines.append(f" if i.{attr_name} != {def_name}:") + post_lines.append(f" res['{kn}'] = {invoke}") + + else: + # No default or no override. 
+ lines.append(f" '{kn}': {invoke},") + lines.append(" }") + + total_lines = lines + post_lines + [" return res"] + + eval(compile("\n".join(total_lines), "", "exec"), globs) + + fn = globs[fn_name] + + return fn + + +def generate_mapping(cl: Type, old_mapping): + mapping = {} + for p, t in zip(get_origin(cl).__parameters__, get_args(cl)): + if isinstance(t, TypeVar): + continue + mapping[p.__name__] = t + + if not mapping: + return old_mapping + + cls = attr.make_class( + "GenericMapping", + {x: attr.attrib() for x in mapping.keys()}, + frozen=True, + ) + + return cls(**mapping) + + +def make_dict_structure_fn( + cl: Type, converter, _cattrs_forbid_extra_keys: bool = False, **kwargs +): + """Generate a specialized dict structuring function for an attrs class.""" + + mapping = None + if is_generic(cl): + base = get_origin(cl) + mapping = generate_mapping(cl, mapping) + cl = base + + for base in getattr(cl, "__orig_bases__", ()): + if is_generic(base) and not str(base).startswith("typing.Generic"): + mapping = generate_mapping(base, mapping) + break + + if isinstance(cl, TypeVar): + cl = getattr(mapping, cl.__name__, cl) + + cl_name = cl.__name__ + fn_name = "structure_" + cl_name + + # We have generic parameters and need to generate a unique name for the function + for p in getattr(cl, "__parameters__", ()): + # This is nasty, I am not sure how best to handle `typing.List[str]` or `TClass[int, int]` as a parameter type here + name_base = getattr(mapping, p.__name__) + name = getattr(name_base, "__name__", str(name_base)) + name = re.sub(r"[\[\.\] ,]", "_", name) + fn_name += f"_{name}" + + globs = {"__c_s": converter.structure, "__cl": cl, "__m": mapping} + lines = [] + post_lines = [] + + attrs = adapted_fields(cl) + is_dc = is_dataclass(cl) + + if any(isinstance(a.type, str) for a in attrs): + # PEP 563 annotations - need to be resolved. + resolve_types(cl) + + lines.append(f"def {fn_name}(o, *_):") + lines.append(" res = {") + for a in attrs: + an = a.name + override = kwargs.pop(an, _neutral) + type = a.type + if isinstance(type, TypeVar): + type = getattr(mapping, type.__name__, type) + + # For each attribute, we try resolving the type here and now. + # If a type is manually overwritten, this function should be + # regenerated. 
+ if type is not None: + handler = converter._structure_func.dispatch(type) + else: + handler = converter.structure + + struct_handler_name = f"__cattr_struct_handler_{an}" + globs[struct_handler_name] = handler + + ian = an if (is_dc or an[0] != "_") else an[1:] + kn = an if override.rename is None else override.rename + globs[f"__c_t_{an}"] = type + if a.default is NOTHING: + lines.append( + f" '{ian}': {struct_handler_name}(o['{kn}'], __c_t_{an})," + ) + else: + post_lines.append(f" if '{kn}' in o:") + post_lines.append( + f" res['{ian}'] = {struct_handler_name}(o['{kn}'], __c_t_{an})" + ) + lines.append(" }") + if _cattrs_forbid_extra_keys: + allowed_fields = {a.name for a in attrs} + globs["__c_a"] = allowed_fields + post_lines += [ + " unknown_fields = set(o.keys()) - __c_a", + " if unknown_fields:", + " raise Exception(", + f" 'Extra fields in constructor for {cl_name}: ' + ', '.join(unknown_fields)" + " )", + ] + + total_lines = lines + post_lines + [" return __cl(**res)"] + + eval(compile("\n".join(total_lines), "", "exec"), globs) + + return globs[fn_name] + + +def make_iterable_unstructure_fn(cl: Any, converter, unstructure_to=None): + """Generate a specialized unstructure function for an iterable.""" + handler = converter.unstructure + + fn_name = "unstructure_iterable" + + # Let's try fishing out the type args. + if getattr(cl, "__args__", None) is not None: + type_arg = get_args(cl)[0] + # We can do the dispatch here and now. + handler = converter._unstructure_func.dispatch(type_arg) + + globs = {"__cattr_seq_cl": unstructure_to or cl, "__cattr_u": handler} + lines = [] + + lines.append(f"def {fn_name}(iterable):") + lines.append(" res = __cattr_seq_cl(__cattr_u(i) for i in iterable)") + + total_lines = lines + [" return res"] + + eval(compile("\n".join(total_lines), "", "exec"), globs) + + fn = globs[fn_name] + + return fn + + +def make_mapping_unstructure_fn(cl: Any, converter, unstructure_to=None): + """Generate a specialized unstructure function for a mapping.""" + key_handler = converter.unstructure + val_handler = converter.unstructure + + fn_name = "unstructure_mapping" + + # Let's try fishing out the type args. + if getattr(cl, "__args__", None) is not None: + args = get_args(cl) + if len(args) == 2: + key_arg, val_arg = args + else: + # Probably a Counter + key_arg, val_arg = args, Any + # We can do the dispatch here and now. 
+ key_handler = converter._unstructure_func.dispatch(key_arg) + if key_handler == converter._unstructure_identity: + key_handler = None + + val_handler = converter._unstructure_func.dispatch(val_arg) + if val_handler == converter._unstructure_identity: + val_handler = None + + globs = { + "__cattr_mapping_cl": unstructure_to or cl, + "__cattr_k_u": key_handler, + "__cattr_v_u": val_handler, + } + if key_handler is not None: + globs["__cattr_k_u"] + if val_handler is not None: + globs["__cattr_v_u"] + + k_u = "__cattr_k_u(k)" if key_handler is not None else "k" + v_u = "__cattr_v_u(v)" if val_handler is not None else "v" + + lines = [] + + lines.append(f"def {fn_name}(mapping):") + lines.append( + f" res = __cattr_mapping_cl(({k_u}, {v_u}) for k, v in mapping.items())" + ) + + total_lines = lines + [" return res"] + + eval(compile("\n".join(total_lines), "", "exec"), globs) + + fn = globs[fn_name] + + return fn diff --git a/MLPY/Lib/site-packages/cattr/py.typed b/MLPY/Lib/site-packages/cattr/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/LICENSE b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..340022c335c359953f29e61e4beb8049ea038d7c --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/LICENSE @@ -0,0 +1,11 @@ + +MIT License + +Copyright (c) 2016, Tin Tvrtković + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/METADATA b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..bff3cf011a1752e07b23c9143353f430e2f9d2c6 --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/METADATA @@ -0,0 +1,380 @@ +Metadata-Version: 2.1 +Name: cattrs +Version: 1.5.0 +Summary: Composable complex class support for attrs and dataclasses. 
+Home-page: https://github.com/Tinche/cattrs +Author: Tin Tvrtković +Author-email: tinchester@gmail.com +License: MIT license +Keywords: cattrs +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Requires-Python: ~=3.7 +Requires-Dist: attrs (>=20.1.0) +Provides-Extra: dev +Requires-Dist: bumpversion ; extra == 'dev' +Requires-Dist: wheel ; extra == 'dev' +Requires-Dist: watchdog ; extra == 'dev' +Requires-Dist: flake8 ; extra == 'dev' +Requires-Dist: tox ; extra == 'dev' +Requires-Dist: coverage ; extra == 'dev' +Requires-Dist: Sphinx ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: pytest-benchmark ; extra == 'dev' +Requires-Dist: hypothesis ; extra == 'dev' +Requires-Dist: pendulum ; extra == 'dev' +Requires-Dist: isort ; extra == 'dev' +Requires-Dist: black ; extra == 'dev' +Requires-Dist: immutables ; extra == 'dev' + +====== +cattrs +====== + + +.. image:: https://img.shields.io/pypi/v/cattrs.svg + :target: https://pypi.python.org/pypi/cattrs + +.. image:: https://github.com/Tinche/cattrs/workflows/CI/badge.svg + :target: https://github.com/Tinche/cattrs/actions?workflow=CI + +.. image:: https://readthedocs.org/projects/cattrs/badge/?version=latest + :target: https://cattrs.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/pypi/pyversions/cattrs.svg + :target: https://github.com/Tinche/cattrs + :alt: Supported Python versions + +.. image:: https://codecov.io/gh/Tinche/cattrs/branch/master/graph/badge.svg + :target: https://codecov.io/gh/Tinche/cattrs + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/ambv/black + + +---- + +``cattrs`` is an open source Python library for structuring and unstructuring +data. ``cattrs`` works best with ``attrs`` classes, dataclasses and the usual +Python collections, but other kinds of classes are supported by manually +registering converters. + +Python has a rich set of powerful, easy to use, built-in data types like +dictionaries, lists and tuples. These data types are also the lingua franca +of most data serialization libraries, for formats like json, msgpack, yaml or +toml. + +Data types like this, and mappings like ``dict`` s in particular, represent +unstructured data. Your data is, in all likelihood, structured: not all +combinations of field names are values are valid inputs to your programs. In +Python, structured data is better represented with classes and enumerations. +``attrs`` is an excellent library for declaratively describing the structure of +your data, and validating it. + +When you're handed unstructured data (by your network, file system, database...), +``cattrs`` helps to convert this data into structured data. When you have to +convert your structured data into data types other libraries can handle, +``cattrs`` turns your classes and enumerations into dictionaries, integers and +strings. + +Here's a simple taste. The list containing a float, an int and a string +gets converted into a tuple of three ints. + +.. 
code-block:: pycon + + >>> import cattr + >>> + >>> cattr.structure([1.0, 2, "3"], tuple[int, int, int]) + (1, 2, 3) + +``cattrs`` works well with ``attrs`` classes out of the box. + +.. code-block:: pycon + + >>> import attr, cattr + >>> + >>> @attr.frozen # It works with normal classes too. + ... class C: + ... a = attr.ib() + ... b = attr.ib() + ... + >>> instance = C(1, 'a') + >>> cattr.unstructure(instance) + {'a': 1, 'b': 'a'} + >>> cattr.structure({'a': 1, 'b': 'a'}, C) + C(a=1, b='a') + +Here's a much more complex example, involving ``attrs`` classes with type +metadata. + +.. code-block:: pycon + + >>> from enum import unique, Enum + >>> from typing import Optional, Sequence, Union + >>> from cattr import structure, unstructure + >>> import attr + >>> + >>> @unique + ... class CatBreed(Enum): + ... SIAMESE = "siamese" + ... MAINE_COON = "maine_coon" + ... SACRED_BIRMAN = "birman" + ... + >>> @attr.define + ... class Cat: + ... breed: CatBreed + ... names: Sequence[str] + ... + >>> @attr.define + ... class DogMicrochip: + ... chip_id = attr.ib() + ... time_chipped: float = attr.ib() + ... + >>> @attr.define + ... class Dog: + ... cuteness: int + ... chip: Optional[DogMicrochip] + ... + >>> p = unstructure([Dog(cuteness=1, chip=DogMicrochip(chip_id=1, time_chipped=10.0)), + ... Cat(breed=CatBreed.MAINE_COON, names=('Fluffly', 'Fluffer'))]) + ... + >>> print(p) + [{'cuteness': 1, 'chip': {'chip_id': 1, 'time_chipped': 10.0}}, {'breed': 'maine_coon', 'names': ('Fluffly', 'Fluffer')}] + >>> print(structure(p, list[Union[Dog, Cat]])) + [Dog(cuteness=1, chip=DogMicrochip(chip_id=1, time_chipped=10.0)), Cat(breed=, names=['Fluffly', 'Fluffer'])] + +Consider unstructured data a low-level representation that needs to be converted +to structured data to be handled, and use ``structure``. When you're done, +``unstructure`` the data to its unstructured form and pass it along to another +library or module. Use `attrs type metadata `_ +to add type metadata to attributes, so ``cattrs`` will know how to structure and +destructure them. + +* Free software: MIT license +* Documentation: https://cattrs.readthedocs.io. +* Python versions supported: 3.7 and up. (Older Python versions, like 2.7, 3.5 and 3.6 are supported by older versions; see the changelog.) + + +Features +-------- + +* Converts structured data into unstructured data, recursively: + + * ``attrs`` classes and dataclasses are converted into dictionaries in a way similar to ``attr.asdict``, or into tuples in a way similar to ``attr.astuple``. + * Enumeration instances are converted to their values. + * Other types are let through without conversion. This includes types such as + integers, dictionaries, lists and instances of non-``attrs`` classes. + * Custom converters for any type can be registered using ``register_unstructure_hook``. + +* Converts unstructured data into structured data, recursively, according to + your specification given as a type. The following types are supported: + + * ``typing.Optional[T]``. + * ``typing.List[T]``, ``typing.MutableSequence[T]``, ``typing.Sequence[T]`` (converts to a list). + * ``typing.Tuple`` (both variants, ``Tuple[T, ...]`` and ``Tuple[X, Y, Z]``). + * ``typing.MutableSet[T]``, ``typing.Set[T]`` (converts to a set). + * ``typing.FrozenSet[T]`` (converts to a frozenset). + * ``typing.Dict[K, V]``, ``typing.MutableMapping[K, V]``, ``typing.Mapping[K, V]`` (converts to a dict). + * ``attrs`` classes with simple attributes and the usual ``__init__``. 
+ + * Simple attributes are attributes that can be assigned unstructured data, + like numbers, strings, and collections of unstructured data. + + * All `attrs` classes and dataclasses with the usual ``__init__``, if their complex attributes have type metadata. + * ``typing.Union`` s of supported ``attrs`` classes, given that all of the classes have a unique field. + * ``typing.Union`` s of anything, given that you provide a disambiguation function for it. + * Custom converters for any type can be registered using ``register_structure_hook``. + +Credits +------- + +Major credits to Hynek Schlawack for creating attrs_ and its predecessor, +characteristic_. + +``cattrs`` is tested with Hypothesis_, by David R. MacIver. + +``cattrs`` is benchmarked using perf_ and pytest-benchmark_. + +This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. + +.. _attrs: https://github.com/hynek/attrs +.. _characteristic: https://github.com/hynek/characteristic +.. _Hypothesis: http://hypothesis.readthedocs.io/en/latest/ +.. _perf: https://github.com/haypo/perf +.. _pytest-benchmark: https://pytest-benchmark.readthedocs.io/en/latest/index.html +.. _Cookiecutter: https://github.com/audreyr/cookiecutter +.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage + + + +======= +History +======= + +1.5.0 (2021-04-15) +------------------ +* Fix an issue with ``GenConverter`` unstructuring ``attrs`` classes and dataclasses with generic fields. + (`#65 `_) +* ``GenConverter`` has support for easy overriding of collection unstructuring types (for example, unstructure all sets to lists) through its ``unstruct_collection_overrides`` argument. + (`#137 `_) +* Unstructuring mappings with ``GenConverter`` is significantly faster. +* ``GenConverter`` supports strict handling of unexpected dictionary keys through its ``forbid_extra_keys`` argument. + (`#142 `_) + +1.4.0 (2021-03-21) +------------------ +* Fix an issue with ``GenConverter`` un/structuring hooks when a function hook is registered after the converter has already been used. +* Add support for ``collections.abc.{Sequence, MutableSequence, Set, MutableSet}``. These should be used on 3.9+ instead of their ``typing`` alternatives, which are deprecated. + (`#128 `_) +* The ``GenConverter`` will unstructure iterables (``list[T]``, ``tuple[T, ...]``, ``set[T]``) using their type argument instead of the runtime class if its elements, if possible. These unstructuring operations are up to 40% faster. + (`#129 `_) +* Flesh out ``Converter`` and ``GenConverter`` initializer type annotations. + (`#131 `_) +* Add support for ``typing.Annotated`` on Python 3.9+. ``cattrs`` will use the first annotation present. ``cattrs`` specific annotations may be added in the future. + (`#127 `_) +* Add support for dataclasses. + (`#43 `_) + +1.3.0 (2021-02-25) +------------------ +* ``cattrs`` now has a benchmark suite to help make and keep cattrs the fastest it can be. The instructions on using it can be found under the `Benchmarking ` section in the docs. + (`#123 `_) +* Fix an issue unstructuring tuples of non-primitives. + (`#125 `_) +* ``cattrs`` now calls ``attr.resolve_types`` on ``attrs`` classes when registering un/structuring hooks. +* ``GenConverter`` structuring and unstructuring of ``attrs`` classes is significantly faster. 
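+
+A minimal sketch of the two ``GenConverter`` options described in the 1.5.0
+entry above (``unstruct_collection_overrides`` and ``forbid_extra_keys``).
+This is an editor's illustration, not part of the upstream changelog; the
+``Inventory`` class is hypothetical, and using the builtin ``set`` as the
+override key is an assumption made for this sketch:
+
+.. code-block:: pycon
+
+    >>> from typing import Set
+    >>> import attr
+    >>> from cattr import GenConverter
+    >>>
+    >>> @attr.define
+    ... class Inventory:
+    ...     tags: Set[str]
+    ...
+    >>> converter = GenConverter(
+    ...     unstruct_collection_overrides={set: list},  # unstructure sets as lists
+    ...     forbid_extra_keys=True,
+    ... )
+    >>> converter.unstructure(Inventory(tags={"new"}))
+    {'tags': ['new']}
+    >>> converter.structure({'tags': [], 'oops': 1}, Inventory)
+    Traceback (most recent call last):
+        ...
+    Exception: Extra fields in constructor for Inventory: oops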
+ +1.2.0 (2021-01-31) +------------------ +* ``converter.unstructure`` now supports an optional parameter, `unstructure_as`, which can be used to unstructure something as a different type. Useful for unions. +* Improve support for union un/structuring hooks. Flesh out docs for advanced union handling. + (`#115 `_) +* Fix `GenConverter` behavior with inheritance hierarchies of `attrs` classes. + (`#117 `_) (`#116 `_) +* Refactor `GenConverter.un/structure_attrs_fromdict` into `GenConverter.gen_un/structure_attrs_fromdict` to allow calling back to `Converter.un/structure_attrs_fromdict` without sideeffects. + (`#118 `_) + +1.1.2 (2020-11-29) +------------------ +* The default disambiguator will not consider non-required fields any more. + (`#108 `_) +* Fix a couple type annotations. + (`#107 `_) (`#105 `_) +* Fix a `GenConverter` unstructuring issue and tests. + +1.1.1 (2020-10-30) +------------------ +* Add metadata for supported Python versions. + (`#103 `_) + +1.1.0 (2020-10-29) +------------------ +* Python 2, 3.5 and 3.6 support removal. If you need it, use a version below 1.1.0. +* Python 3.9 support, including support for built-in generic types (``list[int]`` vs ``typing.List[int]``). +* ``cattrs`` now includes functions to generate specialized structuring and unstructuring hooks. Specialized hooks are faster and support overrides (``omit_if_default`` and ``rename``). See the ``cattr.gen`` module. +* ``cattrs`` now includes a converter variant, ``cattr.GenConverter``, that automatically generates specialized hooks for attrs classes. This converter will become the default in the future. +* Generating specialized structuring hooks now invokes `attr.resolve_types `_ on a class if the class makes use of the new PEP 563 annotations. +* ``cattrs`` now depends on ``attrs`` >= 20.1.0, because of ``attr.resolve_types``. +* Specialized hooks now support generic classes. The default converter will generate and use a specialized hook upon encountering a generic class. + +1.0.0 (2019-12-27) +------------------ +* ``attrs`` classes with private attributes can now be structured by default. +* Structuring from dictionaries is now more lenient: extra keys are ignored. +* ``cattrs`` has improved type annotations for use with Mypy. +* Unstructuring sets and frozensets now works properly. + +0.9.1 (2019-10-26) +------------------ +* Python 3.8 support. + +0.9.0 (2018-07-22) +------------------ +* Python 3.7 support. + +0.8.1 (2018-06-19) +------------------ +* The disambiguation function generator now supports unions of ``attrs`` classes and NoneType. + +0.8.0 (2018-04-14) +------------------ +* Distribution fix. + +0.7.0 (2018-04-12) +------------------ +* Removed the undocumented ``Converter.unstruct_strat`` property setter. +* | Removed the ability to set the ``Converter.structure_attrs`` instance field. + | As an alternative, create a new ``Converter``:: + | + | .. code-block:: python + | + | >>> converter = cattr.Converter(unstruct_strat=cattr.UnstructureStrategy.AS_TUPLE) +* Some micro-optimizations were applied; a ``structure(unstructure(obj))`` roundtrip + is now up to 2 times faster. + +0.6.0 (2017-12-25) +------------------ +* Packaging fixes. + (`#17 `_) + +0.5.0 (2017-12-11) +------------------ +* structure/unstructure now supports using functions as well as classes for deciding the appropriate function. +* added `Converter.register_structure_hook_func`, to register a function instead of a class for determining handler func. 
+* added `Converter.register_unstructure_hook_func`, to register a function instead of a class for determining handler func. +* vendored typing is no longer needed, nor provided. +* Attributes with default values can now be structured if they are missing in the input. + (`#15 `_) +* | `Optional` attributes can no longer be structured if they are missing in the input. + | In other words, this no longer works: + | + | .. code-block:: python + | + | @attr.s + | class A: + | a: Optional[int] = attr.ib() + | + | >>> cattr.structure({}, A) + | +* ``cattr.typed`` removed since the functionality is now present in ``attrs`` itself. + Replace instances of ``cattr.typed(type)`` with ``attr.ib(type=type)``. + +0.4.0 (2017-07-17) +------------------ +* `Converter.loads` is now `Converter.structure`, and `Converter.dumps` is now `Converter.unstructure`. +* Python 2.7 is supported. +* Moved ``cattr.typing`` to ``cattr.vendor.typing`` to support different vendored versions of typing.py for Python 2 and Python 3. +* Type metadata can be added to ``attrs`` classes using ``cattr.typed``. + + +0.3.0 (2017-03-18) +------------------ +* Python 3.4 is no longer supported. +* Introduced ``cattr.typing`` for use with Python versions 3.5.2 and 3.6.0. +* Minor changes to work with newer versions of ``typing``. + + * Bare Optionals are not supported any more (use ``Optional[Any]``). + +* Attempting to load unrecognized classes will result in a ValueError, and a helpful message to register a loads hook. +* Loading ``attrs`` classes is now documented. +* The global converter is now documented. +* ``cattr.loads_attrs_fromtuple`` and ``cattr.loads_attrs_fromdict`` are now exposed. + + +0.2.0 (2016-10-02) +------------------ +* Tests and documentation. + +0.1.0 (2016-08-13) +------------------ +* First release on PyPI. 
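+
+As a closing illustration (an editor's sketch, not part of the upstream
+changelog): the ``register_structure_hook`` / ``register_unstructure_hook``
+API referenced in the feature list above can be used to add support for
+arbitrary types, for example ``datetime``:
+
+.. code-block:: pycon
+
+    >>> from datetime import datetime
+    >>> import cattr
+    >>>
+    >>> cattr.register_unstructure_hook(datetime, lambda dt: dt.isoformat())
+    >>> cattr.register_structure_hook(
+    ...     datetime, lambda value, _: datetime.fromisoformat(value)
+    ... )
+    >>> cattr.unstructure(datetime(2021, 4, 15))
+    '2021-04-15T00:00:00'
+    >>> cattr.structure('2021-04-15T00:00:00', datetime)
+    datetime.datetime(2021, 4, 15, 0, 0)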
+ + diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/RECORD b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..beeb463d23a66aad59bdd4005c4152b807f67c83 --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/RECORD @@ -0,0 +1,19 @@ +cattr/__init__.py,sha256=nmZdAsap4xUpY8wlotb7s0_iBUWynLu28h7U9f_WpT8,941 +cattr/__pycache__/__init__.cpython-39.pyc,, +cattr/__pycache__/_compat.cpython-39.pyc,, +cattr/__pycache__/converters.cpython-39.pyc,, +cattr/__pycache__/disambiguators.cpython-39.pyc,, +cattr/__pycache__/dispatch.cpython-39.pyc,, +cattr/__pycache__/gen.cpython-39.pyc,, +cattr/_compat.py,sha256=CEkgleEvbrvwIXU6pZ_7Di-xz4EWa6WbmBwUMXaPgXs,7543 +cattr/converters.py,sha256=PBzpPtazi7FOa_ufE_zuhv6yn_GIxH9MHK4vgvoGLN8,23155 +cattr/disambiguators.py,sha256=ecmyo5m0u40yB6gJF1lLS7ahc1BYQi0rb429tJ0ljrM,2265 +cattr/dispatch.py,sha256=IHTQsdnLUO8EykRIJqQpbNibjwg5mzwxl_btZ8fv0CY,3806 +cattr/gen.py,sha256=ljGgFUt22fst_qJfkcA3h_xf81qOp7bkqPKapN3fw8c,8985 +cattr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +cattrs-1.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +cattrs-1.5.0.dist-info/LICENSE,sha256=9fudHt43qIykf0IMSZ3KD0oFvJk-Esd9I1IKrSkcAb8,1074 +cattrs-1.5.0.dist-info/METADATA,sha256=sU6YCD_ZErpNWSHI_0GnpKWFNAv6TPU4Q3xwOgtVlZQ,16768 +cattrs-1.5.0.dist-info/RECORD,, +cattrs-1.5.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +cattrs-1.5.0.dist-info/top_level.txt,sha256=rWlxXw8tRmQ7r41hUbAKU-YsBZofc-QB3me1KD0S_tA,6 diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/WHEEL b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..af8c300133b82c9cf309f3d456126cf69c5e390a --- /dev/null +++ b/MLPY/Lib/site-packages/cattrs-1.5.0.dist-info/top_level.txt @@ -0,0 +1 @@ +cattr diff --git a/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/METADATA b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e1dabf19cec84dadbc2875424decc36e7dd212ed --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/METADATA @@ -0,0 +1,184 @@ +Metadata-Version: 2.1 +Name: cloudpickle +Version: 3.0.0 +Summary: Pickler class to extend the standard pickle.Pickler functionality +Home-page: https://github.com/cloudpipe/cloudpickle +License: BSD-3-Clause +Author: The cloudpickle developer team +Author-email: cloudpipe@googlegroups.com +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License 
+Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: System :: Distributed Computing + +# cloudpickle + +[![Automated Tests](https://github.com/cloudpipe/cloudpickle/workflows/Automated%20Tests/badge.svg?branch=master&event=push)](https://github.com/cloudpipe/cloudpickle/actions) +[![codecov.io](https://codecov.io/github/cloudpipe/cloudpickle/coverage.svg?branch=master)](https://codecov.io/github/cloudpipe/cloudpickle?branch=master) + +`cloudpickle` makes it possible to serialize Python constructs not supported +by the default `pickle` module from the Python standard library. + +`cloudpickle` is especially useful for **cluster computing** where Python +code is shipped over the network to execute on remote hosts, possibly close +to the data. + +Among other things, `cloudpickle` supports pickling for **lambda functions** +along with **functions and classes defined interactively** in the +`__main__` module (for instance in a script, a shell or a Jupyter notebook). + +Cloudpickle can only be used to send objects between the **exact same version +of Python**. + +Using `cloudpickle` for **long-term object storage is not supported and +strongly discouraged.** + +**Security notice**: one should **only load pickle data from trusted sources** as +otherwise `pickle.load` can lead to arbitrary code execution resulting in a critical +security vulnerability. + + +Installation +------------ + +The latest release of `cloudpickle` is available from +[pypi](https://pypi.python.org/pypi/cloudpickle): + + pip install cloudpickle + + +Examples +-------- + +Pickling a lambda expression: + +```python +>>> import cloudpickle +>>> squared = lambda x: x ** 2 +>>> pickled_lambda = cloudpickle.dumps(squared) + +>>> import pickle +>>> new_squared = pickle.loads(pickled_lambda) +>>> new_squared(2) +4 +``` + +Pickling a function interactively defined in a Python shell session +(in the `__main__` module): + +```python +>>> CONSTANT = 42 +>>> def my_function(data: int) -> int: +... return data + CONSTANT +... +>>> pickled_function = cloudpickle.dumps(my_function) +>>> depickled_function = pickle.loads(pickled_function) +>>> depickled_function + int> +>>> depickled_function(43) +85 +``` + + +Overriding pickle's serialization mechanism for importable constructs: +---------------------------------------------------------------------- + +An important difference between `cloudpickle` and `pickle` is that +`cloudpickle` can serialize a function or class **by value**, whereas `pickle` +can only serialize it **by reference**. Serialization by reference treats +functions and classes as attributes of modules, and pickles them through +instructions that trigger the import of their module at load time. +Serialization by reference is thus limited in that it assumes that the module +containing the function or class is available/importable in the unpickling +environment. 
This assumption breaks when pickling constructs defined in an +interactive session, a case that is automatically detected by `cloudpickle`, +that pickles such constructs **by value**. + +Another case where the importability assumption is expected to break is when +developing a module in a distributed execution environment: the worker +processes may not have access to the said module, for example if they live on a +different machine than the process in which the module is being developed. By +itself, `cloudpickle` cannot detect such "locally importable" modules and +switch to serialization by value; instead, it relies on its default mode, which +is serialization by reference. However, since `cloudpickle 2.0.0`, one can +explicitly specify modules for which serialization by value should be used, +using the +`register_pickle_by_value(module)`/`/unregister_pickle_by_value(module)` API: + +```python +>>> import cloudpickle +>>> import my_module +>>> cloudpickle.register_pickle_by_value(my_module) +>>> cloudpickle.dumps(my_module.my_function) # my_function is pickled by value +>>> cloudpickle.unregister_pickle_by_value(my_module) +>>> cloudpickle.dumps(my_module.my_function) # my_function is pickled by reference +``` + +Using this API, there is no need to re-install the new version of the module on +all the worker nodes nor to restart the workers: restarting the client Python +process with the new source code is enough. + +Note that this feature is still **experimental**, and may fail in the following +situations: + +- If the body of a function/class pickled by value contains an `import` statement: + ```python + >>> def f(): + >>> ... from another_module import g + >>> ... # calling f in the unpickling environment may fail if another_module + >>> ... # is unavailable + >>> ... return g() + 1 + ``` + +- If a function pickled by reference uses a function pickled by value during its execution. + + +Running the tests +----------------- + +- With `tox`, to test run the tests for all the supported versions of + Python and PyPy: + + pip install tox + tox + + or alternatively for a specific environment: + + tox -e py312 + + +- With `pytest` to only run the tests for your current version of + Python: + + pip install -r dev-requirements.txt + PYTHONPATH='.:tests' pytest + +History +------- + +`cloudpickle` was initially developed by [picloud.com](http://web.archive.org/web/20140721022102/http://blog.picloud.com/2013/11/17/picloud-has-joined-dropbox/) and shipped as part of +the client SDK. + +A copy of `cloudpickle.py` was included as part of PySpark, the Python +interface to [Apache Spark](https://spark.apache.org/). Davies Liu, Josh +Rosen, Thom Neale and other Apache Spark developers improved it significantly, +most notably to add support for PyPy and Python 3. + +The aim of the `cloudpickle` project is to make that work available to a wider +audience outside of the Spark ecosystem and to make it easier to improve it +further notably with the help of a dedicated non-regression test suite. 
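+
+For completeness, one more small usage sketch (an editor's illustration; it
+assumes only the `dump`/`load` helpers exported from `cloudpickle`): a locally
+defined lambda can be round-tripped through any binary file-like object:
+
+```python
+>>> import io
+>>> import cloudpickle
+>>> buffer = io.BytesIO()
+>>> cloudpickle.dump(lambda x: x * 3, buffer)  # the lambda is pickled by value
+>>> buffer.seek(0)
+0
+>>> cloudpickle.load(buffer)(14)
+42
+```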
+ diff --git a/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/RECORD b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2f5817a53bd0f145d2f3f6a924134b36f0683461 --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/RECORD @@ -0,0 +1,10 @@ +cloudpickle-3.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +cloudpickle-3.0.0.dist-info/METADATA,sha256=KypidktC6rU0wvKKVftMrMAIg3sVaYWMSzkModFn5FY,6955 +cloudpickle-3.0.0.dist-info/RECORD,, +cloudpickle-3.0.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +cloudpickle/__init__.py,sha256=vb2JCOn1EpLUdVyPe1ESyhLymcvh-Rk3ISHJ-52aDLw,308 +cloudpickle/__pycache__/__init__.cpython-39.pyc,, +cloudpickle/__pycache__/cloudpickle.cpython-39.pyc,, +cloudpickle/__pycache__/cloudpickle_fast.cpython-39.pyc,, +cloudpickle/cloudpickle.py,sha256=APCGMuIfVpWcelGsLlo2zRmwKRloaoiznQEOAoEWH9Y,55283 +cloudpickle/cloudpickle_fast.py,sha256=1GqUD4nLKsv0vv9ty2La3eVLyeWNrPFlhUCN-aNI-30,322 diff --git a/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/WHEEL b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle-3.0.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/MLPY/Lib/site-packages/cloudpickle/__init__.py b/MLPY/Lib/site-packages/cloudpickle/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle/__init__.py @@ -0,0 +1,18 @@ +from . 
import cloudpickle +from .cloudpickle import * # noqa + +__doc__ = cloudpickle.__doc__ + +__version__ = "3.0.0" + +__all__ = [ # noqa + "__version__", + "Pickler", + "CloudPickler", + "dumps", + "loads", + "dump", + "load", + "register_pickle_by_value", + "unregister_pickle_by_value", +] diff --git a/MLPY/Lib/site-packages/cloudpickle/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/cloudpickle/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1032220147ede9887787deee61ef83e417cfe41e Binary files /dev/null and b/MLPY/Lib/site-packages/cloudpickle/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle.cpython-39.pyc b/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405d287435b5481ea0dc0d0061b9fa8283531416 Binary files /dev/null and b/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle_fast.cpython-39.pyc b/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle_fast.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d35141928434f636903be04f748d167bee9aa287 Binary files /dev/null and b/MLPY/Lib/site-packages/cloudpickle/__pycache__/cloudpickle_fast.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/cloudpickle/cloudpickle.py b/MLPY/Lib/site-packages/cloudpickle/cloudpickle.py new file mode 100644 index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle/cloudpickle.py @@ -0,0 +1,1487 @@ +"""Pickler class to extend the standard pickle.Pickler functionality + +The main objective is to make it natural to perform distributed computing on +clusters (such as PySpark, Dask, Ray...) with interactively defined code +(functions, classes, ...) written in notebooks or console. + +In particular this pickler adds the following features: +- serialize interactively-defined or locally-defined functions, classes, + enums, typevars, lambdas and nested functions to compiled byte code; +- deal with some other non-serializable objects in an ad-hoc manner where + applicable. + +This pickler is therefore meant to be used for the communication between short +lived Python processes running the same version of Python and libraries. In +particular, it is not meant to be used for long term storage of Python objects. + +It does not include an unpickler, as standard Python unpickling suffices. + +This module was extracted from the `cloud` package, developed by `PiCloud, Inc. +`_. + +Copyright (c) 2012-now, CloudPickle developers and contributors. +Copyright (c) 2012, Regents of the University of California. +Copyright (c) 2009 `PiCloud, Inc. `_. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the University of California, Berkeley nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import _collections_abc +from collections import ChainMap, OrderedDict +import abc +import builtins +import copyreg +import dataclasses +import dis +from enum import Enum +import io +import itertools +import logging +import opcode +import pickle +from pickle import _getattribute +import platform +import struct +import sys +import threading +import types +import typing +import uuid +import warnings +import weakref + +# The following import is required to be imported in the cloudpickle +# namespace to be able to load pickle files generated with older versions of +# cloudpickle. See: tests/test_backward_compat.py +from types import CellType # noqa: F401 + + +# cloudpickle is meant for inter process communication: we expect all +# communicating processes to run the same Python version hence we favor +# communication speed over compatibility: +DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + +# Names of modules whose resources should be treated as dynamic. +_PICKLE_BY_VALUE_MODULES = set() + +# Track the provenance of reconstructed dynamic classes to make it possible to +# reconstruct instances from the matching singleton class definition when +# appropriate and preserve the usual "isinstance" semantics of Python objects. +_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary() +_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary() +_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock() + +PYPY = platform.python_implementation() == "PyPy" + +builtin_code_type = None +if PYPY: + # builtin-code objects only exist in pypy + builtin_code_type = type(float.__new__.__code__) + +_extract_code_globals_cache = weakref.WeakKeyDictionary() + + +def _get_or_create_tracker_id(class_def): + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def) + if class_tracker_id is None: + class_tracker_id = uuid.uuid4().hex + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def + return class_tracker_id + + +def _lookup_class_or_track(class_tracker_id, class_def): + if class_tracker_id is not None: + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault( + class_tracker_id, class_def + ) + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + return class_def + + +def register_pickle_by_value(module): + """Register a module to make it functions and classes picklable by value. 
+ + By default, functions and classes that are attributes of an importable + module are to be pickled by reference, that is relying on re-importing + the attribute from the module at load time. + + If `register_pickle_by_value(module)` is called, all its functions and + classes are subsequently to be pickled by value, meaning that they can + be loaded in Python processes where the module is not importable. + + This is especially useful when developing a module in a distributed + execution environment: restarting the client Python process with the new + source code is enough: there is no need to re-install the new version + of the module on all the worker nodes nor to restart the workers. + + Note: this feature is considered experimental. See the cloudpickle + README.md file for more details and limitations. + """ + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + # In the future, cloudpickle may need a way to access any module registered + # for pickling by value in order to introspect relative imports inside + # functions pickled by value. (see + # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633). + # This access can be ensured by checking that module is present in + # sys.modules at registering time and assuming that it will still be in + # there when accessed during pickling. Another alternative would be to + # store a weakref to the module. Even though cloudpickle does not implement + # this introspection yet, in order to avoid a possible breaking change + # later, we still enforce the presence of module inside sys.modules. + if module.__name__ not in sys.modules: + raise ValueError( + f"{module} was not imported correctly, have you used an " + "`import` statement to access it?" + ) + _PICKLE_BY_VALUE_MODULES.add(module.__name__) + + +def unregister_pickle_by_value(module): + """Unregister that the input module should be pickled by value.""" + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + if module.__name__ not in _PICKLE_BY_VALUE_MODULES: + raise ValueError(f"{module} is not registered for pickle by value") + else: + _PICKLE_BY_VALUE_MODULES.remove(module.__name__) + + +def list_registry_pickle_by_value(): + return _PICKLE_BY_VALUE_MODULES.copy() + + +def _is_registered_pickle_by_value(module): + module_name = module.__name__ + if module_name in _PICKLE_BY_VALUE_MODULES: + return True + while True: + parent_name = module_name.rsplit(".", 1)[0] + if parent_name == module_name: + break + if parent_name in _PICKLE_BY_VALUE_MODULES: + return True + module_name = parent_name + return False + + +def _whichmodule(obj, name): + """Find the module an object belongs to. + + This function differs from ``pickle.whichmodule`` in two ways: + - it does not mangle the cases where obj's module is __main__ and obj was + not found in any module. + - Errors arising during module introspection are ignored, as those errors + are considered unwanted side effects. + """ + module_name = getattr(obj, "__module__", None) + + if module_name is not None: + return module_name + # Protect the iteration by using a copy of sys.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr or + # other threads importing at the same time. 
+ for module_name, module in sys.modules.copy().items(): + # Some modules such as coverage can inject non-module objects inside + # sys.modules + if ( + module_name == "__main__" + or module is None + or not isinstance(module, types.ModuleType) + ): + continue + try: + if _getattribute(module, name)[0] is obj: + return module_name + except Exception: + pass + return None + + +def _should_pickle_by_reference(obj, name=None): + """Test whether an function or a class should be pickled by reference + + Pickling by reference means by that the object (typically a function or a + class) is an attribute of a module that is assumed to be importable in the + target Python environment. Loading will therefore rely on importing the + module and then calling `getattr` on it to access the function or class. + + Pickling by reference is the only option to pickle functions and classes + in the standard library. In cloudpickle the alternative option is to + pickle by value (for instance for interactively or locally defined + functions and classes or for attributes of modules that have been + explicitly registered to be pickled by value. + """ + if isinstance(obj, types.FunctionType) or issubclass(type(obj), type): + module_and_name = _lookup_module_and_qualname(obj, name=name) + if module_and_name is None: + return False + module, name = module_and_name + return not _is_registered_pickle_by_value(module) + + elif isinstance(obj, types.ModuleType): + # We assume that sys.modules is primarily used as a cache mechanism for + # the Python import machinery. Checking if a module has been added in + # is sys.modules therefore a cheap and simple heuristic to tell us + # whether we can assume that a given module could be imported by name + # in another Python process. + if _is_registered_pickle_by_value(obj): + return False + return obj.__name__ in sys.modules + else: + raise TypeError( + "cannot check importability of {} instances".format(type(obj).__name__) + ) + + +def _lookup_module_and_qualname(obj, name=None): + if name is None: + name = getattr(obj, "__qualname__", None) + if name is None: # pragma: no cover + # This used to be needed for Python 2.7 support but is probably not + # needed anymore. However we keep the __name__ introspection in case + # users of cloudpickle rely on this old behavior for unknown reasons. + name = getattr(obj, "__name__", None) + + module_name = _whichmodule(obj, name) + + if module_name is None: + # In this case, obj.__module__ is None AND obj was not found in any + # imported module. obj is thus treated as dynamic. + return None + + if module_name == "__main__": + return None + + # Note: if module_name is in sys.modules, the corresponding module is + # assumed importable at unpickling time. See #357 + module = sys.modules.get(module_name, None) + if module is None: + # The main reason why obj's module would not be imported is that this + # module has been dynamically created, using for example + # types.ModuleType. The other possibility is that module was removed + # from sys.modules after obj was created/imported. But this case is not + # supported, as the standard pickle does not support it either. 
+ return None + + try: + obj2, parent = _getattribute(module, name) + except AttributeError: + # obj was not found inside the module it points to + return None + if obj2 is not obj: + return None + return module, name + + +def _extract_code_globals(co): + """Find all globals names read or written to by codeblock co.""" + out_names = _extract_code_globals_cache.get(co) + if out_names is None: + # We use a dict with None values instead of a set to get a + # deterministic order and avoid introducing non-deterministic pickle + # bytes as a results. + out_names = {name: None for name in _walk_global_ops(co)} + + # Declaring a function inside another one using the "def ..." syntax + # generates a constant code object corresponding to the one of the + # nested function's As the nested function may itself need global + # variables, we need to introspect its code, extract its globals, (look + # for code object in it's co_consts attribute..) and add the result to + # code_globals + if co.co_consts: + for const in co.co_consts: + if isinstance(const, types.CodeType): + out_names.update(_extract_code_globals(const)) + + _extract_code_globals_cache[co] = out_names + + return out_names + + +def _find_imported_submodules(code, top_level_dependencies): + """Find currently imported submodules used by a function. + + Submodules used by a function need to be detected and referenced for the + function to work correctly at depickling time. Because submodules can be + referenced as attribute of their parent package (``package.submodule``), we + need a special introspection technique that does not rely on GLOBAL-related + opcodes to find references of them in a code object. + + Example: + ``` + import concurrent.futures + import cloudpickle + def func(): + x = concurrent.futures.ThreadPoolExecutor + if __name__ == '__main__': + cloudpickle.dumps(func) + ``` + The globals extracted by cloudpickle in the function's state include the + concurrent package, but not its submodule (here, concurrent.futures), which + is the module used by func. Find_imported_submodules will detect the usage + of concurrent.futures. Saving this module alongside with func will ensure + that calling func once depickled does not fail due to concurrent.futures + not being imported + """ + + subimports = [] + # check if any known dependency is an imported package + for x in top_level_dependencies: + if ( + isinstance(x, types.ModuleType) + and hasattr(x, "__package__") + and x.__package__ + ): + # check if the package has any currently loaded sub-imports + prefix = x.__name__ + "." + # A concurrent thread could mutate sys.modules, + # make sure we iterate over a copy to avoid exceptions + for name in list(sys.modules): + # Older versions of pytest will add a "None" module to + # sys.modules. 
+ if name is not None and name.startswith(prefix): + # check whether the function can address the sub-module + tokens = set(name[len(prefix) :].split(".")) + if not tokens - set(code.co_names): + subimports.append(sys.modules[name]) + return subimports + + +# relevant opcodes +STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"] +DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"] +LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"] +GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL) +HAVE_ARGUMENT = dis.HAVE_ARGUMENT +EXTENDED_ARG = dis.EXTENDED_ARG + + +_BUILTIN_TYPE_NAMES = {} +for k, v in types.__dict__.items(): + if type(v) is type: + _BUILTIN_TYPE_NAMES[v] = k + + +def _builtin_type(name): + if name == "ClassType": # pragma: no cover + # Backward compat to load pickle files generated with cloudpickle + # < 1.3 even if loading pickle files from older versions is not + # officially supported. + return type + return getattr(types, name) + + +def _walk_global_ops(code): + """Yield referenced name for global-referencing instructions in code.""" + for instr in dis.get_instructions(code): + op = instr.opcode + if op in GLOBAL_OPS: + yield instr.argval + + +def _extract_class_dict(cls): + """Retrieve a copy of the dict of a class without the inherited method.""" + clsdict = dict(cls.__dict__) # copy dict proxy to a dict + if len(cls.__bases__) == 1: + inherited_dict = cls.__bases__[0].__dict__ + else: + inherited_dict = {} + for base in reversed(cls.__bases__): + inherited_dict.update(base.__dict__) + to_remove = [] + for name, value in clsdict.items(): + try: + base_value = inherited_dict[name] + if value is base_value: + to_remove.append(name) + except KeyError: + pass + for name in to_remove: + clsdict.pop(name) + return clsdict + + +def is_tornado_coroutine(func): + """Return whether `func` is a Tornado coroutine function. + + Running coroutines are not supported. + """ + warnings.warn( + "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be " + "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function " + "directly instead.", + category=DeprecationWarning, + ) + if "tornado.gen" not in sys.modules: + return False + gen = sys.modules["tornado.gen"] + if not hasattr(gen, "is_coroutine_function"): + # Tornado version is too old + return False + return gen.is_coroutine_function(func) + + +def subimport(name): + # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is + # the name of a submodule, __import__ will return the top-level root module + # of this submodule. For instance, __import__('os.path') returns the `os` + # module. + __import__(name) + return sys.modules[name] + + +def dynamic_subimport(name, vars): + mod = types.ModuleType(name) + mod.__dict__.update(vars) + mod.__dict__["__builtins__"] = builtins.__dict__ + return mod + + +def _get_cell_contents(cell): + try: + return cell.cell_contents + except ValueError: + # Handle empty cells explicitly with a sentinel value. + return _empty_cell_value + + +def instance(cls): + """Create a new instance of a class. + + Parameters + ---------- + cls : type + The class to create an instance of. + + Returns + ------- + instance : cls + A new instance of ``cls``. + """ + return cls() + + +@instance +class _empty_cell_value: + """Sentinel for empty closures.""" + + @classmethod + def __reduce__(cls): + return cls.__name__ + + +def _make_function(code, globals, name, argdefs, closure): + # Setting __builtins__ in globals is needed for nogil CPython. 
+ globals["__builtins__"] = __builtins__ + return types.FunctionType(code, globals, name, argdefs, closure) + + +def _make_empty_cell(): + if False: + # trick the compiler into creating an empty cell in our lambda + cell = None + raise AssertionError("this route should not be executed") + + return (lambda: cell).__closure__[0] + + +def _make_cell(value=_empty_cell_value): + cell = _make_empty_cell() + if value is not _empty_cell_value: + cell.cell_contents = value + return cell + + +def _make_skeleton_class( + type_constructor, name, bases, type_kwargs, class_tracker_id, extra +): + """Build dynamic class with an empty __dict__ to be filled once memoized + + If class_tracker_id is not None, try to lookup an existing class definition + matching that id. If none is found, track a newly reconstructed class + definition under that id so that other instances stemming from the same + class id will also reuse this class definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + skeleton_class = types.new_class( + name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs) + ) + return _lookup_class_or_track(class_tracker_id, skeleton_class) + + +def _make_skeleton_enum( + bases, name, qualname, members, module, class_tracker_id, extra +): + """Build dynamic enum with an empty __dict__ to be filled once memoized + + The creation of the enum class is inspired by the code of + EnumMeta._create_. + + If class_tracker_id is not None, try to lookup an existing enum definition + matching that id. If none is found, track a newly reconstructed enum + definition under that id so that other instances stemming from the same + class id will also reuse this enum definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + # enums always inherit from their base Enum class at the last position in + # the list of base classes: + enum_base = bases[-1] + metacls = enum_base.__class__ + classdict = metacls.__prepare__(name, bases) + + for member_name, member_value in members.items(): + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, name, bases, classdict) + enum_class.__module__ = module + enum_class.__qualname__ = qualname + + return _lookup_class_or_track(class_tracker_id, enum_class) + + +def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id): + tv = typing.TypeVar( + name, + *constraints, + bound=bound, + covariant=covariant, + contravariant=contravariant, + ) + return _lookup_class_or_track(class_tracker_id, tv) + + +def _decompose_typevar(obj): + return ( + obj.__name__, + obj.__bound__, + obj.__constraints__, + obj.__covariant__, + obj.__contravariant__, + _get_or_create_tracker_id(obj), + ) + + +def _typevar_reduce(obj): + # TypeVar instances require the module information hence why we + # are not using the _should_pickle_by_reference directly + module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__) + + if module_and_name is None: + return (_make_typevar, _decompose_typevar(obj)) + elif _is_registered_pickle_by_value(module_and_name[0]): + return (_make_typevar, _decompose_typevar(obj)) + + return (getattr, module_and_name) + + +def _get_bases(typ): + if "__orig_bases__" in getattr(typ, "__dict__", {}): + # For generic types (see PEP 560) + # Note that simply checking `hasattr(typ, '__orig_bases__')` is not + # correct. 
Subclasses of a fully-parameterized generic class does not + # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')` + # will return True because it's defined in the base class. + bases_attr = "__orig_bases__" + else: + # For regular class objects + bases_attr = "__bases__" + return getattr(typ, bases_attr) + + +def _make_dict_keys(obj, is_ordered=False): + if is_ordered: + return OrderedDict.fromkeys(obj).keys() + else: + return dict.fromkeys(obj).keys() + + +def _make_dict_values(obj, is_ordered=False): + if is_ordered: + return OrderedDict((i, _) for i, _ in enumerate(obj)).values() + else: + return {i: _ for i, _ in enumerate(obj)}.values() + + +def _make_dict_items(obj, is_ordered=False): + if is_ordered: + return OrderedDict(obj).items() + else: + return obj.items() + + +# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS +# ------------------------------------------------- + + +def _class_getnewargs(obj): + type_kwargs = {} + if "__module__" in obj.__dict__: + type_kwargs["__module__"] = obj.__module__ + + __dict__ = obj.__dict__.get("__dict__", None) + if isinstance(__dict__, property): + type_kwargs["__dict__"] = __dict__ + + return ( + type(obj), + obj.__name__, + _get_bases(obj), + type_kwargs, + _get_or_create_tracker_id(obj), + None, + ) + + +def _enum_getnewargs(obj): + members = {e.name: e.value for e in obj} + return ( + obj.__bases__, + obj.__name__, + obj.__qualname__, + members, + obj.__module__, + _get_or_create_tracker_id(obj), + None, + ) + + +# COLLECTION OF OBJECTS RECONSTRUCTORS +# ------------------------------------ +def _file_reconstructor(retval): + return retval + + +# COLLECTION OF OBJECTS STATE GETTERS +# ----------------------------------- + + +def _function_getstate(func): + # - Put func's dynamic attributes (stored in func.__dict__) in state. These + # attributes will be restored at unpickling time using + # f.__dict__.update(state) + # - Put func's members into slotstate. Such attributes will be restored at + # unpickling time by iterating over slotstate and calling setattr(func, + # slotname, slotvalue) + slotstate = { + "__name__": func.__name__, + "__qualname__": func.__qualname__, + "__annotations__": func.__annotations__, + "__kwdefaults__": func.__kwdefaults__, + "__defaults__": func.__defaults__, + "__module__": func.__module__, + "__doc__": func.__doc__, + "__closure__": func.__closure__, + } + + f_globals_ref = _extract_code_globals(func.__code__) + f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__} + + if func.__closure__ is not None: + closure_values = list(map(_get_cell_contents, func.__closure__)) + else: + closure_values = () + + # Extract currently-imported submodules used by func. Storing these modules + # in a smoke _cloudpickle_subimports attribute of the object's state will + # trigger the side effect of importing these modules at unpickling time + # (which is necessary for func to work correctly once depickled) + slotstate["_cloudpickle_submodules"] = _find_imported_submodules( + func.__code__, itertools.chain(f_globals.values(), closure_values) + ) + slotstate["__globals__"] = f_globals + + state = func.__dict__ + return state, slotstate + + +def _class_getstate(obj): + clsdict = _extract_class_dict(obj) + clsdict.pop("__weakref__", None) + + if issubclass(type(obj), abc.ABCMeta): + # If obj is an instance of an ABCMeta subclass, don't pickle the + # cache/negative caches populated during isinstance/issubclass + # checks, but pickle the list of registered subclasses of obj. 
+ clsdict.pop("_abc_cache", None) + clsdict.pop("_abc_negative_cache", None) + clsdict.pop("_abc_negative_cache_version", None) + registry = clsdict.pop("_abc_registry", None) + if registry is None: + # The abc caches and registered subclasses of a + # class are bundled into the single _abc_impl attribute + clsdict.pop("_abc_impl", None) + (registry, _, _, _) = abc._get_dump(obj) + + clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] + else: + # In the above if clause, registry is a set of weakrefs -- in + # this case, registry is a WeakSet + clsdict["_abc_impl"] = [type_ for type_ in registry] + + if "__slots__" in clsdict: + # pickle string length optimization: member descriptors of obj are + # created automatically from obj's __slots__ attribute, no need to + # save them in obj's state + if isinstance(obj.__slots__, str): + clsdict.pop(obj.__slots__) + else: + for k in obj.__slots__: + clsdict.pop(k, None) + + clsdict.pop("__dict__", None) # unpicklable property object + + return (clsdict, {}) + + +def _enum_getstate(obj): + clsdict, slotstate = _class_getstate(obj) + + members = {e.name: e.value for e in obj} + # Cleanup the clsdict that will be passed to _make_skeleton_enum: + # Those attributes are already handled by the metaclass. + for attrname in [ + "_generate_next_value_", + "_member_names_", + "_member_map_", + "_member_type_", + "_value2member_map_", + ]: + clsdict.pop(attrname, None) + for member in members: + clsdict.pop(member) + # Special handling of Enum subclasses + return clsdict, slotstate + + +# COLLECTIONS OF OBJECTS REDUCERS +# ------------------------------- +# A reducer is a function taking a single argument (obj), and that returns a +# tuple with all the necessary data to re-construct obj. Apart from a few +# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to +# correctly pickle an object. +# While many built-in objects (Exceptions objects, instances of the "object" +# class, etc), are shipped with their own built-in reducer (invoked using +# obj.__reduce__), some do not. The following methods were created to "fill +# these holes". + + +def _code_reduce(obj): + """code object reducer.""" + # If you are not sure about the order of arguments, take a look at help + # of the specific type from types, for example: + # >>> from types import CodeType + # >>> help(CodeType) + if hasattr(obj, "co_exceptiontable"): + # Python 3.11 and later: there are some new attributes + # related to the enhanced exceptions. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_qualname, + obj.co_firstlineno, + obj.co_linetable, + obj.co_exceptiontable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_linetable"): + # Python 3.10 and later: obj.co_lnotab is deprecated and constructor + # expects obj.co_linetable instead. 
+ args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_linetable, + obj.co_freevars, + obj.co_cellvars, + ) + elif hasattr(obj, "co_nmeta"): # pragma: no cover + # "nogil" Python: modified attributes from 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_framesize, + obj.co_ndefaultargs, + obj.co_nmeta, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_exc_handlers, + obj.co_jump_table, + obj.co_freevars, + obj.co_cellvars, + obj.co_free2reg, + obj.co_cell2reg, + ) + else: + # Backward compat for 3.8 and 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + obj.co_names, + obj.co_varnames, + obj.co_filename, + obj.co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + return types.CodeType, args + + +def _cell_reduce(obj): + """Cell (containing values of a function's free variables) reducer.""" + try: + obj.cell_contents + except ValueError: # cell is empty + return _make_empty_cell, () + else: + return _make_cell, (obj.cell_contents,) + + +def _classmethod_reduce(obj): + orig_func = obj.__func__ + return type(obj), (orig_func,) + + +def _file_reduce(obj): + """Save a file.""" + import io + + if not hasattr(obj, "name") or not hasattr(obj, "mode"): + raise pickle.PicklingError( + "Cannot pickle files that do not map to an actual file" + ) + if obj is sys.stdout: + return getattr, (sys, "stdout") + if obj is sys.stderr: + return getattr, (sys, "stderr") + if obj is sys.stdin: + raise pickle.PicklingError("Cannot pickle standard input") + if obj.closed: + raise pickle.PicklingError("Cannot pickle closed files") + if hasattr(obj, "isatty") and obj.isatty(): + raise pickle.PicklingError("Cannot pickle files that map to tty objects") + if "r" not in obj.mode and "+" not in obj.mode: + raise pickle.PicklingError( + "Cannot pickle files that are not opened for reading: %s" % obj.mode + ) + + name = obj.name + + retval = io.StringIO() + + try: + # Read the whole file + curloc = obj.tell() + obj.seek(0) + contents = obj.read() + obj.seek(curloc) + except OSError as e: + raise pickle.PicklingError( + "Cannot pickle file %s as it cannot be read" % name + ) from e + retval.write(contents) + retval.seek(curloc) + + retval.name = name + return _file_reconstructor, (retval,) + + +def _getset_descriptor_reduce(obj): + return getattr, (obj.__objclass__, obj.__name__) + + +def _mappingproxy_reduce(obj): + return types.MappingProxyType, (dict(obj),) + + +def _memoryview_reduce(obj): + return bytes, (obj.tobytes(),) + + +def _module_reduce(obj): + if _should_pickle_by_reference(obj): + return subimport, (obj.__name__,) + else: + # Some external libraries can populate the "__builtins__" entry of a + # module's `__dict__` with unpicklable objects (see #316). For that + # reason, we do not attempt to pickle the "__builtins__" entry, and + # restore a default value for it at unpickling time. 
+ state = obj.__dict__.copy() + state.pop("__builtins__", None) + return dynamic_subimport, (obj.__name__, state) + + +def _method_reduce(obj): + return (types.MethodType, (obj.__func__, obj.__self__)) + + +def _logger_reduce(obj): + return logging.getLogger, (obj.name,) + + +def _root_logger_reduce(obj): + return logging.getLogger, () + + +def _property_reduce(obj): + return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__) + + +def _weakset_reduce(obj): + return weakref.WeakSet, (list(obj),) + + +def _dynamic_class_reduce(obj): + """Save a class that can't be referenced as a module attribute. + + This method is used to serialize classes that are defined inside + functions, or that otherwise can't be serialized as attribute lookups + from importable modules. + """ + if Enum is not None and issubclass(obj, Enum): + return ( + _make_skeleton_enum, + _enum_getnewargs(obj), + _enum_getstate(obj), + None, + None, + _class_setstate, + ) + else: + return ( + _make_skeleton_class, + _class_getnewargs(obj), + _class_getstate(obj), + None, + None, + _class_setstate, + ) + + +def _class_reduce(obj): + """Select the reducer depending on the dynamic nature of the class obj.""" + if obj is type(None): # noqa + return type, (None,) + elif obj is type(Ellipsis): + return type, (Ellipsis,) + elif obj is type(NotImplemented): + return type, (NotImplemented,) + elif obj in _BUILTIN_TYPE_NAMES: + return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],) + elif not _should_pickle_by_reference(obj): + return _dynamic_class_reduce(obj) + return NotImplemented + + +def _dict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj),) + + +def _dict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj),) + + +def _dict_items_reduce(obj): + return _make_dict_items, (dict(obj),) + + +def _odict_keys_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_keys, (list(obj), True) + + +def _odict_values_reduce(obj): + # Safer not to ship the full dict as sending the rest might + # be unintended and could potentially cause leaking of + # sensitive information + return _make_dict_values, (list(obj), True) + + +def _odict_items_reduce(obj): + return _make_dict_items, (dict(obj), True) + + +def _dataclass_field_base_reduce(obj): + return _get_dataclass_field_type_sentinel, (obj.name,) + + +# COLLECTIONS OF OBJECTS STATE SETTERS +# ------------------------------------ +# state setters are called at unpickling time, once the object is created and +# it has to be updated to how it was at unpickling time. + + +def _function_setstate(obj, state): + """Update the state of a dynamic function. + + As __closure__ and __globals__ are readonly attributes of a function, we + cannot rely on the native setstate routine of pickle.load_build, that calls + setattr on items of the slotstate. Instead, we have to modify them inplace. + """ + state, slotstate = state + obj.__dict__.update(state) + + obj_globals = slotstate.pop("__globals__") + obj_closure = slotstate.pop("__closure__") + # _cloudpickle_subimports is a set of submodules that must be loaded for + # the pickled function to work correctly at unpickling time. 
Now that these + # submodules are depickled (hence imported), they can be removed from the + # object's state (the object state only served as a reference holder to + # these submodules) + slotstate.pop("_cloudpickle_submodules") + + obj.__globals__.update(obj_globals) + obj.__globals__["__builtins__"] = __builtins__ + + if obj_closure is not None: + for i, cell in enumerate(obj_closure): + try: + value = cell.cell_contents + except ValueError: # cell is empty + continue + obj.__closure__[i].cell_contents = value + + for k, v in slotstate.items(): + setattr(obj, k, v) + + +def _class_setstate(obj, state): + state, slotstate = state + registry = None + for attrname, attr in state.items(): + if attrname == "_abc_impl": + registry = attr + else: + setattr(obj, attrname, attr) + if registry is not None: + for subclass in registry: + obj.register(subclass) + + return obj + + +# COLLECTION OF DATACLASS UTILITIES +# --------------------------------- +# There are some internal sentinel values whose identity must be preserved when +# unpickling dataclass fields. Each sentinel value has a unique name that we can +# use to retrieve its identity at unpickling time. + + +_DATACLASSE_FIELD_TYPE_SENTINELS = { + dataclasses._FIELD.name: dataclasses._FIELD, + dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR, + dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR, +} + + +def _get_dataclass_field_type_sentinel(name): + return _DATACLASSE_FIELD_TYPE_SENTINELS[name] + + +class Pickler(pickle.Pickler): + # set of reducers defined and used by cloudpickle (private) + _dispatch_table = {} + _dispatch_table[classmethod] = _classmethod_reduce + _dispatch_table[io.TextIOWrapper] = _file_reduce + _dispatch_table[logging.Logger] = _logger_reduce + _dispatch_table[logging.RootLogger] = _root_logger_reduce + _dispatch_table[memoryview] = _memoryview_reduce + _dispatch_table[property] = _property_reduce + _dispatch_table[staticmethod] = _classmethod_reduce + _dispatch_table[CellType] = _cell_reduce + _dispatch_table[types.CodeType] = _code_reduce + _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce + _dispatch_table[types.ModuleType] = _module_reduce + _dispatch_table[types.MethodType] = _method_reduce + _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce + _dispatch_table[weakref.WeakSet] = _weakset_reduce + _dispatch_table[typing.TypeVar] = _typevar_reduce + _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce + _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce + _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce + _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce + _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce + _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce + _dispatch_table[abc.abstractmethod] = _classmethod_reduce + _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce + _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce + _dispatch_table[abc.abstractproperty] = _property_reduce + _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce + + dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table) + + # function reducers are defined as instance methods of cloudpickle.Pickler + # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref) + def _dynamic_function_reduce(self, func): + """Reduce a function that is not pickleable via attribute lookup.""" + newargs = self._function_getnewargs(func) + state = 
_function_getstate(func) + return (_make_function, newargs, state, None, None, _function_setstate) + + def _function_reduce(self, obj): + """Reducer for function objects. + + If obj is a top-level attribute of a file-backed module, this reducer + returns NotImplemented, making the cloudpickle.Pickler fall back to + traditional pickle.Pickler routines to save obj. Otherwise, it reduces + obj using a custom cloudpickle reducer designed specifically to handle + dynamic functions. + """ + if _should_pickle_by_reference(obj): + return NotImplemented + else: + return self._dynamic_function_reduce(obj) + + def _function_getnewargs(self, func): + code = func.__code__ + + # base_globals represents the future global namespace of func at + # unpickling time. Looking it up and storing it in + # cloudpickle.Pickler.globals_ref allow functions sharing the same + # globals at pickling time to also share them once unpickled, at one + # condition: since globals_ref is an attribute of a cloudpickle.Pickler + # instance, and that a new cloudpickle.Pickler is created each time + # cloudpickle.dump or cloudpickle.dumps is called, functions also need + # to be saved within the same invocation of + # cloudpickle.dump/cloudpickle.dumps (for example: + # cloudpickle.dumps([f1, f2])). There is no such limitation when using + # cloudpickle.Pickler.dump, as long as the multiple invocations are + # bound to the same cloudpickle.Pickler instance. + base_globals = self.globals_ref.setdefault(id(func.__globals__), {}) + + if base_globals == {}: + # Add module attributes used to resolve relative imports + # instructions inside func. + for k in ["__package__", "__name__", "__path__", "__file__"]: + if k in func.__globals__: + base_globals[k] = func.__globals__[k] + + # Do not bind the free variables before the function is created to + # avoid infinite recursion. + if func.__closure__ is None: + closure = None + else: + closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars))) + + return code, base_globals, None, None, closure + + def dump(self, obj): + try: + return super().dump(obj) + except RuntimeError as e: + if len(e.args) > 0 and "recursion" in e.args[0]: + msg = "Could not pickle object as excessively deep recursion required." + raise pickle.PicklingError(msg) from e + else: + raise + + def __init__(self, file, protocol=None, buffer_callback=None): + if protocol is None: + protocol = DEFAULT_PROTOCOL + super().__init__(file, protocol=protocol, buffer_callback=buffer_callback) + # map functions __globals__ attribute ids, to ensure that functions + # sharing the same global namespace at pickling time also share + # their global namespace at unpickling time. + self.globals_ref = {} + self.proto = int(protocol) + + if not PYPY: + # pickle.Pickler is the C implementation of the CPython pickler and + # therefore we rely on reduce_override method to customize the pickler + # behavior. + + # `cloudpickle.Pickler.dispatch` is only left for backward + # compatibility - note that when using protocol 5, + # `cloudpickle.Pickler.dispatch` is not an extension of + # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler` + # subclasses the C-implemented `pickle.Pickler`, which does not expose + # a `dispatch` attribute. 
Earlier versions of `cloudpickle.Pickler` + # used `cloudpickle.Pickler.dispatch` as a class-level attribute + # storing all reducers implemented by cloudpickle, but the attribute + # name was not a great choice given because it would collide with a + # similarly named attribute in the pure-Python `pickle._Pickler` + # implementation in the standard library. + dispatch = dispatch_table + + # Implementation of the reducer_override callback, in order to + # efficiently serialize dynamic functions and classes by subclassing + # the C-implemented `pickle.Pickler`. + # TODO: decorrelate reducer_override (which is tied to CPython's + # implementation - would it make sense to backport it to pypy? - and + # pickle's protocol 5 which is implementation agnostic. Currently, the + # availability of both notions coincide on CPython's pickle, but it may + # not be the case anymore when pypy implements protocol 5. + + def reducer_override(self, obj): + """Type-agnostic reducing callback for function and classes. + + For performance reasons, subclasses of the C `pickle.Pickler` class + cannot register custom reducers for functions and classes in the + dispatch_table attribute. Reducers for such types must instead + implemented via the special `reducer_override` method. + + Note that this method will be called for any object except a few + builtin-types (int, lists, dicts etc.), which differs from reducers + in the Pickler's dispatch_table, each of them being invoked for + objects of a specific type only. + + This property comes in handy for classes: although most classes are + instances of the ``type`` metaclass, some of them can be instances + of other custom metaclasses (such as enum.EnumMeta for example). In + particular, the metaclass will likely not be known in advance, and + thus cannot be special-cased using an entry in the dispatch_table. + reducer_override, among other things, allows us to register a + reducer that will be called for any class, independently of its + type. + + Notes: + + * reducer_override has the priority over dispatch_table-registered + reducers. + * reducer_override can be used to fix other limitations of + cloudpickle for other types that suffered from type-specific + reducers, such as Exceptions. See + https://github.com/cloudpipe/cloudpickle/issues/248 + """ + t = type(obj) + try: + is_anyclass = issubclass(t, type) + except TypeError: # t is not a class (old Boost; see SF #502085) + is_anyclass = False + + if is_anyclass: + return _class_reduce(obj) + elif isinstance(obj, types.FunctionType): + return self._function_reduce(obj) + else: + # fallback to save_global, including the Pickler's + # dispatch_table + return NotImplemented + + else: + # When reducer_override is not available, hack the pure-Python + # Pickler's types.FunctionType and type savers. Note: the type saver + # must override Pickler.save_global, because pickle.py contains a + # hard-coded call to save_global when pickling meta-classes. + dispatch = pickle.Pickler.dispatch.copy() + + def _save_reduce_pickle5( + self, + func, + args, + state=None, + listitems=None, + dictitems=None, + state_setter=None, + obj=None, + ): + save = self.save + write = self.write + self.save_reduce( + func, + args, + state=None, + listitems=listitems, + dictitems=dictitems, + obj=obj, + ) + # backport of the Python 3.8 state_setter pickle operations + save(state_setter) + save(obj) # simple BINGET opcode as obj is already memoized. + save(state) + write(pickle.TUPLE2) + # Trigger a state_setter(obj, state) function call. 
+ write(pickle.REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(pickle.POP) + + def save_global(self, obj, name=None, pack=struct.pack): + """Main dispatch method. + + The name of this method is somewhat misleading: all types get + dispatched here. + """ + if obj is type(None): # noqa + return self.save_reduce(type, (None,), obj=obj) + elif obj is type(Ellipsis): + return self.save_reduce(type, (Ellipsis,), obj=obj) + elif obj is type(NotImplemented): + return self.save_reduce(type, (NotImplemented,), obj=obj) + elif obj in _BUILTIN_TYPE_NAMES: + return self.save_reduce( + _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj + ) + + if name is not None: + super().save_global(obj, name=name) + elif not _should_pickle_by_reference(obj, name=name): + self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj) + else: + super().save_global(obj, name=name) + + dispatch[type] = save_global + + def save_function(self, obj, name=None): + """Registered with the dispatch to handle all function types. + + Determines what kind of function obj is (e.g. lambda, defined at + interactive prompt, etc) and handles the pickling appropriately. + """ + if _should_pickle_by_reference(obj, name=name): + return super().save_global(obj, name=name) + elif PYPY and isinstance(obj.__code__, builtin_code_type): + return self.save_pypy_builtin_func(obj) + else: + return self._save_reduce_pickle5( + *self._dynamic_function_reduce(obj), obj=obj + ) + + def save_pypy_builtin_func(self, obj): + """Save pypy equivalent of builtin functions. + + PyPy does not have the concept of builtin-functions. Instead, + builtin-functions are simple function instances, but with a + builtin-code attribute. + Most of the time, builtin functions should be pickled by attribute. + But PyPy has flaky support for __qualname__, so some builtin + functions such as float.__new__ will be classified as dynamic. For + this reason only, we created this special routine. Because + builtin-functions are not expected to have closure or globals, + there is no additional hack (compared the one already implemented + in pickle) to protect ourselves from reference cycles. A simple + (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note + also that PyPy improved their support for __qualname__ in v3.6, so + this routing should be removed when cloudpickle supports only PyPy + 3.6 and later. + """ + rv = ( + types.FunctionType, + (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__), + obj.__dict__, + ) + self.save_reduce(*rv, obj=obj) + + dispatch[types.FunctionType] = save_function + + +# Shorthands similar to pickle.dump/pickle.dumps + + +def dump(obj, file, protocol=None, buffer_callback=None): + """Serialize obj as bytes streamed into file + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). 
+ """ + Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj) + + +def dumps(obj, protocol=None, buffer_callback=None): + """Serialize obj as a string of bytes allocated in memory + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). + """ + with io.BytesIO() as file: + cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback) + cp.dump(obj) + return file.getvalue() + + +# Include pickles unloading functions in this namespace for convenience. +load, loads = pickle.load, pickle.loads + +# Backward compat alias. +CloudPickler = Pickler diff --git a/MLPY/Lib/site-packages/cloudpickle/cloudpickle_fast.py b/MLPY/Lib/site-packages/cloudpickle/cloudpickle_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb --- /dev/null +++ b/MLPY/Lib/site-packages/cloudpickle/cloudpickle_fast.py @@ -0,0 +1,13 @@ +"""Compatibility module. + +It can be necessary to load files generated by previous versions of cloudpickle +that rely on symbols being defined under the `cloudpickle.cloudpickle_fast` +namespace. + +See: tests/test_backward_compat.py +""" +from . import cloudpickle + + +def __getattr__(name): + return getattr(cloudpickle, name) diff --git a/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/INSTALLER b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/METADATA b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..a63517822ba11864a4150e7cdc5bdba3442218b0 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/METADATA @@ -0,0 +1,58 @@ +Metadata-Version: 2.3 +Name: filelock +Version: 3.15.4 +Summary: A platform independent file lock. 
+Project-URL: Documentation, https://py-filelock.readthedocs.io +Project-URL: Homepage, https://github.com/tox-dev/py-filelock +Project-URL: Source, https://github.com/tox-dev/py-filelock +Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues +Maintainer-email: Bernát Gábor +License-Expression: Unlicense +License-File: LICENSE +Keywords: application,cache,directory,log,user +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: The Unlicense (Unlicense) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: System +Requires-Python: >=3.8 +Provides-Extra: docs +Requires-Dist: furo>=2023.9.10; extra == 'docs' +Requires-Dist: sphinx-autodoc-typehints!=1.23.4,>=1.25.2; extra == 'docs' +Requires-Dist: sphinx>=7.2.6; extra == 'docs' +Provides-Extra: testing +Requires-Dist: covdefaults>=2.3; extra == 'testing' +Requires-Dist: coverage>=7.3.2; extra == 'testing' +Requires-Dist: diff-cover>=8.0.1; extra == 'testing' +Requires-Dist: pytest-asyncio>=0.21; extra == 'testing' +Requires-Dist: pytest-cov>=4.1; extra == 'testing' +Requires-Dist: pytest-mock>=3.12; extra == 'testing' +Requires-Dist: pytest-timeout>=2.2; extra == 'testing' +Requires-Dist: pytest>=7.4.3; extra == 'testing' +Requires-Dist: virtualenv>=20.26.2; extra == 'testing' +Provides-Extra: typing +Requires-Dist: typing-extensions>=4.8; (python_version < '3.11') and extra == 'typing' +Description-Content-Type: text/markdown + +# filelock + +[![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/) +[![Supported Python +versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/) +[![Documentation +status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest) +[![Code style: +black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock) +[![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml) + +For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html). 
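As a quick orientation to the package being vendored in the hunks that follow, here is a minimal usage sketch of the lock as a context manager. This is illustrative only: it is not part of the filelock distribution files recorded by this diff, and the file paths are hypothetical.

```python
from filelock import FileLock, Timeout

# Hypothetical paths used only for illustration; any writable location works.
lock_path = "shared_resource.txt.lock"
data_path = "shared_resource.txt"

lock = FileLock(lock_path, timeout=5)  # wait up to 5 seconds to acquire the lock
try:
    with lock:  # acquired on entry, released automatically when the block exits
        with open(data_path, "a") as f:
            f.write("exclusive write\n")
except Timeout:
    # Raised when another process or thread holds the lock past the timeout.
    print("could not acquire", lock_path)
```

The context-manager form shown here mirrors the `timeout` and `blocking` parameters visible in `filelock/_api.py` later in this diff; a plain `lock.acquire()` / `lock.release()` pair works as well when a `with` block is not convenient.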
diff --git a/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/RECORD b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b8b91f230ca741addb39584b08012f9cfb308775 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/RECORD @@ -0,0 +1,24 @@ +filelock-3.15.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.15.4.dist-info/METADATA,sha256=ovkK1sT-vSUNz24pW1xv7Yrna9npgz4mL-Hc3G9Z3z4,2903 +filelock-3.15.4.dist-info/RECORD,, +filelock-3.15.4.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 +filelock-3.15.4.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210 +filelock/__init__.py,sha256=_t_-OAGXo_qyPa9lNQ1YnzVYEvSW3I0onPqzpomsVVg,1769 +filelock/__pycache__/__init__.cpython-39.pyc,, +filelock/__pycache__/_api.cpython-39.pyc,, +filelock/__pycache__/_error.cpython-39.pyc,, +filelock/__pycache__/_soft.cpython-39.pyc,, +filelock/__pycache__/_unix.cpython-39.pyc,, +filelock/__pycache__/_util.cpython-39.pyc,, +filelock/__pycache__/_windows.cpython-39.pyc,, +filelock/__pycache__/asyncio.cpython-39.pyc,, +filelock/__pycache__/version.cpython-39.pyc,, +filelock/_api.py,sha256=GVeBEGjpDD8S1bYqG6_u0MZfbYHS6XrHs_n3PVKq-h0,14541 +filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787 +filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711 +filelock/_unix.py,sha256=-FXP0tjInBHUYygOlMpp4taUmD87QOkrD_4ybg_iT7Q,2259 +filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715 +filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177 +filelock/asyncio.py,sha256=NJp4NJeBuLrhtP0DBfb6Jt_Z0wNj0cnX7iNjRMRpXsY,12449 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock/version.py,sha256=TW171tz9aZUg-FeBVFGiMFMaQn7faeSQ3lay3071xLM,413 diff --git a/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/WHEEL b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..516596c76787b10928cbab24f22c0ea00433b15d --- /dev/null +++ b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.24.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/licenses/LICENSE b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/MLPY/Lib/site-packages/filelock-3.15.4.dist-info/licenses/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/MLPY/Lib/site-packages/filelock/__init__.py b/MLPY/Lib/site-packages/filelock/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d8c5b8ebe565a652b3671b3dfa066f7346af45 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/__init__.py @@ -0,0 +1,70 @@ +""" +A platform independent file lock that supports the with-statement. + +.. autodata:: filelock.__version__ + :no-value: + +""" + +from __future__ import annotations + +import sys +import warnings +from typing import TYPE_CHECKING + +from ._api import AcquireReturnProxy, BaseFileLock +from ._error import Timeout +from ._soft import SoftFileLock +from ._unix import UnixFileLock, has_fcntl +from ._windows import WindowsFileLock +from .asyncio import ( + AsyncAcquireReturnProxy, + AsyncSoftFileLock, + AsyncUnixFileLock, + AsyncWindowsFileLock, + BaseAsyncFileLock, +) +from .version import version + +#: version of the project as a string +__version__: str = version + + +if sys.platform == "win32": # pragma: win32 cover + _FileLock: type[BaseFileLock] = WindowsFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncWindowsFileLock +else: # pragma: win32 no cover # noqa: PLR5501 + if has_fcntl: + _FileLock: type[BaseFileLock] = UnixFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncUnixFileLock + else: + _FileLock = SoftFileLock + _AsyncFileLock = AsyncSoftFileLock + if warnings is not None: + warnings.warn("only soft file lock is available", stacklevel=2) + +if TYPE_CHECKING: + FileLock = SoftFileLock + AsyncFileLock = AsyncSoftFileLock +else: + #: Alias for the lock, which should be used for the current platform. 
+ FileLock = _FileLock + AsyncFileLock = _AsyncFileLock + + +__all__ = [ + "AcquireReturnProxy", + "AsyncAcquireReturnProxy", + "AsyncFileLock", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", + "BaseFileLock", + "FileLock", + "SoftFileLock", + "Timeout", + "UnixFileLock", + "WindowsFileLock", + "__version__", +] diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ff8dc16469e7cce86b0cea586d8533107d4f823 Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_api.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_api.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98c110811feec995e0dc08806bdbb9b7b6c0085d Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_api.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_error.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_error.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..588dbc72e6a5e7e85e8983f5e494ce3eb85b67a5 Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_error.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_soft.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_soft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a7e7f0169d7c297c93e28137e7885f9cf4b4db7 Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_soft.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_unix.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_unix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc2cc1b351381c562c259f251645aaa97e246ecc Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_unix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_util.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7abe330e0372d65964f51d5e2b69da3d72671b71 Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/_windows.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/_windows.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..670cfc611b323beb981a6dfc4cda0364d782cb2e Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/_windows.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/asyncio.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/asyncio.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..603fd5cc97d97434066dedb9197b1ab2bd6e19e8 Binary files /dev/null and b/MLPY/Lib/site-packages/filelock/__pycache__/asyncio.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/__pycache__/version.cpython-39.pyc b/MLPY/Lib/site-packages/filelock/__pycache__/version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1607305de5f5ee63f80c8bbca53d6bc33d59d88f Binary files /dev/null and 
b/MLPY/Lib/site-packages/filelock/__pycache__/version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/filelock/_api.py b/MLPY/Lib/site-packages/filelock/_api.py new file mode 100644 index 0000000000000000000000000000000000000000..771a559a658bf1cdc61ed2ff74e1fca7db4683e5 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_api.py @@ -0,0 +1,403 @@ +from __future__ import annotations + +import contextlib +import inspect +import logging +import os +import time +import warnings +from abc import ABCMeta, abstractmethod +from dataclasses import dataclass +from threading import local +from typing import TYPE_CHECKING, Any, cast +from weakref import WeakValueDictionary + +from ._error import Timeout + +if TYPE_CHECKING: + import sys + from types import TracebackType + + if sys.version_info >= (3, 11): # pragma: no cover (py311+) + from typing import Self + else: # pragma: no cover ( None: + self.lock = lock + + def __enter__(self) -> BaseFileLock: + return self.lock + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.lock.release() + + +@dataclass +class FileLockContext: + """A dataclass which holds the context for a ``BaseFileLock`` object.""" + + # The context is held in a separate class to allow optional use of thread local storage via the + # ThreadLocalFileContext class. + + #: The path to the lock file. + lock_file: str + + #: The default timeout value. + timeout: float + + #: The mode for the lock files + mode: int + + #: Whether the lock should be blocking or not + blocking: bool + + #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held + lock_file_fd: int | None = None + + #: The lock counter is used for implementing the nested locking mechanism. + lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 + + +class ThreadLocalFileContext(FileLockContext, local): + """A thread local version of the ``FileLockContext`` class.""" + + +class FileLockMeta(ABCMeta): + def __call__( # noqa: PLR0913 + cls, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + **kwargs: Any, # capture remaining kwargs for subclasses # noqa: ANN401 + ) -> BaseFileLock: + if is_singleton: + instance = cls._instances.get(str(lock_file)) # type: ignore[attr-defined] + if instance: + params_to_check = { + "thread_local": (thread_local, instance.is_thread_local()), + "timeout": (timeout, instance.timeout), + "mode": (mode, instance.mode), + "blocking": (blocking, instance.blocking), + } + + non_matching_params = { + name: (passed_param, set_param) + for name, (passed_param, set_param) in params_to_check.items() + if passed_param != set_param + } + if not non_matching_params: + return cast(BaseFileLock, instance) + + # parameters do not match; raise error + msg = "Singleton lock instances cannot be initialized with differing arguments" + msg += "\nNon-matching arguments: " + for param_name, (passed_param, set_param) in non_matching_params.items(): + msg += f"\n\t{param_name} (existing lock has {set_param} but {passed_param} was passed)" + raise ValueError(msg) + + # Workaround to make `__init__`'s params optional in subclasses + # E.g. 
virtualenv changes the signature of the `__init__` method in the `BaseFileLock` class descendant + # (https://github.com/tox-dev/filelock/pull/340) + + all_params = { + "timeout": timeout, + "mode": mode, + "thread_local": thread_local, + "blocking": blocking, + "is_singleton": is_singleton, + **kwargs, + } + + present_params = inspect.signature(cls.__init__).parameters # type: ignore[misc] + init_params = {key: value for key, value in all_params.items() if key in present_params} + + instance = super().__call__(lock_file, **init_params) + + if is_singleton: + cls._instances[str(lock_file)] = instance # type: ignore[attr-defined] + + return cast(BaseFileLock, instance) + + +class BaseFileLock(contextlib.ContextDecorator, metaclass=FileLockMeta): + """Abstract base class for a file lock object.""" + + _instances: WeakValueDictionary[str, BaseFileLock] + + def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None: + """Setup unique state for lock subclasses.""" + super().__init_subclass__(**kwargs) + cls._instances = WeakValueDictionary() + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \ + the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \ + to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \ + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \ + per lock file. This is useful if you want to use the lock object for reentrant locking without needing \ + to pass the same object around. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + } + self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) + + def is_thread_local(self) -> bool: + """:return: a flag indicating if this lock is thread local or not""" + return self._is_thread_local + + @property + def is_singleton(self) -> bool: + """:return: a flag indicating if this lock is singleton or not""" + return self._is_singleton + + @property + def lock_file(self) -> str: + """:return: path to the lock file""" + return self._context.lock_file + + @property + def timeout(self) -> float: + """ + :return: the default timeout value, in seconds + + .. versionadded:: 2.0.0 + """ + return self._context.timeout + + @timeout.setter + def timeout(self, value: float | str) -> None: + """ + Change the default timeout value. 
+ + :param value: the new value, in seconds + + """ + self._context.timeout = float(value) + + @property + def blocking(self) -> bool: + """:return: whether the locking is blocking or not""" + return self._context.blocking + + @blocking.setter + def blocking(self, value: bool) -> None: + """ + Change the default blocking value. + + :param value: the new value as bool + + """ + self._context.blocking = value + + @property + def mode(self) -> int: + """:return: the file permissions for the lockfile""" + return self._context.mode + + @abstractmethod + def _acquire(self) -> None: + """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" + raise NotImplementedError + + @abstractmethod + def _release(self) -> None: + """Releases the lock and sets self._context.lock_file_fd to None.""" + raise NotImplementedError + + @property + def is_locked(self) -> bool: + """ + + :return: A boolean indicating if the lock file is holding the lock currently. + + .. versionchanged:: 2.0.0 + + This was previously a method and is now a property. + """ + return self._context.lock_file_fd is not None + + @property + def lock_counter(self) -> int: + """:return: The number of times this lock has been acquired (but not yet released).""" + return self._context.lock_counter + + def acquire( + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + poll_intervall: float | None = None, + blocking: bool | None = None, + ) -> AcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and + if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + .. versionchanged:: 2.0.0 + + This method returns now a *proxy* object instead of *self*, + so that it can be used in a with statement without side effects. + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + if poll_intervall is not None: + msg = "use poll_interval instead of poll_intervall" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + poll_interval = poll_intervall + + # Increment the number right at the beginning. We can still undo it, if something fails. 
+ self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + self._acquire() + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + time.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AcquireReturnProxy(lock=self) + + def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note that the lock is only completely released if the lock counter is 0. + Also note that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case. + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + self._release() + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + def __enter__(self) -> Self: + """ + Acquire the lock. + + :return: the lock object + + """ + self.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. + + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + self.release(force=True) + + +__all__ = [ + "AcquireReturnProxy", + "BaseFileLock", +] diff --git a/MLPY/Lib/site-packages/filelock/_error.py b/MLPY/Lib/site-packages/filelock/_error.py new file mode 100644 index 0000000000000000000000000000000000000000..f7ff08c0f508ad7077eb6ed1990898840c952b3a --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_error.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Any + + +class Timeout(TimeoutError): # noqa: N818 + """Raised when the lock could not be acquired in *timeout* seconds.""" + + def __init__(self, lock_file: str) -> None: + super().__init__() + self._lock_file = lock_file + + def __reduce__(self) -> str | tuple[Any, ...]: + return self.__class__, (self._lock_file,) # Properly pickle the exception + + def __str__(self) -> str: + return f"The file lock '{self._lock_file}' could not be acquired." 
+ + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.lock_file!r})" + + @property + def lock_file(self) -> str: + """:return: The path of the file lock.""" + return self._lock_file + + +__all__ = [ + "Timeout", +] diff --git a/MLPY/Lib/site-packages/filelock/_soft.py b/MLPY/Lib/site-packages/filelock/_soft.py new file mode 100644 index 0000000000000000000000000000000000000000..28c67f74cc82b8f55e47afd6a71972cc1fb95eb6 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_soft.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES, EEXIST +from pathlib import Path + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + + +class SoftFileLock(BaseFileLock): + """Simply watches the existence of the lock file.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + # first check for exists and read-only mode as the open will mask this case as EEXIST + flags = ( + os.O_WRONLY # open for writing only + | os.O_CREAT + | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists + | os.O_TRUNC # truncate the file to zero bytes + ) + try: + file_handler = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: # re-raise unless expected exception + if not ( + exception.errno == EEXIST # lock already exists + or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock + ): # pragma: win32 no cover + raise + else: + self._context.lock_file_fd = file_handler + + def _release(self) -> None: + assert self._context.lock_file_fd is not None # noqa: S101 + os.close(self._context.lock_file_fd) # the lock file is definitely not None + self._context.lock_file_fd = None + with suppress(OSError): # the file is already deleted and that's what we want + Path(self.lock_file).unlink() + + +__all__ = [ + "SoftFileLock", +] diff --git a/MLPY/Lib/site-packages/filelock/_unix.py b/MLPY/Lib/site-packages/filelock/_unix.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae1fbe916f95762418cd62251f91f74ba35fc8c --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_unix.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import ENOSYS +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists + +#: a flag to indicate if the fcntl API is available +has_fcntl = False +if sys.platform == "win32": # pragma: win32 cover + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + +else: # pragma: win32 no cover + try: + import fcntl + except ImportError: + pass + else: + has_fcntl = True + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + def _acquire(self) -> None: + ensure_directory_exists(self.lock_file) + open_flags = os.O_RDWR | os.O_TRUNC + if not Path(self.lock_file).exists(): + open_flags |= os.O_CREAT + fd = os.open(self.lock_file, open_flags, self._context.mode) + with suppress(PermissionError): # This lock is not owned by this UID + os.fchmod(fd, self._context.mode) + try: + fcntl.flock(fd, fcntl.LOCK_EX | 
fcntl.LOCK_NB) + except OSError as exception: + os.close(fd) + if exception.errno == ENOSYS: # NotImplemented error + msg = "FileSystem does not appear to support flock; use SoftFileLock instead" + raise NotImplementedError(msg) from exception + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + # Do not remove the lockfile: + # https://github.com/tox-dev/py-filelock/issues/31 + # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition + fd = cast(int, self._context.lock_file_fd) + self._context.lock_file_fd = None + fcntl.flock(fd, fcntl.LOCK_UN) + os.close(fd) + + +__all__ = [ + "UnixFileLock", + "has_fcntl", +] diff --git a/MLPY/Lib/site-packages/filelock/_util.py b/MLPY/Lib/site-packages/filelock/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c671e8533873948f0e1b5575ff952c722019f067 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_util.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import os +import stat +import sys +from errno import EACCES, EISDIR +from pathlib import Path + + +def raise_on_not_writable_file(filename: str) -> None: + """ + Raise an exception if attempting to open the file for writing would fail. + + This is done so files that will never be writable can be separated from files that are writable but currently + locked. + + :param filename: file to check + :raises OSError: as if the file was opened for writing. + + """ + try: # use stat to do exists + can write to check without race condition + file_stat = os.stat(filename) # noqa: PTH116 + except OSError: + return # swallow does not exist or other errors + + if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it + if not (file_stat.st_mode & stat.S_IWUSR): + raise PermissionError(EACCES, "Permission denied", filename) + + if stat.S_ISDIR(file_stat.st_mode): + if sys.platform == "win32": # pragma: win32 cover + # On Windows, this is PermissionError + raise PermissionError(EACCES, "Permission denied", filename) + else: # pragma: win32 no cover # noqa: RET506 + # On linux / macOS, this is IsADirectoryError + raise IsADirectoryError(EISDIR, "Is a directory", filename) + + +def ensure_directory_exists(filename: Path | str) -> None: + """ + Ensure the directory containing the file exists (create it if necessary). + + :param filename: file. 
+ + """ + Path(filename).parent.mkdir(parents=True, exist_ok=True) + + +__all__ = [ + "ensure_directory_exists", + "raise_on_not_writable_file", +] diff --git a/MLPY/Lib/site-packages/filelock/_windows.py b/MLPY/Lib/site-packages/filelock/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..8db55dcbaa3e7bab091781b17ce22fde1fc239f2 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/_windows.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + +if sys.platform == "win32": # pragma: win32 cover + import msvcrt + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + flags = ( + os.O_RDWR # open for read and write + | os.O_CREAT # create file if not exists + | os.O_TRUNC # truncate file if not empty + ) + try: + fd = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: + if exception.errno != EACCES: # has no access to this lock + raise + else: + try: + msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) + except OSError as exception: + os.close(fd) # close file first + if exception.errno != EACCES: # file is already locked + raise + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + fd = cast(int, self._context.lock_file_fd) + self._context.lock_file_fd = None + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + os.close(fd) + + with suppress(OSError): # Probably another instance of the application hat acquired the file lock. 
+ Path(self.lock_file).unlink() + +else: # pragma: win32 no cover + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + + +__all__ = [ + "WindowsFileLock", +] diff --git a/MLPY/Lib/site-packages/filelock/asyncio.py b/MLPY/Lib/site-packages/filelock/asyncio.py new file mode 100644 index 0000000000000000000000000000000000000000..f5848c8969bc187e7ec9ebfc97313a27478689e0 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/asyncio.py @@ -0,0 +1,342 @@ +"""An asyncio-based implementation of the file lock.""" + +from __future__ import annotations + +import asyncio +import contextlib +import logging +import os +import time +from dataclasses import dataclass +from threading import local +from typing import TYPE_CHECKING, Any, Callable, NoReturn, cast + +from ._api import BaseFileLock, FileLockContext, FileLockMeta +from ._error import Timeout +from ._soft import SoftFileLock +from ._unix import UnixFileLock +from ._windows import WindowsFileLock + +if TYPE_CHECKING: + import sys + from concurrent import futures + from types import TracebackType + + if sys.version_info >= (3, 11): # pragma: no cover (py311+) + from typing import Self + else: # pragma: no cover ( None: # noqa: D107 + self.lock = lock + + async def __aenter__(self) -> BaseAsyncFileLock: # noqa: D105 + return self.lock + + async def __aexit__( # noqa: D105 + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + await self.lock.release() + + +class AsyncFileLockMeta(FileLockMeta): + def __call__( # type: ignore[override] # noqa: PLR0913 + cls, # noqa: N805 + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = False, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + loop: asyncio.AbstractEventLoop | None = None, + run_in_executor: bool = True, + executor: futures.Executor | None = None, + ) -> BaseAsyncFileLock: + if thread_local and run_in_executor: + msg = "run_in_executor is not supported when thread_local is True" + raise ValueError(msg) + instance = super().__call__( + lock_file=lock_file, + timeout=timeout, + mode=mode, + thread_local=thread_local, + blocking=blocking, + is_singleton=is_singleton, + loop=loop, + run_in_executor=run_in_executor, + executor=executor, + ) + return cast(BaseAsyncFileLock, instance) + + +class BaseAsyncFileLock(BaseFileLock, metaclass=AsyncFileLockMeta): + """Base class for asynchronous file locks.""" + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = False, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + loop: asyncio.AbstractEventLoop | None = None, + run_in_executor: bool = True, + executor: futures.Executor | None = None, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \ + the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \ + to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock. 
+ :param mode: file permissions for the lockfile + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \ + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \ + per lock file. This is useful if you want to use the lock object for reentrant locking without needing \ + to pass the same object around. + :param loop: The event loop to use. If not specified, the running event loop will be used. + :param run_in_executor: If this is set to ``True`` then the lock will be acquired in an executor. + :param executor: The executor to use. If not specified, the default executor will be used. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + "loop": loop, + "run_in_executor": run_in_executor, + "executor": executor, + } + self._context: AsyncFileLockContext = (AsyncThreadLocalFileContext if thread_local else AsyncFileLockContext)( + **kwargs + ) + + @property + def run_in_executor(self) -> bool: + """::return: whether run in executor.""" + return self._context.run_in_executor + + @property + def executor(self) -> futures.Executor | None: + """::return: the executor.""" + return self._context.executor + + @executor.setter + def executor(self, value: futures.Executor | None) -> None: # pragma: no cover + """ + Change the executor. + + :param value: the new executor or ``None`` + :type value: futures.Executor | None + + """ + self._context.executor = value + + @property + def loop(self) -> asyncio.AbstractEventLoop | None: + """::return: the event loop.""" + return self._context.loop + + async def acquire( # type: ignore[override] + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + blocking: bool | None = None, + ) -> AsyncAcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default + :attr:`~BaseFileLock.timeout` is and if ``timeout < 0``, there is no timeout and + this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + # Increment the number right at the beginning. We can still undo it, if something fails. 
+ self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._acquire) + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + await asyncio.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AsyncAcquireReturnProxy(lock=self) + + async def release(self, force: bool = False) -> None: # type: ignore[override] # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note that the lock is only completely released if the lock counter is 0. + Also note that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case. + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._release) + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + async def _run_internal_method(self, method: Callable[[], Any]) -> None: + if asyncio.iscoroutinefunction(method): + await method() + elif self.run_in_executor: + loop = self.loop or asyncio.get_running_loop() + await loop.run_in_executor(self.executor, method) + else: + method() + + def __enter__(self) -> NoReturn: + """ + Replace old __enter__ method to avoid using it. + + NOTE: DO NOT USE `with` FOR ASYNCIO LOCKS, USE `async with` INSTEAD. + + :return: none + :rtype: NoReturn + """ + msg = "Do not use `with` for asyncio locks, use `async with` instead." + raise NotImplementedError(msg) + + async def __aenter__(self) -> Self: + """ + Acquire the lock. + + :return: the lock object + + """ + await self.acquire() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. 
+ + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + await self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + with contextlib.suppress(RuntimeError): + loop = self.loop or asyncio.get_running_loop() + if not loop.is_running(): # pragma: no cover + loop.run_until_complete(self.release(force=True)) + else: + loop.create_task(self.release(force=True)) + + +class AsyncSoftFileLock(SoftFileLock, BaseAsyncFileLock): + """Simply watches the existence of the lock file.""" + + +class AsyncUnixFileLock(UnixFileLock, BaseAsyncFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + +class AsyncWindowsFileLock(WindowsFileLock, BaseAsyncFileLock): + """Uses the :func:`msvcrt.locking` to hard lock the lock file on windows systems.""" + + +__all__ = [ + "AsyncAcquireReturnProxy", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", +] diff --git a/MLPY/Lib/site-packages/filelock/py.typed b/MLPY/Lib/site-packages/filelock/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/filelock/version.py b/MLPY/Lib/site-packages/filelock/version.py new file mode 100644 index 0000000000000000000000000000000000000000..756ac8e336041b542ebdd95220a4757a76cded61 --- /dev/null +++ b/MLPY/Lib/site-packages/filelock/version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] +else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '3.15.4' +__version_tuple__ = version_tuple = (3, 15, 4) diff --git a/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/INSTALLER b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/METADATA b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1e7b9ea050adbe53f02ea742614fa15bbf2a8c7c --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/METADATA @@ -0,0 +1,278 @@ +Metadata-Version: 2.3 +Name: fsspec +Version: 2024.6.1 +Summary: File-system specification +Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html +Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/ +Project-URL: Homepage, https://github.com/fsspec/filesystem_spec +Maintainer-email: Martin Durant +License: BSD 3-Clause License + + Copyright (c) 2018, Martin Durant + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +License-File: LICENSE +Keywords: file +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.8 +Provides-Extra: abfs +Requires-Dist: adlfs; extra == 'abfs' +Provides-Extra: adl +Requires-Dist: adlfs; extra == 'adl' +Provides-Extra: arrow +Requires-Dist: pyarrow>=1; extra == 'arrow' +Provides-Extra: dask +Requires-Dist: dask; extra == 'dask' +Requires-Dist: distributed; extra == 'dask' +Provides-Extra: dev +Requires-Dist: pre-commit; extra == 'dev' +Requires-Dist: ruff; extra == 'dev' +Provides-Extra: doc +Requires-Dist: numpydoc; extra == 'doc' +Requires-Dist: sphinx; extra == 'doc' +Requires-Dist: sphinx-design; extra == 'doc' +Requires-Dist: sphinx-rtd-theme; extra == 'doc' +Requires-Dist: yarl; extra == 'doc' +Provides-Extra: dropbox +Requires-Dist: dropbox; extra == 'dropbox' +Requires-Dist: dropboxdrivefs; extra == 'dropbox' +Requires-Dist: requests; extra == 'dropbox' +Provides-Extra: entrypoints +Provides-Extra: full +Requires-Dist: adlfs; extra == 'full' +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'full' +Requires-Dist: dask; extra == 'full' +Requires-Dist: distributed; extra == 'full' +Requires-Dist: dropbox; extra == 'full' +Requires-Dist: dropboxdrivefs; extra == 'full' +Requires-Dist: fusepy; extra == 'full' +Requires-Dist: gcsfs; extra == 'full' +Requires-Dist: libarchive-c; extra == 'full' +Requires-Dist: ocifs; extra == 'full' +Requires-Dist: panel; extra == 'full' +Requires-Dist: paramiko; extra == 'full' +Requires-Dist: pyarrow>=1; extra == 'full' +Requires-Dist: pygit2; extra == 'full' +Requires-Dist: requests; extra == 'full' +Requires-Dist: s3fs; extra == 'full' +Requires-Dist: smbprotocol; extra == 'full' +Requires-Dist: tqdm; extra == 'full' +Provides-Extra: fuse +Requires-Dist: fusepy; extra == 'fuse' +Provides-Extra: gcs +Requires-Dist: gcsfs; extra == 'gcs' +Provides-Extra: git +Requires-Dist: pygit2; extra == 'git' +Provides-Extra: github +Requires-Dist: requests; extra == 'github' 
+Provides-Extra: gs +Requires-Dist: gcsfs; extra == 'gs' +Provides-Extra: gui +Requires-Dist: panel; extra == 'gui' +Provides-Extra: hdfs +Requires-Dist: pyarrow>=1; extra == 'hdfs' +Provides-Extra: http +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'http' +Provides-Extra: libarchive +Requires-Dist: libarchive-c; extra == 'libarchive' +Provides-Extra: oci +Requires-Dist: ocifs; extra == 'oci' +Provides-Extra: s3 +Requires-Dist: s3fs; extra == 's3' +Provides-Extra: sftp +Requires-Dist: paramiko; extra == 'sftp' +Provides-Extra: smb +Requires-Dist: smbprotocol; extra == 'smb' +Provides-Extra: ssh +Requires-Dist: paramiko; extra == 'ssh' +Provides-Extra: test +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test' +Requires-Dist: numpy; extra == 'test' +Requires-Dist: pytest; extra == 'test' +Requires-Dist: pytest-asyncio!=0.22.0; extra == 'test' +Requires-Dist: pytest-benchmark; extra == 'test' +Requires-Dist: pytest-cov; extra == 'test' +Requires-Dist: pytest-mock; extra == 'test' +Requires-Dist: pytest-recording; extra == 'test' +Requires-Dist: pytest-rerunfailures; extra == 'test' +Requires-Dist: requests; extra == 'test' +Provides-Extra: test-downstream +Requires-Dist: aiobotocore<3.0.0,>=2.5.4; extra == 'test-downstream' +Requires-Dist: dask-expr; extra == 'test-downstream' +Requires-Dist: dask[dataframe,test]; extra == 'test-downstream' +Requires-Dist: moto[server]<5,>4; extra == 'test-downstream' +Requires-Dist: pytest-timeout; extra == 'test-downstream' +Requires-Dist: xarray; extra == 'test-downstream' +Provides-Extra: test-full +Requires-Dist: adlfs; extra == 'test-full' +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test-full' +Requires-Dist: cloudpickle; extra == 'test-full' +Requires-Dist: dask; extra == 'test-full' +Requires-Dist: distributed; extra == 'test-full' +Requires-Dist: dropbox; extra == 'test-full' +Requires-Dist: dropboxdrivefs; extra == 'test-full' +Requires-Dist: fastparquet; extra == 'test-full' +Requires-Dist: fusepy; extra == 'test-full' +Requires-Dist: gcsfs; extra == 'test-full' +Requires-Dist: jinja2; extra == 'test-full' +Requires-Dist: kerchunk; extra == 'test-full' +Requires-Dist: libarchive-c; extra == 'test-full' +Requires-Dist: lz4; extra == 'test-full' +Requires-Dist: notebook; extra == 'test-full' +Requires-Dist: numpy; extra == 'test-full' +Requires-Dist: ocifs; extra == 'test-full' +Requires-Dist: pandas; extra == 'test-full' +Requires-Dist: panel; extra == 'test-full' +Requires-Dist: paramiko; extra == 'test-full' +Requires-Dist: pyarrow; extra == 'test-full' +Requires-Dist: pyarrow>=1; extra == 'test-full' +Requires-Dist: pyftpdlib; extra == 'test-full' +Requires-Dist: pygit2; extra == 'test-full' +Requires-Dist: pytest; extra == 'test-full' +Requires-Dist: pytest-asyncio!=0.22.0; extra == 'test-full' +Requires-Dist: pytest-benchmark; extra == 'test-full' +Requires-Dist: pytest-cov; extra == 'test-full' +Requires-Dist: pytest-mock; extra == 'test-full' +Requires-Dist: pytest-recording; extra == 'test-full' +Requires-Dist: pytest-rerunfailures; extra == 'test-full' +Requires-Dist: python-snappy; extra == 'test-full' +Requires-Dist: requests; extra == 'test-full' +Requires-Dist: smbprotocol; extra == 'test-full' +Requires-Dist: tqdm; extra == 'test-full' +Requires-Dist: urllib3; extra == 'test-full' +Requires-Dist: zarr; extra == 'test-full' +Requires-Dist: zstandard; extra == 'test-full' +Provides-Extra: tqdm +Requires-Dist: tqdm; extra == 'tqdm' +Description-Content-Type: text/markdown + +# filesystem_spec + +[![PyPI 
version](https://badge.fury.io/py/fsspec.svg)](https://pypi.python.org/pypi/fsspec/) +[![Anaconda-Server Badge](https://anaconda.org/conda-forge/fsspec/badges/version.svg)](https://anaconda.org/conda-forge/fsspec) +![Build](https://github.com/fsspec/filesystem_spec/workflows/CI/badge.svg) +[![Docs](https://readthedocs.org/projects/filesystem-spec/badge/?version=latest)](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest) + +A specification for pythonic filesystems. + +## Install + +```bash +pip install fsspec +``` + +would install the base fsspec. Various optionally supported features might require specification of custom +extra require, e.g. `pip install fsspec[ssh]` will install dependencies for `ssh` backends support. +Use `pip install fsspec[full]` for installation of all known extra dependencies. + +Up-to-date package also provided through conda-forge distribution: + +```bash +conda install -c conda-forge fsspec +``` + + +## Purpose + +To produce a template or specification for a file-system interface, that specific implementations should follow, +so that applications making use of them can rely on a common behaviour and not have to worry about the specific +internal implementation decisions with any given backend. Many such implementations are included in this package, +or in sister projects such as `s3fs` and `gcsfs`. + +In addition, if this is well-designed, then additional functionality, such as a key-value store or FUSE +mounting of the file-system implementation may be available for all implementations "for free". + +## Documentation + +Please refer to [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest) + +## Develop + +fsspec uses GitHub Actions for CI. Environment files can be found +in the "ci/" directory. Note that the main environment is called "py38", +but it is expected that the version of python installed be adjustable at +CI runtime. For local use, pick a version suitable for you. + +```bash +# For a new environment (mamba / conda). +mamba create -n fsspec -c conda-forge python=3.9 -y +conda activate fsspec + +# Standard dev install with docs and tests. +pip install -e ".[dev,doc,test]" + +# Full tests except for downstream +pip install s3fs +pip uninstall s3fs +pip install -e .[dev,doc,test_full] +pip install s3fs --no-deps +pytest -v + +# Downstream tests. +sh install_s3fs.sh +# Windows powershell. +install_s3fs.sh +``` + +### Testing + +Tests can be run in the dev environment, if activated, via ``pytest fsspec``. + +The full fsspec suite requires a system-level docker, docker-compose, and fuse +installation. If only making changes to one backend implementation, it is +not generally necessary to run all tests locally. + +It is expected that contributors ensure that any change to fsspec does not +cause issues or regressions for either other fsspec-related packages such +as gcsfs and s3fs, nor for downstream users of fsspec. The "downstream" CI +run and corresponding environment file run a set of tests from the dask +test suite, and very minimal tests against pandas and zarr from the +test_downstream.py module in this repo. + +### Code Formatting + +fsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure +a consistent code format throughout the project. +Run ``black fsspec`` from the root of the filesystem_spec repository to +auto-format your code. Additionally, many editors have plugins that will apply +``black`` as you edit files. ``black`` is included in the ``tox`` environments. 
+ +Optionally, you may wish to setup [pre-commit hooks](https://pre-commit.com) to +automatically run ``black`` when you make a git commit. +Run ``pre-commit install --install-hooks`` from the root of the +filesystem_spec repository to setup pre-commit hooks. ``black`` will now be run +before you commit, reformatting any changed files. You can format without +committing via ``pre-commit run`` or skip these checks with ``git commit +--no-verify``. diff --git a/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/RECORD b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..222534a7105a43b1d06904f99a17909e389045b8 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/RECORD @@ -0,0 +1,107 @@ +fsspec-2024.6.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +fsspec-2024.6.1.dist-info/METADATA,sha256=ijt16ZAzPN9P0_1AU4zcKdiM18pkIyf1Gkr-IFXrlLw,11749 +fsspec-2024.6.1.dist-info/RECORD,, +fsspec-2024.6.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +fsspec-2024.6.1.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513 +fsspec/__init__.py,sha256=l9MJaNNV2d4wKpCtMvXDr55n92DkdrAayGy3F9ICjzk,1998 +fsspec/__pycache__/__init__.cpython-39.pyc,, +fsspec/__pycache__/_version.cpython-39.pyc,, +fsspec/__pycache__/archive.cpython-39.pyc,, +fsspec/__pycache__/asyn.cpython-39.pyc,, +fsspec/__pycache__/caching.cpython-39.pyc,, +fsspec/__pycache__/callbacks.cpython-39.pyc,, +fsspec/__pycache__/compression.cpython-39.pyc,, +fsspec/__pycache__/config.cpython-39.pyc,, +fsspec/__pycache__/conftest.cpython-39.pyc,, +fsspec/__pycache__/core.cpython-39.pyc,, +fsspec/__pycache__/dircache.cpython-39.pyc,, +fsspec/__pycache__/exceptions.cpython-39.pyc,, +fsspec/__pycache__/fuse.cpython-39.pyc,, +fsspec/__pycache__/generic.cpython-39.pyc,, +fsspec/__pycache__/gui.cpython-39.pyc,, +fsspec/__pycache__/json.cpython-39.pyc,, +fsspec/__pycache__/mapping.cpython-39.pyc,, +fsspec/__pycache__/parquet.cpython-39.pyc,, +fsspec/__pycache__/registry.cpython-39.pyc,, +fsspec/__pycache__/spec.cpython-39.pyc,, +fsspec/__pycache__/transaction.cpython-39.pyc,, +fsspec/__pycache__/utils.cpython-39.pyc,, +fsspec/_version.py,sha256=bs5gFL9Mlwh1IvRh5hZawz_SleC88gaDg6jodp6omsQ,417 +fsspec/archive.py,sha256=S__DzfZj-urAN3tp2W6jJ6YDiXG1fAl7FjvWUN73qIE,2386 +fsspec/asyn.py,sha256=AOd2SXH2YPCaQL5jA6IegYevdMFkAnGD7Seh9DC2gSE,36404 +fsspec/caching.py,sha256=x6IEdxtR3cMDjy40sNHyawR2SLtNSahVuP5i_TImdso,31600 +fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210 +fsspec/compression.py,sha256=jCSUMJu-zSNyrusnHT0wKXgOd1tTJR6vM126i5SR5Zc,4865 +fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279 +fsspec/conftest.py,sha256=fVfx-NLrH_OZS1TIpYNoPzM7efEcMoL62reHOdYeFCA,1245 +fsspec/core.py,sha256=Iln37fNZqjjk5vaDGU_0WWuwOxN1iVsQ6sDmCmuEvrs,23681 +fsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717 +fsspec/exceptions.py,sha256=pauSLDMxzTJMOjvX1WEUK0cMyFkrFxpWJsyFywav7A8,331 +fsspec/fuse.py,sha256=66amOa6wdIbS0DMhhfAPUoOB37HPorfXD1izV0prmTY,10145 +fsspec/generic.py,sha256=AFbo-mHBt5QJV1Aplg5CJuUiiJ4bNQhcKRuwkZJdWac,13761 +fsspec/gui.py,sha256=k46F11VGBLlrliPj3XbxHKlVGByWoX67Ofmu9ijaPBQ,13929 +fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fsspec/implementations/__pycache__/__init__.cpython-39.pyc,, +fsspec/implementations/__pycache__/arrow.cpython-39.pyc,, 
+fsspec/implementations/__pycache__/cache_mapper.cpython-39.pyc,, +fsspec/implementations/__pycache__/cache_metadata.cpython-39.pyc,, +fsspec/implementations/__pycache__/cached.cpython-39.pyc,, +fsspec/implementations/__pycache__/dask.cpython-39.pyc,, +fsspec/implementations/__pycache__/data.cpython-39.pyc,, +fsspec/implementations/__pycache__/dbfs.cpython-39.pyc,, +fsspec/implementations/__pycache__/dirfs.cpython-39.pyc,, +fsspec/implementations/__pycache__/ftp.cpython-39.pyc,, +fsspec/implementations/__pycache__/git.cpython-39.pyc,, +fsspec/implementations/__pycache__/github.cpython-39.pyc,, +fsspec/implementations/__pycache__/http.cpython-39.pyc,, +fsspec/implementations/__pycache__/jupyter.cpython-39.pyc,, +fsspec/implementations/__pycache__/libarchive.cpython-39.pyc,, +fsspec/implementations/__pycache__/local.cpython-39.pyc,, +fsspec/implementations/__pycache__/memory.cpython-39.pyc,, +fsspec/implementations/__pycache__/reference.cpython-39.pyc,, +fsspec/implementations/__pycache__/sftp.cpython-39.pyc,, +fsspec/implementations/__pycache__/smb.cpython-39.pyc,, +fsspec/implementations/__pycache__/tar.cpython-39.pyc,, +fsspec/implementations/__pycache__/webhdfs.cpython-39.pyc,, +fsspec/implementations/__pycache__/zip.cpython-39.pyc,, +fsspec/implementations/arrow.py,sha256=Y4F_IwWXuJI1mRO_c0_PI5o-Wp58RLmoiH_s-x88w4M,8631 +fsspec/implementations/cache_mapper.py,sha256=W4wlxyPxZbSp9ItJ0pYRVBMh6bw9eFypgP6kUYuuiI4,2421 +fsspec/implementations/cache_metadata.py,sha256=pcOJYcBQY5OaC7Yhw0F3wjg08QLYApGmoISCrbs59ks,8511 +fsspec/implementations/cached.py,sha256=t5atYATgjuABm-mUyReqjGqVyyP1XBSuROX92aMecxY,32826 +fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466 +fsspec/implementations/data.py,sha256=LDLczxRh8h7x39Zjrd-GgzdQHr78yYxDlrv2C9Uxb5E,1658 +fsspec/implementations/dbfs.py,sha256=cix9OYUveuSOx5UO5uRUwNUkYqjzyY0fkKnca1kTgZ0,15014 +fsspec/implementations/dirfs.py,sha256=0H6k67e2lZgq3U4K64ao6894L4134COUCekc3PCwTq8,11488 +fsspec/implementations/ftp.py,sha256=rp6cTog8xqjDPlKdSLKcsyP7K593_ByMabxGbNSEpTo,11655 +fsspec/implementations/git.py,sha256=vKGI-Vd5q4H2RrvhebkPc9NwlfkZ980OUGhebeCw-M0,4034 +fsspec/implementations/github.py,sha256=eAn1kJ7VeWR6gVoVRLBYclF_rQDXSJU-xzMXpvPQWqs,8002 +fsspec/implementations/http.py,sha256=ymjMQTXW6-akqqEoEKpjf416JAzP9N4VhnWiNYbRklk,29665 +fsspec/implementations/jupyter.py,sha256=B2uj7OEm7yIk-vRSsO37_ND0t0EBvn4B-Su43ibN4Pg,3811 +fsspec/implementations/libarchive.py,sha256=5_I2DiLXwQ1JC8x-K7jXu-tBwhO9dj7tFLnb0bTnVMQ,7102 +fsspec/implementations/local.py,sha256=qc68w69-I7zqVO8njv_s-THVImwICOqxyt-_2EK1VLg,15042 +fsspec/implementations/memory.py,sha256=-BpOVwaWyW2rDvxWIIcrZTNFAhvuG66VWeIM6vLwhkc,10134 +fsspec/implementations/reference.py,sha256=iDisTIZ8kIWG_FNSGaDf88RClywAwoF8yMgoVcxM4cY,44308 +fsspec/implementations/sftp.py,sha256=fMY9XZcmpjszQ2tCqO_TPaJesaeD_Dv7ptYzgUPGoO0,5631 +fsspec/implementations/smb.py,sha256=RcqCvVBPD3U0I0Rc31ns6HRhqKVDugjPQMDPVpvZSNg,11408 +fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111 +fsspec/implementations/webhdfs.py,sha256=Wm7zr0iX3SZx5LtWfJIo-5rkIaoEoWq_Ev87NWbUgug,16721 +fsspec/implementations/zip.py,sha256=vc1fNz-yO8uWQ9bQUqBFYpTcgsfZQq9vDwwg4Aufs9Y,4417 +fsspec/json.py,sha256=65sQ0Y7mTj33u_Y4IId5up4abQ3bAel4E4QzbKMiQSg,3826 +fsspec/mapping.py,sha256=hSsiRo-dgAOj6oHf67bF3i11U4xREglXToHGUX4GhRY,8261 +fsspec/parquet.py,sha256=ONG29Enesp0ToCH2bQ7zkpimnVIsZ2S4xCLj35-fY78,19455 +fsspec/registry.py,sha256=HVC-4HWDZnA6rycJwAu8F8ZXzON_85MTQVIyS6LOHxo,11320 
+fsspec/spec.py,sha256=6rb-C3hTZLLtMGx2HDp37N_sZKs5RtYdcj8XOlHFi_c,69586 +fsspec/tests/abstract/__init__.py,sha256=i1wcFixV6QhOwdoB24c8oXjzobISNqiKVz9kl2DvAY8,10028 +fsspec/tests/abstract/__pycache__/__init__.cpython-39.pyc,, +fsspec/tests/abstract/__pycache__/common.cpython-39.pyc,, +fsspec/tests/abstract/__pycache__/copy.cpython-39.pyc,, +fsspec/tests/abstract/__pycache__/get.cpython-39.pyc,, +fsspec/tests/abstract/__pycache__/mv.cpython-39.pyc,, +fsspec/tests/abstract/__pycache__/put.cpython-39.pyc,, +fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973 +fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967 +fsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755 +fsspec/tests/abstract/mv.py,sha256=k8eUEBIrRrGMsBY5OOaDXdGnQUKGwDIfQyduB6YD3Ns,1982 +fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201 +fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398 +fsspec/utils.py,sha256=8czEIoX4GpcC42WLGoy3t_EMeZjJE8e5rTpOT_nEPo0,22987 diff --git a/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/WHEEL b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/licenses/LICENSE b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..67590a5e5be5a5a2dde3fe53a7512e404a896c22 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec-2024.6.1.dist-info/licenses/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Martin Durant +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
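The fsspec README earlier in this diff claims a single, pythonic filesystem interface that every backend implements. As a rough editorial sketch of how that interface is typically used (this is not part of the vendored sources; it assumes only the in-memory backend that ships with fsspec itself, and the `filesystem`/`open` helpers exported by the `__init__.py` added in the next hunk):

```python
import fsspec

# Every registered backend exposes the same AbstractFileSystem API,
# so code written against one protocol can be reused with another.
fs = fsspec.filesystem("memory")          # swap for "file", "s3", "gcs", ...

# Write a small file through the common open() interface.
with fs.open("/demo/hello.txt", "wb") as f:
    f.write(b"hello fsspec")

print(fs.exists("/demo/hello.txt"))       # True
print(fs.cat_file("/demo/hello.txt"))     # b'hello fsspec'

# The URL-style helper resolves the protocol prefix and returns a file-like object.
with fsspec.open("memory://demo/hello.txt", "rb") as f:
    print(f.read())
```

Swapping `"memory"` for a remote protocol is expected to work unchanged once the matching extra (e.g. `pip install fsspec[s3]`) is installed, which is the point the README's "Purpose" section is making.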
diff --git a/MLPY/Lib/site-packages/fsspec/__init__.py b/MLPY/Lib/site-packages/fsspec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7af775f77cd67734aebd25ce4e051be87e64fa8 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/__init__.py @@ -0,0 +1,69 @@ +from importlib.metadata import entry_points + +from . import caching +from ._version import __version__ # noqa: F401 +from .callbacks import Callback +from .compression import available_compressions +from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs +from .exceptions import FSTimeoutError +from .mapping import FSMap, get_mapper +from .registry import ( + available_protocols, + filesystem, + get_filesystem_class, + register_implementation, + registry, +) +from .spec import AbstractFileSystem + +__all__ = [ + "AbstractFileSystem", + "FSTimeoutError", + "FSMap", + "filesystem", + "register_implementation", + "get_filesystem_class", + "get_fs_token_paths", + "get_mapper", + "open", + "open_files", + "open_local", + "registry", + "caching", + "Callback", + "available_protocols", + "available_compressions", + "url_to_fs", +] + + +def process_entries(): + if entry_points is not None: + try: + eps = entry_points() + except TypeError: + pass # importlib-metadata < 0.8 + else: + if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0 + specs = eps.select(group="fsspec.specs") + else: + specs = eps.get("fsspec.specs", []) + registered_names = {} + for spec in specs: + err_msg = f"Unable to load filesystem from {spec}" + name = spec.name + if name in registered_names: + continue + registered_names[name] = True + register_implementation( + name, + spec.value.replace(":", "."), + errtxt=err_msg, + # We take our implementations as the ones to overload with if + # for some reason we encounter some, may be the same, already + # registered + clobber=True, + ) + + +process_entries() diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0875da62582efad839f74fa271f868b32978613a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/_version.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71c4244a21289dd54b5021074cc9ccfe7e743d0a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/archive.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/archive.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2560c176a63f896e551c9ea522aee25035ba4ea1 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/archive.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/asyn.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/asyn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..602b915a23fca511efa82395901a9f87abb82070 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/asyn.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/caching.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/caching.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bd9bcab99636fc25fc919df42441a0543c436fc9 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/caching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/callbacks.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/callbacks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b64ebf88e2dc899c92004e71e2c757c972a56408 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/callbacks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/compression.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/compression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5b4931c6db252315c2192b93747f8a73be26f68 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/compression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/config.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..297757de11592f2616692f7e376d182b549e10ce Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/conftest.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/conftest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e13c2eda17026899b4bfaba65352e0dadc3fa7eb Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/conftest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/core.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..957e4547f2bad037cf98b267b039828e13ba8229 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/dircache.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/dircache.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d3c8165957f94b052954d64746e956e03efb8d Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/dircache.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cc802b509a9974f07d146df3518fa772b656b67 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/fuse.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/fuse.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e8e53ba64691636ffc6feb93643f008b148103f Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/fuse.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/generic.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/generic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b90dafd17f731ae55a76c9974cddb3b508bf7f51 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/generic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/gui.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/gui.cpython-39.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..6f33ad9cf573b2727560deee73ba51ea08cee35a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/gui.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/json.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/json.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1f7b497cab6d443b7fa3c67deea1be47ab39b29 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/json.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/mapping.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/mapping.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b91dcad7edbc4504b35f191ffbf95267d5c7853b Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/mapping.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/parquet.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/parquet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..591b1ee25efded506d1fa067fbe5f619663dd99a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/parquet.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/registry.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/registry.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69e8c86f07929a06333c54986a5710477618fb02 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/registry.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/spec.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/spec.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc0078689157076c3287f359f7c80b71e26772d9 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/spec.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/transaction.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/transaction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1f4a3c273950b72be4f8aeec5bbb3a5137509aa Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/transaction.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..260de316369d737abe3d0ebfb1ecdb9becd96e27 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/_version.py b/MLPY/Lib/site-packages/fsspec/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..2b84bd68442028bf43b66d524065b76693b244cd --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '2024.6.1' +__version_tuple__ = version_tuple = (2024, 6, 1) diff --git a/MLPY/Lib/site-packages/fsspec/archive.py b/MLPY/Lib/site-packages/fsspec/archive.py new file mode 100644 index 0000000000000000000000000000000000000000..f466780fc802d6aa79be02a2af3424c051503708 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/archive.py @@ -0,0 +1,73 @@ +from fsspec import AbstractFileSystem +from fsspec.utils import tokenize + + +class AbstractArchiveFileSystem(AbstractFileSystem): + """ + A generic superclass for implementing Archive-based filesystems. + + Currently, it is shared amongst + :class:`~fsspec.implementations.zip.ZipFileSystem`, + :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and + :class:`~fsspec.implementations.tar.TarFileSystem`. + """ + + def __str__(self): + return f"" + + __repr__ = __str__ + + def ukey(self, path): + return tokenize(path, self.fo, self.protocol) + + def _all_dirnames(self, paths): + """Returns *all* directory names for each path in paths, including intermediate + ones. + + Parameters + ---------- + paths: Iterable of path strings + """ + if len(paths) == 0: + return set() + + dirnames = {self._parent(path) for path in paths} - {self.root_marker} + return dirnames | self._all_dirnames(dirnames) + + def info(self, path, **kwargs): + self._get_dirs() + path = self._strip_protocol(path) + if path in {"", "/"} and self.dir_cache: + return {"name": "", "type": "directory", "size": 0} + if path in self.dir_cache: + return self.dir_cache[path] + elif path + "/" in self.dir_cache: + return self.dir_cache[path + "/"] + else: + raise FileNotFoundError(path) + + def ls(self, path, detail=True, **kwargs): + self._get_dirs() + paths = {} + for p, f in self.dir_cache.items(): + p = p.rstrip("/") + if "/" in p: + root = p.rsplit("/", 1)[0] + else: + root = "" + if root == path.rstrip("/"): + paths[p] = f + elif all( + (a == b) + for a, b in zip(path.split("/"), [""] + p.strip("/").split("/")) + ): + # root directory entry + ppath = p.rstrip("/").split("/", 1)[0] + if ppath not in paths: + out = {"name": ppath, "size": 0, "type": "directory"} + paths[ppath] = out + if detail: + out = sorted(paths.values(), key=lambda _: _["name"]) + return out + else: + return sorted(paths) diff --git a/MLPY/Lib/site-packages/fsspec/asyn.py b/MLPY/Lib/site-packages/fsspec/asyn.py new file mode 100644 index 0000000000000000000000000000000000000000..a040efc4bba984d27b132e961f8245c1ec11721d --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/asyn.py @@ -0,0 +1,1096 @@ +import asyncio +import asyncio.events +import functools +import inspect +import io +import numbers +import os +import re +import threading +from contextlib import contextmanager +from glob import has_magic +from typing import TYPE_CHECKING, Iterable + +from .callbacks import DEFAULT_CALLBACK +from .exceptions import FSTimeoutError +from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep +from .spec import AbstractBufferedFile, AbstractFileSystem +from .utils import glob_translate, is_exception, other_paths + +private = re.compile("_[^_]") +iothread = [None] # dedicated fsspec IO thread +loop = [None] # global event loop for any non-async instance +_lock = None # global lock placeholder +get_running_loop = asyncio.get_running_loop + + +def get_lock(): + """Allocate or return a threading lock. 
+ + The lock is allocated on first use to allow setting one lock per forked process. + """ + global _lock + if not _lock: + _lock = threading.Lock() + return _lock + + +def reset_lock(): + """Reset the global lock. + + This should be called only on the init of a forked process to reset the lock to + None, enabling the new forked process to get a new lock. + """ + global _lock + + iothread[0] = None + loop[0] = None + _lock = None + + +async def _runner(event, coro, result, timeout=None): + timeout = timeout if timeout else None # convert 0 or 0.0 to None + if timeout is not None: + coro = asyncio.wait_for(coro, timeout=timeout) + try: + result[0] = await coro + except Exception as ex: + result[0] = ex + finally: + event.set() + + +def sync(loop, func, *args, timeout=None, **kwargs): + """ + Make loop run coroutine until it returns. Runs in other thread + + Examples + -------- + >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, + timeout=timeout, **kwargs) + """ + timeout = timeout if timeout else None # convert 0 or 0.0 to None + # NB: if the loop is not running *yet*, it is OK to submit work + # and we will wait for it + if loop is None or loop.is_closed(): + raise RuntimeError("Loop is not running") + try: + loop0 = asyncio.events.get_running_loop() + if loop0 is loop: + raise NotImplementedError("Calling sync() from within a running loop") + except NotImplementedError: + raise + except RuntimeError: + pass + coro = func(*args, **kwargs) + result = [None] + event = threading.Event() + asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop) + while True: + # this loops allows thread to get interrupted + if event.wait(1): + break + if timeout is not None: + timeout -= 1 + if timeout < 0: + raise FSTimeoutError + + return_result = result[0] + if isinstance(return_result, asyncio.TimeoutError): + # suppress asyncio.TimeoutError, raise FSTimeoutError + raise FSTimeoutError from return_result + elif isinstance(return_result, BaseException): + raise return_result + else: + return return_result + + +def sync_wrapper(func, obj=None): + """Given a function, make so can be called in blocking contexts + + Leave obj=None if defining within a class. Pass the instance if attaching + as an attribute of the instance. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + self = obj or args[0] + return sync(self.loop, func, *args, **kwargs) + + return wrapper + + +@contextmanager +def _selector_policy(): + original_policy = asyncio.get_event_loop_policy() + try: + if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + yield + finally: + asyncio.set_event_loop_policy(original_policy) + + +def get_loop(): + """Create or return the default fsspec IO loop + + The loop will be running on a separate thread. 
+ """ + if loop[0] is None: + with get_lock(): + # repeat the check just in case the loop got filled between the + # previous two calls from another thread + if loop[0] is None: + with _selector_policy(): + loop[0] = asyncio.new_event_loop() + th = threading.Thread(target=loop[0].run_forever, name="fsspecIO") + th.daemon = True + th.start() + iothread[0] = th + return loop[0] + + +if TYPE_CHECKING: + import resource + + ResourceError = resource.error +else: + try: + import resource + except ImportError: + resource = None + ResourceError = OSError + else: + ResourceError = getattr(resource, "error", OSError) + +_DEFAULT_BATCH_SIZE = 128 +_NOFILES_DEFAULT_BATCH_SIZE = 1280 + + +def _get_batch_size(nofiles=False): + from fsspec.config import conf + + if nofiles: + if "nofiles_gather_batch_size" in conf: + return conf["nofiles_gather_batch_size"] + else: + if "gather_batch_size" in conf: + return conf["gather_batch_size"] + if nofiles: + return _NOFILES_DEFAULT_BATCH_SIZE + if resource is None: + return _DEFAULT_BATCH_SIZE + + try: + soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE) + except (ImportError, ValueError, ResourceError): + return _DEFAULT_BATCH_SIZE + + if soft_limit == resource.RLIM_INFINITY: + return -1 + else: + return soft_limit // 8 + + +def running_async() -> bool: + """Being executed by an event loop?""" + try: + asyncio.get_running_loop() + return True + except RuntimeError: + return False + + +async def _run_coros_in_chunks( + coros, + batch_size=None, + callback=DEFAULT_CALLBACK, + timeout=None, + return_exceptions=False, + nofiles=False, +): + """Run the given coroutines in chunks. + + Parameters + ---------- + coros: list of coroutines to run + batch_size: int or None + Number of coroutines to submit/wait on simultaneously. + If -1, then it will not be any throttling. If + None, it will be inferred from _get_batch_size() + callback: fsspec.callbacks.Callback instance + Gets a relative_update when each coroutine completes + timeout: number or None + If given, each coroutine times out after this time. Note that, since + there are multiple batches, the total run time of this function will in + general be longer + return_exceptions: bool + Same meaning as in asyncio.gather + nofiles: bool + If inferring the batch_size, does this operation involve local files? + If yes, you normally expect smaller batches. 
+ """ + + if batch_size is None: + batch_size = _get_batch_size(nofiles=nofiles) + + if batch_size == -1: + batch_size = len(coros) + + assert batch_size > 0 + + async def _run_coro(coro, i): + try: + return await asyncio.wait_for(coro, timeout=timeout), i + except Exception as e: + if not return_exceptions: + raise + return e, i + finally: + callback.relative_update(1) + + i = 0 + n = len(coros) + results = [None] * n + pending = set() + + while pending or i < n: + while len(pending) < batch_size and i < n: + pending.add(asyncio.ensure_future(_run_coro(coros[i], i))) + i += 1 + + if not pending: + break + + done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + while done: + result, k = await done.pop() + results[k] = result + + return results + + +# these methods should be implemented as async by any async-able backend +async_methods = [ + "_ls", + "_cat_file", + "_get_file", + "_put_file", + "_rm_file", + "_cp_file", + "_pipe_file", + "_expand_path", + "_info", + "_isfile", + "_isdir", + "_exists", + "_walk", + "_glob", + "_find", + "_du", + "_size", + "_mkdir", + "_makedirs", +] + + +class AsyncFileSystem(AbstractFileSystem): + """Async file operations, default implementations + + Passes bulk operations to asyncio.gather for concurrent operation. + + Implementations that have concurrent batch operations and/or async methods + should inherit from this class instead of AbstractFileSystem. Docstrings are + copied from the un-underscored method in AbstractFileSystem, if not given. + """ + + # note that methods do not have docstring here; they will be copied + # for _* methods and inferred for overridden methods. + + async_impl = True + mirror_sync_methods = True + disable_throttling = False + + def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs): + self.asynchronous = asynchronous + self._pid = os.getpid() + if not asynchronous: + self._loop = loop or get_loop() + else: + self._loop = None + self.batch_size = batch_size + super().__init__(*args, **kwargs) + + @property + def loop(self): + if self._pid != os.getpid(): + raise RuntimeError("This class is not fork-safe") + return self._loop + + async def _rm_file(self, path, **kwargs): + raise NotImplementedError + + async def _rm(self, path, recursive=False, batch_size=None, **kwargs): + # TODO: implement on_error + batch_size = batch_size or self.batch_size + path = await self._expand_path(path, recursive=recursive) + return await _run_coros_in_chunks( + [self._rm_file(p, **kwargs) for p in reversed(path)], + batch_size=batch_size, + nofiles=True, + ) + + async def _cp_file(self, path1, path2, **kwargs): + raise NotImplementedError + + async def _copy( + self, + path1, + path2, + recursive=False, + on_error=None, + maxdepth=None, + batch_size=None, + **kwargs, + ): + if on_error is None and recursive: + on_error = "ignore" + elif on_error is None: + on_error = "raise" + + if isinstance(path1, list) and isinstance(path2, list): + # No need to expand paths when both source and destination + # are provided as lists + paths1 = path1 + paths2 = path2 + else: + source_is_str = isinstance(path1, str) + paths1 = await self._expand_path( + path1, maxdepth=maxdepth, recursive=recursive + ) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + paths1 = [ + p for p in paths1 if not (trailing_sep(p) or await self._isdir(p)) + ] + if not paths1: + return + + source_is_file = len(paths1) == 1 + dest_is_dir = isinstance(path2, str) and ( 
+ trailing_sep(path2) or await self._isdir(path2) + ) + + exists = source_is_str and ( + (has_magic(path1) and source_is_file) + or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1)) + ) + paths2 = other_paths( + paths1, + path2, + exists=exists, + flatten=not source_is_str, + ) + + batch_size = batch_size or self.batch_size + coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)] + result = await _run_coros_in_chunks( + coros, batch_size=batch_size, return_exceptions=True, nofiles=True + ) + + for ex in filter(is_exception, result): + if on_error == "ignore" and isinstance(ex, FileNotFoundError): + continue + raise ex + + async def _pipe_file(self, path, value, **kwargs): + raise NotImplementedError + + async def _pipe(self, path, value=None, batch_size=None, **kwargs): + if isinstance(path, str): + path = {path: value} + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._pipe_file(k, v, **kwargs) for k, v in path.items()], + batch_size=batch_size, + nofiles=True, + ) + + async def _process_limits(self, url, start, end): + """Helper for "Range"-based _cat_file""" + size = None + suff = False + if start is not None and start < 0: + # if start is negative and end None, end is the "suffix length" + if end is None: + end = -start + start = "" + suff = True + else: + size = size or (await self._info(url))["size"] + start = size + start + elif start is None: + start = 0 + if not suff: + if end is not None and end < 0: + if start is not None: + size = size or (await self._info(url))["size"] + end = size + end + elif end is None: + end = "" + if isinstance(end, numbers.Integral): + end -= 1 # bytes range is inclusive + return f"bytes={start}-{end}" + + async def _cat_file(self, path, start=None, end=None, **kwargs): + raise NotImplementedError + + async def _cat( + self, path, recursive=False, on_error="raise", batch_size=None, **kwargs + ): + paths = await self._expand_path(path, recursive=recursive) + coros = [self._cat_file(path, **kwargs) for path in paths] + batch_size = batch_size or self.batch_size + out = await _run_coros_in_chunks( + coros, batch_size=batch_size, nofiles=True, return_exceptions=True + ) + if on_error == "raise": + ex = next(filter(is_exception, out), False) + if ex: + raise ex + if ( + len(paths) > 1 + or isinstance(path, list) + or paths[0] != self._strip_protocol(path) + ): + return { + k: v + for k, v in zip(paths, out) + if on_error != "omit" or not is_exception(v) + } + else: + return out[0] + + async def _cat_ranges( + self, + paths, + starts, + ends, + max_gap=None, + batch_size=None, + on_error="return", + **kwargs, + ): + """Get the contents of byte ranges from one or more files + + Parameters + ---------- + paths: list + A list of of filepaths on this filesystems + starts, ends: int or list + Bytes limits of the read. If using a single int, the same value will be + used to read all the specified files. 
+ """ + # TODO: on_error + if max_gap is not None: + # use utils.merge_offset_ranges + raise NotImplementedError + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, Iterable): + starts = [starts] * len(paths) + if not isinstance(ends, Iterable): + ends = [ends] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + coros = [ + self._cat_file(p, start=s, end=e, **kwargs) + for p, s, e in zip(paths, starts, ends) + ] + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + coros, batch_size=batch_size, nofiles=True, return_exceptions=True + ) + + async def _put_file(self, lpath, rpath, **kwargs): + raise NotImplementedError + + async def _put( + self, + lpath, + rpath, + recursive=False, + callback=DEFAULT_CALLBACK, + batch_size=None, + maxdepth=None, + **kwargs, + ): + """Copy file(s) from local. + + Copies a specific file or tree of files (if recursive=True). If rpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. + + The put_file method will be called concurrently on a batch of files. The + batch_size option can configure the amount of futures that can be executed + at the same time. If it is -1, then all the files will be uploaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit . + """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + source_is_str = isinstance(lpath, str) + if source_is_str: + lpath = make_path_posix(lpath) + fs = LocalFileSystem() + lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))] + if not lpaths: + return + + source_is_file = len(lpaths) == 1 + dest_is_dir = isinstance(rpath, str) and ( + trailing_sep(rpath) or await self._isdir(rpath) + ) + + rpath = self._strip_protocol(rpath) + exists = source_is_str and ( + (has_magic(lpath) and source_is_file) + or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath)) + ) + rpaths = other_paths( + lpaths, + rpath, + exists=exists, + flatten=not source_is_str, + ) + + is_dir = {l: os.path.isdir(l) for l in lpaths} + rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]] + file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]] + + await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs]) + batch_size = batch_size or self.batch_size + + coros = [] + callback.set_size(len(file_pairs)) + for lfile, rfile in file_pairs: + put_file = callback.branch_coro(self._put_file) + coros.append(put_file(lfile, rfile, **kwargs)) + + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _get_file(self, rpath, lpath, **kwargs): + raise NotImplementedError + + async def _get( + self, + rpath, + lpath, + recursive=False, + callback=DEFAULT_CALLBACK, + maxdepth=None, + **kwargs, + ): + """Copy file(s) to local. + + Copies a specific file or tree of files (if recursive=True). If lpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. 
Can submit a list of paths, which may be glob-patterns + and will be expanded. + + The get_file method will be called concurrently on a batch of files. The + batch_size option can configure the amount of futures that can be executed + at the same time. If it is -1, then all the files will be uploaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit . + """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + source_is_str = isinstance(rpath, str) + # First check for rpath trailing slash as _strip_protocol removes it. + source_not_trailing_sep = source_is_str and not trailing_sep(rpath) + rpath = self._strip_protocol(rpath) + rpaths = await self._expand_path( + rpath, recursive=recursive, maxdepth=maxdepth + ) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + rpaths = [ + p for p in rpaths if not (trailing_sep(p) or await self._isdir(p)) + ] + if not rpaths: + return + + lpath = make_path_posix(lpath) + source_is_file = len(rpaths) == 1 + dest_is_dir = isinstance(lpath, str) and ( + trailing_sep(lpath) or LocalFileSystem().isdir(lpath) + ) + + exists = source_is_str and ( + (has_magic(rpath) and source_is_file) + or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep) + ) + lpaths = other_paths( + rpaths, + lpath, + exists=exists, + flatten=not source_is_str, + ) + + [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths] + batch_size = kwargs.pop("batch_size", self.batch_size) + + coros = [] + callback.set_size(len(lpaths)) + for lpath, rpath in zip(lpaths, rpaths): + get_file = callback.branch_coro(self._get_file) + coros.append(get_file(rpath, lpath, **kwargs)) + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _isfile(self, path): + try: + return (await self._info(path))["type"] == "file" + except: # noqa: E722 + return False + + async def _isdir(self, path): + try: + return (await self._info(path))["type"] == "directory" + except OSError: + return False + + async def _size(self, path): + return (await self._info(path)).get("size", None) + + async def _sizes(self, paths, batch_size=None): + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._size(p) for p in paths], batch_size=batch_size + ) + + async def _exists(self, path, **kwargs): + try: + await self._info(path, **kwargs) + return True + except FileNotFoundError: + return False + + async def _info(self, path, **kwargs): + raise NotImplementedError + + async def _ls(self, path, detail=True, **kwargs): + raise NotImplementedError + + async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + path = self._strip_protocol(path) + full_dirs = {} + dirs = {} + files = {} + + detail = kwargs.pop("detail", False) + try: + listing = await self._ls(path, detail=True, **kwargs) + except (FileNotFoundError, OSError) as e: + if on_error == "raise": + raise + elif callable(on_error): + on_error(e) + if detail: + yield path, {}, {} + else: + yield path, [], [] + return + + for info in listing: + # each info name must be at least [path]/part , but 
here + # we check also for names like [path]/part/ + pathname = info["name"].rstrip("/") + name = pathname.rsplit("/", 1)[-1] + if info["type"] == "directory" and pathname != path: + # do not include "self" path + full_dirs[name] = pathname + dirs[name] = info + elif pathname == path: + # file-like with same name as give path + files[""] = info + else: + files[name] = info + + if detail: + yield path, dirs, files + else: + yield path, list(dirs), list(files) + + if maxdepth is not None: + maxdepth -= 1 + if maxdepth < 1: + return + + for d in dirs: + async for _ in self._walk( + full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs + ): + yield _ + + async def _glob(self, path, maxdepth=None, **kwargs): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + import re + + seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,) + ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash + path = self._strip_protocol(path) + append_slash_to_dirname = ends_with_sep or path.endswith( + tuple(sep + "**" for sep in seps) + ) + idx_star = path.find("*") if path.find("*") >= 0 else len(path) + idx_qmark = path.find("?") if path.find("?") >= 0 else len(path) + idx_brace = path.find("[") if path.find("[") >= 0 else len(path) + + min_idx = min(idx_star, idx_qmark, idx_brace) + + detail = kwargs.pop("detail", False) + + if not has_magic(path): + if await self._exists(path, **kwargs): + if not detail: + return [path] + else: + return {path: await self._info(path, **kwargs)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:min_idx]: + min_idx = path[:min_idx].rindex("/") + root = path[: min_idx + 1] + depth = path[min_idx + 1 :].count("/") + 1 + else: + root = "" + depth = path[min_idx + 1 :].count("/") + 1 + + if "**" in path: + if maxdepth is not None: + idx_double_stars = path.find("**") + depth_double_stars = path[idx_double_stars:].count("/") + 1 + depth = depth - depth_double_stars + maxdepth + else: + depth = None + + allpaths = await self._find( + root, maxdepth=depth, withdirs=True, detail=True, **kwargs + ) + + pattern = glob_translate(path + ("/" if ends_with_sep else "")) + pattern = re.compile(pattern) + + out = { + p: info + for p, info in sorted(allpaths.items()) + if pattern.match( + ( + p + "/" + if append_slash_to_dirname and info["type"] == "directory" + else p + ) + ) + } + + if detail: + return out + else: + return list(out) + + async def _du(self, path, total=True, maxdepth=None, **kwargs): + sizes = {} + # async for? + for f in await self._find(path, maxdepth=maxdepth, **kwargs): + info = await self._info(f) + sizes[info["name"]] = info["size"] + if total: + return sum(sizes.values()) + else: + return sizes + + async def _find(self, path, maxdepth=None, withdirs=False, **kwargs): + path = self._strip_protocol(path) + out = {} + detail = kwargs.pop("detail", False) + + # Add the root directory if withdirs is requested + # This is needed for posix glob compliance + if withdirs and path != "" and await self._isdir(path): + out[path] = await self._info(path) + + # async for? 
+ async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs): + if withdirs: + files.update(dirs) + out.update({info["name"]: info for name, info in files.items()}) + if not out and (await self._isfile(path)): + # walk works on directories, but find should also return [path] + # when path happens to be a file + out[path] = {} + names = sorted(out) + if not detail: + return names + else: + return {name: out[name] for name in names} + + async def _expand_path(self, path, recursive=False, maxdepth=None): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + if isinstance(path, str): + out = await self._expand_path([path], recursive, maxdepth) + else: + out = set() + path = [self._strip_protocol(p) for p in path] + for p in path: # can gather here + if has_magic(p): + bit = set(await self._glob(p, maxdepth=maxdepth)) + out |= bit + if recursive: + # glob call above expanded one depth so if maxdepth is defined + # then decrement it in expand_path call below. If it is zero + # after decrementing then avoid expand_path call. + if maxdepth is not None and maxdepth <= 1: + continue + out |= set( + await self._expand_path( + list(bit), + recursive=recursive, + maxdepth=maxdepth - 1 if maxdepth is not None else None, + ) + ) + continue + elif recursive: + rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True)) + out |= rec + if p not in out and (recursive is False or (await self._exists(p))): + # should only check once, for the root + out.add(p) + if not out: + raise FileNotFoundError(path) + return sorted(out) + + async def _mkdir(self, path, create_parents=True, **kwargs): + pass # not necessary to implement, may not have directories + + async def _makedirs(self, path, exist_ok=False): + pass # not necessary to implement, may not have directories + + async def open_async(self, path, mode="rb", **kwargs): + if "b" not in mode or kwargs.get("compression"): + raise ValueError + raise NotImplementedError + + +def mirror_sync_methods(obj): + """Populate sync and async methods for obj + + For each method will create a sync version if the name refers to an async method + (coroutine) and there is no override in the child class; will create an async + method for the corresponding sync method if there is no implementation. 
+ + Uses the methods specified in + - async_methods: the set that an implementation is expected to provide + - default_async_methods: that can be derived from their sync version in + AbstractFileSystem + - AsyncFileSystem: async-specific default coroutines + """ + from fsspec import AbstractFileSystem + + for method in async_methods + dir(AsyncFileSystem): + if not method.startswith("_"): + continue + smethod = method[1:] + if private.match(method): + isco = inspect.iscoroutinefunction(getattr(obj, method, None)) + unsync = getattr(getattr(obj, smethod, False), "__func__", None) + is_default = unsync is getattr(AbstractFileSystem, smethod, "") + if isco and is_default: + mth = sync_wrapper(getattr(obj, method), obj=obj) + setattr(obj, smethod, mth) + if not mth.__doc__: + mth.__doc__ = getattr( + getattr(AbstractFileSystem, smethod, None), "__doc__", "" + ) + + +class FSSpecCoroutineCancel(Exception): + pass + + +def _dump_running_tasks( + printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False +): + import traceback + + tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()] + if printout: + [task.print_stack() for task in tasks] + out = [ + { + "locals": task._coro.cr_frame.f_locals, + "file": task._coro.cr_frame.f_code.co_filename, + "firstline": task._coro.cr_frame.f_code.co_firstlineno, + "linelo": task._coro.cr_frame.f_lineno, + "stack": traceback.format_stack(task._coro.cr_frame), + "task": task if with_task else None, + } + for task in tasks + ] + if cancel: + for t in tasks: + cbs = t._callbacks + t.cancel() + asyncio.futures.Future.set_exception(t, exc) + asyncio.futures.Future.cancel(t) + [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures + try: + t._coro.throw(exc) # exits coro, unless explicitly handled + except exc: + pass + return out + + +class AbstractAsyncStreamedFile(AbstractBufferedFile): + # no read buffering, and always auto-commit + # TODO: readahead might still be useful here, but needs async version + + async def read(self, length=-1): + """ + Return data from cache, or fetch pieces as necessary + + Parameters + ---------- + length: int (-1) + Number of bytes to read; if <0, all remaining bytes. + """ + length = -1 if length is None else int(length) + if self.mode != "rb": + raise ValueError("File not in read mode") + if length < 0: + length = self.size - self.loc + if self.closed: + raise ValueError("I/O operation on closed file.") + if length == 0: + # don't even bother calling fetch + return b"" + out = await self._fetch_range(self.loc, self.loc + length) + self.loc += len(out) + return out + + async def write(self, data): + """ + Write data to buffer. + + Buffer only sent on flush() or if buffer is greater than + or equal to blocksize. + + Parameters + ---------- + data: bytes + Set of bytes to be written. 
+ """ + if self.mode not in {"wb", "ab"}: + raise ValueError("File not in write mode") + if self.closed: + raise ValueError("I/O operation on closed file.") + if self.forced: + raise ValueError("This file has been force-flushed, can only close") + out = self.buffer.write(data) + self.loc += out + if self.buffer.tell() >= self.blocksize: + await self.flush() + return out + + async def close(self): + """Close file + + Finalizes writes, discards cache + """ + if getattr(self, "_unclosable", False): + return + if self.closed: + return + if self.mode == "rb": + self.cache = None + else: + if not self.forced: + await self.flush(force=True) + + if self.fs is not None: + self.fs.invalidate_cache(self.path) + self.fs.invalidate_cache(self.fs._parent(self.path)) + + self.closed = True + + async def flush(self, force=False): + if self.closed: + raise ValueError("Flush on closed file") + if force and self.forced: + raise ValueError("Force flush cannot be called more than once") + if force: + self.forced = True + + if self.mode not in {"wb", "ab"}: + # no-op to flush on read-mode + return + + if not force and self.buffer.tell() < self.blocksize: + # Defer write on small block + return + + if self.offset is None: + # Initialize a multipart upload + self.offset = 0 + try: + await self._initiate_upload() + except: # noqa: E722 + self.closed = True + raise + + if await self._upload_chunk(final=force) is not False: + self.offset += self.buffer.seek(0, 2) + self.buffer = io.BytesIO() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def _fetch_range(self, start, end): + raise NotImplementedError + + async def _initiate_upload(self): + pass + + async def _upload_chunk(self, final=False): + raise NotImplementedError diff --git a/MLPY/Lib/site-packages/fsspec/caching.py b/MLPY/Lib/site-packages/fsspec/caching.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f7a1c9f121b2944e6b63117fa98f1357e22d7f --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/caching.py @@ -0,0 +1,951 @@ +from __future__ import annotations + +import collections +import functools +import logging +import math +import os +import threading +import warnings +from concurrent.futures import Future, ThreadPoolExecutor +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Generic, + NamedTuple, + Optional, + OrderedDict, + TypeVar, +) + +if TYPE_CHECKING: + import mmap + + from typing_extensions import ParamSpec + + P = ParamSpec("P") +else: + P = TypeVar("P") + +T = TypeVar("T") + + +logger = logging.getLogger("fsspec") + +Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes + + +class BaseCache: + """Pass-though cache: doesn't keep anything, calls every time + + Acts as base class for other cachers + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + """ + + name: ClassVar[str] = "none" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + self.blocksize = blocksize + self.nblocks = 0 + self.fetcher = fetcher + self.size = size + self.hit_count = 0 + self.miss_count = 0 + # the bytes that we actually requested + self.total_requested_bytes = 0 + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + if start is None: + start = 0 + if stop is None: + stop = self.size + if start >= self.size or start >= stop: 
+ return b"" + return self.fetcher(start, stop) + + def _reset_stats(self) -> None: + """Reset hit and miss counts for a more ganular report e.g. by file.""" + self.hit_count = 0 + self.miss_count = 0 + self.total_requested_bytes = 0 + + def _log_stats(self) -> str: + """Return a formatted string of the cache statistics.""" + if self.hit_count == 0 and self.miss_count == 0: + # a cache that does nothing, this is for logs only + return "" + return " , %s: %d hits, %d misses, %d total requested bytes" % ( + self.name, + self.hit_count, + self.miss_count, + self.total_requested_bytes, + ) + + def __repr__(self) -> str: + # TODO: use rich for better formatting + return f""" + <{self.__class__.__name__}: + block size : {self.blocksize} + block count : {self.nblocks} + file size : {self.size} + cache hits : {self.hit_count} + cache misses: {self.miss_count} + total requested bytes: {self.total_requested_bytes}> + """ + + +class MMapCache(BaseCache): + """memory-mapped sparse file cache + + Opens temporary file, which is filled blocks-wise when data is requested. + Ensure there is enough disc space in the temporary location. + + This cache method might only work on posix + """ + + name = "mmap" + + def __init__( + self, + blocksize: int, + fetcher: Fetcher, + size: int, + location: str | None = None, + blocks: set[int] | None = None, + ) -> None: + super().__init__(blocksize, fetcher, size) + self.blocks = set() if blocks is None else blocks + self.location = location + self.cache = self._makefile() + + def _makefile(self) -> mmap.mmap | bytearray: + import mmap + import tempfile + + if self.size == 0: + return bytearray() + + # posix version + if self.location is None or not os.path.exists(self.location): + if self.location is None: + fd = tempfile.TemporaryFile() + self.blocks = set() + else: + fd = open(self.location, "wb+") + fd.seek(self.size - 1) + fd.write(b"1") + fd.flush() + else: + fd = open(self.location, "r+b") + + return mmap.mmap(fd.fileno(), self.size) + + def _fetch(self, start: int | None, end: int | None) -> bytes: + logger.debug(f"MMap cache fetching {start}-{end}") + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + start_block = start // self.blocksize + end_block = end // self.blocksize + need = [i for i in range(start_block, end_block + 1) if i not in self.blocks] + hits = [i for i in range(start_block, end_block + 1) if i in self.blocks] + self.miss_count += len(need) + self.hit_count += len(hits) + while need: + # TODO: not a for loop so we can consolidate blocks later to + # make fewer fetch calls; this could be parallel + i = need.pop(0) + + sstart = i * self.blocksize + send = min(sstart + self.blocksize, self.size) + self.total_requested_bytes += send - sstart + logger.debug(f"MMap get block #{i} ({sstart}-{send})") + self.cache[sstart:send] = self.fetcher(sstart, send) + self.blocks.add(i) + + return self.cache[start:end] + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__.copy() + # Remove the unpicklable entries. + del state["cache"] + return state + + def __setstate__(self, state: dict[str, Any]) -> None: + # Restore instance attributes + self.__dict__.update(state) + self.cache = self._makefile() + + +class ReadAheadCache(BaseCache): + """Cache which reads only when we get beyond a block of data + + This is a much simpler version of BytesCache, and does not attempt to + fill holes in the cache or keep fragments alive. 
It is best suited to + many small reads in a sequential order (e.g., reading lines from a file). + """ + + name = "readahead" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start = 0 + self.end = 0 + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None or end > self.size: + end = self.size + if start >= self.size or start >= end: + return b"" + l = end - start + if start >= self.start and end <= self.end: + # cache hit + self.hit_count += 1 + return self.cache[start - self.start : end - self.start] + elif self.start <= start < self.end: + # partial hit + self.miss_count += 1 + part = self.cache[start - self.start :] + l -= len(part) + start = self.end + else: + # miss + self.miss_count += 1 + part = b"" + end = min(self.size, end + self.blocksize) + self.total_requested_bytes += end - start + self.cache = self.fetcher(start, end) # new block replaces old + self.start = start + self.end = self.start + len(self.cache) + return part + self.cache[:l] + + +class FirstChunkCache(BaseCache): + """Caches the first block of a file only + + This may be useful for file types where the metadata is stored in the header, + but is randomly accessed. + """ + + name = "first" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + if blocksize > size: + # this will buffer the whole thing + blocksize = size + super().__init__(blocksize, fetcher, size) + self.cache: bytes | None = None + + def _fetch(self, start: int | None, end: int | None) -> bytes: + start = start or 0 + if start > self.size: + logger.debug("FirstChunkCache: requested start > file size") + return b"" + + end = min(end, self.size) + + if start < self.blocksize: + if self.cache is None: + self.miss_count += 1 + if end > self.blocksize: + self.total_requested_bytes += end + data = self.fetcher(0, end) + self.cache = data[: self.blocksize] + return data[start:] + self.cache = self.fetcher(0, self.blocksize) + self.total_requested_bytes += self.blocksize + part = self.cache[start:end] + if end > self.blocksize: + self.total_requested_bytes += end - self.blocksize + part += self.fetcher(self.blocksize, end) + self.hit_count += 1 + return part + else: + self.miss_count += 1 + self.total_requested_bytes += end - start + return self.fetcher(start, end) + + +class BlockCache(BaseCache): + """ + Cache holding memory as a set of blocks. + + Requests are only ever made ``blocksize`` at a time, and are + stored in an LRU cache. The least recently accessed block is + discarded when more than ``maxblocks`` are stored. + + Parameters + ---------- + blocksize : int + The number of bytes to store in each block. + Requests are only ever made for ``blocksize``, so this + should balance the overhead of making a request against + the granularity of the blocks. + fetcher : Callable + size : int + The total size of the file being cached. + maxblocks : int + The maximum number of blocks to cache for. The maximum memory + use for this cache is then ``blocksize * maxblocks``. + """ + + name = "blockcache" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32 + ) -> None: + super().__init__(blocksize, fetcher, size) + self.nblocks = math.ceil(size / blocksize) + self.maxblocks = maxblocks + self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block) + + def cache_info(self): + """ + The statistics on the block cache. 
+ + Returns + ------- + NamedTuple + Returned directly from the LRU Cache used internally. + """ + return self._fetch_block_cached.cache_info() + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__ + del state["_fetch_block_cached"] + return state + + def __setstate__(self, state: dict[str, Any]) -> None: + self.__dict__.update(state) + self._fetch_block_cached = functools.lru_cache(state["maxblocks"])( + self._fetch_block + ) + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + + # byte position -> block numbers + start_block_number = start // self.blocksize + end_block_number = end // self.blocksize + + # these are cached, so safe to do multiple calls for the same start and end. + for block_number in range(start_block_number, end_block_number + 1): + self._fetch_block_cached(block_number) + + return self._read_cache( + start, + end, + start_block_number=start_block_number, + end_block_number=end_block_number, + ) + + def _fetch_block(self, block_number: int) -> bytes: + """ + Fetch the block of data for `block_number`. + """ + if block_number > self.nblocks: + raise ValueError( + f"'block_number={block_number}' is greater than " + f"the number of blocks ({self.nblocks})" + ) + + start = block_number * self.blocksize + end = start + self.blocksize + self.total_requested_bytes += end - start + self.miss_count += 1 + logger.info("BlockCache fetching block %d", block_number) + block_contents = super()._fetch(start, end) + return block_contents + + def _read_cache( + self, start: int, end: int, start_block_number: int, end_block_number: int + ) -> bytes: + """ + Read from our block cache. + + Parameters + ---------- + start, end : int + The start and end byte positions. + start_block_number, end_block_number : int + The start and end block numbers. + """ + start_pos = start % self.blocksize + end_pos = end % self.blocksize + + self.hit_count += 1 + if start_block_number == end_block_number: + block: bytes = self._fetch_block_cached(start_block_number) + return block[start_pos:end_pos] + + else: + # read from the initial + out = [self._fetch_block_cached(start_block_number)[start_pos:]] + + # intermediate blocks + # Note: it'd be nice to combine these into one big request. However + # that doesn't play nicely with our LRU cache. + out.extend( + map( + self._fetch_block_cached, + range(start_block_number + 1, end_block_number), + ) + ) + + # final block + out.append(self._fetch_block_cached(end_block_number)[:end_pos]) + + return b"".join(out) + + +class BytesCache(BaseCache): + """Cache which holds data in a in-memory bytes object + + Implements read-ahead by the block size, for semi-random reads progressing + through the file. + + Parameters + ---------- + trim: bool + As we read more data, whether to discard the start of the buffer when + we are more than a blocksize ahead of it. + """ + + name: ClassVar[str] = "bytes" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True + ) -> None: + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start: int | None = None + self.end: int | None = None + self.trim = trim + + def _fetch(self, start: int | None, end: int | None) -> bytes: + # TODO: only set start/end after fetch, in case it fails? + # is this where retry logic might go? 
+ if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + if ( + self.start is not None + and start >= self.start + and self.end is not None + and end < self.end + ): + # cache hit: we have all the required data + offset = start - self.start + self.hit_count += 1 + return self.cache[offset : offset + end - start] + + if self.blocksize: + bend = min(self.size, end + self.blocksize) + else: + bend = end + + if bend == start or start > self.size: + return b"" + + if (self.start is None or start < self.start) and ( + self.end is None or end > self.end + ): + # First read, or extending both before and after + self.total_requested_bytes += bend - start + self.miss_count += 1 + self.cache = self.fetcher(start, bend) + self.start = start + else: + assert self.start is not None + assert self.end is not None + self.miss_count += 1 + + if start < self.start: + if self.end is None or self.end - end > self.blocksize: + self.total_requested_bytes += bend - start + self.cache = self.fetcher(start, bend) + self.start = start + else: + self.total_requested_bytes += self.start - start + new = self.fetcher(start, self.start) + self.start = start + self.cache = new + self.cache + elif self.end is not None and bend > self.end: + if self.end > self.size: + pass + elif end - self.end > self.blocksize: + self.total_requested_bytes += bend - start + self.cache = self.fetcher(start, bend) + self.start = start + else: + self.total_requested_bytes += bend - self.end + new = self.fetcher(self.end, bend) + self.cache = self.cache + new + + self.end = self.start + len(self.cache) + offset = start - self.start + out = self.cache[offset : offset + end - start] + if self.trim: + num = (self.end - self.start) // (self.blocksize + 1) + if num > 1: + self.start += self.blocksize * num + self.cache = self.cache[self.blocksize * num :] + return out + + def __len__(self) -> int: + return len(self.cache) + + +class AllBytes(BaseCache): + """Cache entire contents of the file""" + + name: ClassVar[str] = "all" + + def __init__( + self, + blocksize: int | None = None, + fetcher: Fetcher | None = None, + size: int | None = None, + data: bytes | None = None, + ) -> None: + super().__init__(blocksize, fetcher, size) # type: ignore[arg-type] + if data is None: + self.miss_count += 1 + self.total_requested_bytes += self.size + data = self.fetcher(0, self.size) + self.data = data + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + self.hit_count += 1 + return self.data[start:stop] + + +class KnownPartsOfAFile(BaseCache): + """ + Cache holding known file parts. + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + data: dict + A dictionary mapping explicit `(start, stop)` file-offset tuples + with known bytes. + strict: bool, default True + Whether to fetch reads that go beyond a known byte-range boundary. + If `False`, any read that ends outside a known part will be zero + padded. Note that zero padding will not be used for reads that + begin outside a known byte-range. 
+ """ + + name: ClassVar[str] = "parts" + + def __init__( + self, + blocksize: int, + fetcher: Fetcher, + size: int, + data: Optional[dict[tuple[int, int], bytes]] = None, + strict: bool = True, + **_: Any, + ): + super().__init__(blocksize, fetcher, size) + self.strict = strict + + # simple consolidation of contiguous blocks + if data: + old_offsets = sorted(data.keys()) + offsets = [old_offsets[0]] + blocks = [data.pop(old_offsets[0])] + for start, stop in old_offsets[1:]: + start0, stop0 = offsets[-1] + if start == stop0: + offsets[-1] = (start0, stop) + blocks[-1] += data.pop((start, stop)) + else: + offsets.append((start, stop)) + blocks.append(data.pop((start, stop))) + + self.data = dict(zip(offsets, blocks)) + else: + self.data = {} + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + if start is None: + start = 0 + if stop is None: + stop = self.size + + out = b"" + for (loc0, loc1), data in self.data.items(): + # If self.strict=False, use zero-padded data + # for reads beyond the end of a "known" buffer + if loc0 <= start < loc1: + off = start - loc0 + out = data[off : off + stop - start] + if not self.strict or loc0 <= stop <= loc1: + # The request is within a known range, or + # it begins within a known range, and we + # are allowed to pad reads beyond the + # buffer with zero + out += b"\x00" * (stop - start - len(out)) + self.hit_count += 1 + return out + else: + # The request ends outside a known range, + # and we are being "strict" about reads + # beyond the buffer + start = loc1 + break + + # We only get here if there is a request outside the + # known parts of the file. In an ideal world, this + # should never happen + if self.fetcher is None: + # We cannot fetch the data, so raise an error + raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ") + # We can fetch the data, but should warn the user + # that this may be slow + warnings.warn( + f"Read is outside the known file parts: {(start, stop)}. " + f"IO/caching performance may be poor!" 
+ ) + logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}") + self.total_requested_bytes += stop - start + self.miss_count += 1 + return out + super()._fetch(start, stop) + + +class UpdatableLRU(Generic[P, T]): + """ + Custom implementation of LRU cache that allows updating keys + + Used by BackgroudBlockCache + """ + + class CacheInfo(NamedTuple): + hits: int + misses: int + maxsize: int + currsize: int + + def __init__(self, func: Callable[P, T], max_size: int = 128) -> None: + self._cache: OrderedDict[Any, T] = collections.OrderedDict() + self._func = func + self._max_size = max_size + self._hits = 0 + self._misses = 0 + self._lock = threading.Lock() + + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: + if kwargs: + raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}") + with self._lock: + if args in self._cache: + self._cache.move_to_end(args) + self._hits += 1 + return self._cache[args] + + result = self._func(*args, **kwargs) + + with self._lock: + self._cache[args] = result + self._misses += 1 + if len(self._cache) > self._max_size: + self._cache.popitem(last=False) + + return result + + def is_key_cached(self, *args: Any) -> bool: + with self._lock: + return args in self._cache + + def add_key(self, result: T, *args: Any) -> None: + with self._lock: + self._cache[args] = result + if len(self._cache) > self._max_size: + self._cache.popitem(last=False) + + def cache_info(self) -> UpdatableLRU.CacheInfo: + with self._lock: + return self.CacheInfo( + maxsize=self._max_size, + currsize=len(self._cache), + hits=self._hits, + misses=self._misses, + ) + + +class BackgroundBlockCache(BaseCache): + """ + Cache holding memory as a set of blocks with pre-loading of + the next block in the background. + + Requests are only ever made ``blocksize`` at a time, and are + stored in an LRU cache. The least recently accessed block is + discarded when more than ``maxblocks`` are stored. If the + next block is not in cache, it is loaded in a separate thread + in non-blocking way. + + Parameters + ---------- + blocksize : int + The number of bytes to store in each block. + Requests are only ever made for ``blocksize``, so this + should balance the overhead of making a request against + the granularity of the blocks. + fetcher : Callable + size : int + The total size of the file being cached. + maxblocks : int + The maximum number of blocks to cache for. The maximum memory + use for this cache is then ``blocksize * maxblocks``. + """ + + name: ClassVar[str] = "background" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32 + ) -> None: + super().__init__(blocksize, fetcher, size) + self.nblocks = math.ceil(size / blocksize) + self.maxblocks = maxblocks + self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks) + + self._thread_executor = ThreadPoolExecutor(max_workers=1) + self._fetch_future_block_number: int | None = None + self._fetch_future: Future[bytes] | None = None + self._fetch_future_lock = threading.Lock() + + def cache_info(self) -> UpdatableLRU.CacheInfo: + """ + The statistics on the block cache. + + Returns + ------- + NamedTuple + Returned directly from the LRU Cache used internally. 
+ """ + return self._fetch_block_cached.cache_info() + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__ + del state["_fetch_block_cached"] + del state["_thread_executor"] + del state["_fetch_future_block_number"] + del state["_fetch_future"] + del state["_fetch_future_lock"] + return state + + def __setstate__(self, state) -> None: + self.__dict__.update(state) + self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"]) + self._thread_executor = ThreadPoolExecutor(max_workers=1) + self._fetch_future_block_number = None + self._fetch_future = None + self._fetch_future_lock = threading.Lock() + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + + # byte position -> block numbers + start_block_number = start // self.blocksize + end_block_number = end // self.blocksize + + fetch_future_block_number = None + fetch_future = None + with self._fetch_future_lock: + # Background thread is running. Check we we can or must join it. + if self._fetch_future is not None: + assert self._fetch_future_block_number is not None + if self._fetch_future.done(): + logger.info("BlockCache joined background fetch without waiting.") + self._fetch_block_cached.add_key( + self._fetch_future.result(), self._fetch_future_block_number + ) + # Cleanup the fetch variables. Done with fetching the block. + self._fetch_future_block_number = None + self._fetch_future = None + else: + # Must join if we need the block for the current fetch + must_join = bool( + start_block_number + <= self._fetch_future_block_number + <= end_block_number + ) + if must_join: + # Copy to the local variables to release lock + # before waiting for result + fetch_future_block_number = self._fetch_future_block_number + fetch_future = self._fetch_future + + # Cleanup the fetch variables. Have a local copy. + self._fetch_future_block_number = None + self._fetch_future = None + + # Need to wait for the future for the current read + if fetch_future is not None: + logger.info("BlockCache waiting for background fetch.") + # Wait until result and put it in cache + self._fetch_block_cached.add_key( + fetch_future.result(), fetch_future_block_number + ) + + # these are cached, so safe to do multiple calls for the same start and end. + for block_number in range(start_block_number, end_block_number + 1): + self._fetch_block_cached(block_number) + + # fetch next block in the background if nothing is running in the background, + # the block is within file and it is not already cached + end_block_plus_1 = end_block_number + 1 + with self._fetch_future_lock: + if ( + self._fetch_future is None + and end_block_plus_1 <= self.nblocks + and not self._fetch_block_cached.is_key_cached(end_block_plus_1) + ): + self._fetch_future_block_number = end_block_plus_1 + self._fetch_future = self._thread_executor.submit( + self._fetch_block, end_block_plus_1, "async" + ) + + return self._read_cache( + start, + end, + start_block_number=start_block_number, + end_block_number=end_block_number, + ) + + def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes: + """ + Fetch the block of data for `block_number`. 
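+
+        This is called both synchronously (for blocks needed right now) and
+        from the background thread via the executor, in which case ``log_info``
+        is set to "async" purely for logging.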
+ """ + if block_number > self.nblocks: + raise ValueError( + f"'block_number={block_number}' is greater than " + f"the number of blocks ({self.nblocks})" + ) + + start = block_number * self.blocksize + end = start + self.blocksize + logger.info("BlockCache fetching block (%s) %d", log_info, block_number) + self.total_requested_bytes += end - start + self.miss_count += 1 + block_contents = super()._fetch(start, end) + return block_contents + + def _read_cache( + self, start: int, end: int, start_block_number: int, end_block_number: int + ) -> bytes: + """ + Read from our block cache. + + Parameters + ---------- + start, end : int + The start and end byte positions. + start_block_number, end_block_number : int + The start and end block numbers. + """ + start_pos = start % self.blocksize + end_pos = end % self.blocksize + + # kind of pointless to count this as a hit, but it is + self.hit_count += 1 + + if start_block_number == end_block_number: + block = self._fetch_block_cached(start_block_number) + return block[start_pos:end_pos] + + else: + # read from the initial + out = [self._fetch_block_cached(start_block_number)[start_pos:]] + + # intermediate blocks + # Note: it'd be nice to combine these into one big request. However + # that doesn't play nicely with our LRU cache. + out.extend( + map( + self._fetch_block_cached, + range(start_block_number + 1, end_block_number), + ) + ) + + # final block + out.append(self._fetch_block_cached(end_block_number)[:end_pos]) + + return b"".join(out) + + +caches: dict[str | None, type[BaseCache]] = { + # one custom case + None: BaseCache, +} + + +def register_cache(cls: type[BaseCache], clobber: bool = False) -> None: + """'Register' cache implementation. + + Parameters + ---------- + clobber: bool, optional + If set to True (default is False) - allow to overwrite existing + entry. + + Raises + ------ + ValueError + """ + name = cls.name + if not clobber and name in caches: + raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}") + caches[name] = cls + + +for c in ( + BaseCache, + MMapCache, + BytesCache, + ReadAheadCache, + BlockCache, + FirstChunkCache, + AllBytes, + KnownPartsOfAFile, + BackgroundBlockCache, +): + register_cache(c) diff --git a/MLPY/Lib/site-packages/fsspec/callbacks.py b/MLPY/Lib/site-packages/fsspec/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca99ca6ac3cd69b28bcd1550f6550e8e648c5fe --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/callbacks.py @@ -0,0 +1,324 @@ +from functools import wraps + + +class Callback: + """ + Base class and interface for callback mechanism + + This class can be used directly for monitoring file transfers by + providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument, + below), or subclassed for more specialised behaviour. + + Parameters + ---------- + size: int (optional) + Nominal quantity for the value that corresponds to a complete + transfer, e.g., total number of tiles or total number of + bytes + value: int (0) + Starting internal counter value + hooks: dict or None + A dict of named functions to be called on each update. 
The signature + of these must be ``f(size, value, **kwargs)`` + """ + + def __init__(self, size=None, value=0, hooks=None, **kwargs): + self.size = size + self.value = value + self.hooks = hooks or {} + self.kw = kwargs + + def __enter__(self): + return self + + def __exit__(self, *exc_args): + self.close() + + def close(self): + """Close callback.""" + + def branched(self, path_1, path_2, **kwargs): + """ + Return callback for child transfers + + If this callback is operating at a higher level, e.g., put, which may + trigger transfers that can also be monitored. The function returns a callback + that has to be passed to the child method, e.g., put_file, + as `callback=` argument. + + The implementation uses `callback.branch` for compatibility. + When implementing callbacks, it is recommended to override this function instead + of `branch` and avoid calling `super().branched(...)`. + + Prefer using this function over `branch`. + + Parameters + ---------- + path_1: str + Child's source path + path_2: str + Child's destination path + **kwargs: + Arbitrary keyword arguments + + Returns + ------- + callback: Callback + A callback instance to be passed to the child method + """ + self.branch(path_1, path_2, kwargs) + # mutate kwargs so that we can force the caller to pass "callback=" explicitly + return kwargs.pop("callback", DEFAULT_CALLBACK) + + def branch_coro(self, fn): + """ + Wraps a coroutine, and pass a new child callback to it. + """ + + @wraps(fn) + async def func(path1, path2: str, **kwargs): + with self.branched(path1, path2, **kwargs) as child: + return await fn(path1, path2, callback=child, **kwargs) + + return func + + def set_size(self, size): + """ + Set the internal maximum size attribute + + Usually called if not initially set at instantiation. Note that this + triggers a ``call()``. + + Parameters + ---------- + size: int + """ + self.size = size + self.call() + + def absolute_update(self, value): + """ + Set the internal value state + + Triggers ``call()`` + + Parameters + ---------- + value: int + """ + self.value = value + self.call() + + def relative_update(self, inc=1): + """ + Delta increment the internal counter + + Triggers ``call()`` + + Parameters + ---------- + inc: int + """ + self.value += inc + self.call() + + def call(self, hook_name=None, **kwargs): + """ + Execute hook(s) with current state + + Each function is passed the internal size and current value + + Parameters + ---------- + hook_name: str or None + If given, execute on this hook + kwargs: passed on to (all) hook(s) + """ + if not self.hooks: + return + kw = self.kw.copy() + kw.update(kwargs) + if hook_name: + if hook_name not in self.hooks: + return + return self.hooks[hook_name](self.size, self.value, **kw) + for hook in self.hooks.values() or []: + hook(self.size, self.value, **kw) + + def wrap(self, iterable): + """ + Wrap an iterable to call ``relative_update`` on each iterations + + Parameters + ---------- + iterable: Iterable + The iterable that is being wrapped + """ + for item in iterable: + self.relative_update() + yield item + + def branch(self, path_1, path_2, kwargs): + """ + Set callbacks for child transfers + + If this callback is operating at a higher level, e.g., put, which may + trigger transfers that can also be monitored. The passed kwargs are + to be *mutated* to add ``callback=``, if this class supports branching + to children. 
+ + Parameters + ---------- + path_1: str + Child's source path + path_2: str + Child's destination path + kwargs: dict + arguments passed to child method, e.g., put_file. + + Returns + ------- + + """ + return None + + def no_op(self, *_, **__): + pass + + def __getattr__(self, item): + """ + If undefined methods are called on this class, nothing happens + """ + return self.no_op + + @classmethod + def as_callback(cls, maybe_callback=None): + """Transform callback=... into Callback instance + + For the special value of ``None``, return the global instance of + ``NoOpCallback``. This is an alternative to including + ``callback=DEFAULT_CALLBACK`` directly in a method signature. + """ + if maybe_callback is None: + return DEFAULT_CALLBACK + return maybe_callback + + +class NoOpCallback(Callback): + """ + This implementation of Callback does exactly nothing + """ + + def call(self, *args, **kwargs): + return None + + +class DotPrinterCallback(Callback): + """ + Simple example Callback implementation + + Almost identical to Callback with a hook that prints a char; here we + demonstrate how the outer layer may print "#" and the inner layer "." + """ + + def __init__(self, chr_to_print="#", **kwargs): + self.chr = chr_to_print + super().__init__(**kwargs) + + def branch(self, path_1, path_2, kwargs): + """Mutate kwargs to add new instance with different print char""" + kwargs["callback"] = DotPrinterCallback(".") + + def call(self, **kwargs): + """Just outputs a character""" + print(self.chr, end="") + + +class TqdmCallback(Callback): + """ + A callback to display a progress bar using tqdm + + Parameters + ---------- + tqdm_kwargs : dict, (optional) + Any argument accepted by the tqdm constructor. + See the `tqdm doc `_. + Will be forwarded to `tqdm_cls`. + tqdm_cls: (optional) + subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`. + + Examples + -------- + >>> import fsspec + >>> from fsspec.callbacks import TqdmCallback + >>> fs = fsspec.filesystem("memory") + >>> path2distant_data = "/your-path" + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(), + ) + + You can forward args to tqdm using the ``tqdm_kwargs`` parameter. + + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}), + ) + + You can also customize the progress bar by passing a subclass of `tqdm`. + + .. 
code-block:: python + + class TqdmFormat(tqdm): + '''Provides a `total_time` format parameter''' + @property + def format_dict(self): + d = super().format_dict + total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1) + d.update(total_time=self.format_interval(total_time) + " in total") + return d + + >>> with TqdmCallback( + tqdm_kwargs={ + "desc": "desc", + "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}", + }, + tqdm_cls=TqdmFormat, + ) as callback: + fs.upload(".", path2distant_data, recursive=True, callback=callback) + """ + + def __init__(self, tqdm_kwargs=None, *args, **kwargs): + try: + from tqdm import tqdm + + except ImportError as exce: + raise ImportError( + "Using TqdmCallback requires tqdm to be installed" + ) from exce + + self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm) + self._tqdm_kwargs = tqdm_kwargs or {} + self.tqdm = None + super().__init__(*args, **kwargs) + + def call(self, *args, **kwargs): + if self.tqdm is None: + self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs) + self.tqdm.total = self.size + self.tqdm.update(self.value - self.tqdm.n) + + def close(self): + if self.tqdm is not None: + self.tqdm.close() + self.tqdm = None + + def __del__(self): + return self.close() + + +DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback() diff --git a/MLPY/Lib/site-packages/fsspec/compression.py b/MLPY/Lib/site-packages/fsspec/compression.py new file mode 100644 index 0000000000000000000000000000000000000000..fc519c24532fdcd3f6f1b3fc645d38e1e1dde1d7 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/compression.py @@ -0,0 +1,175 @@ +"""Helper functions for a standard streaming compression API""" + +from zipfile import ZipFile + +import fsspec.utils +from fsspec.spec import AbstractBufferedFile + + +def noop_file(file, mode, **kwargs): + return file + + +# TODO: files should also be available as contexts +# should be functions of the form func(infile, mode=, **kwargs) -> file-like +compr = {None: noop_file} + + +def register_compression(name, callback, extensions, force=False): + """Register an "inferable" file compression type. + + Registers transparent file compression type for use with fsspec.open. + Compression can be specified by name in open, or "infer"-ed for any files + ending with the given extensions. + + Args: + name: (str) The compression type name. Eg. "gzip". + callback: A callable of form (infile, mode, **kwargs) -> file-like. + Accepts an input file-like object, the target mode and kwargs. + Returns a wrapped file-like object. + extensions: (str, Iterable[str]) A file extension, or list of file + extensions for which to infer this compression scheme. Eg. "gz". + force: (bool) Force re-registration of compression type or extensions. + + Raises: + ValueError: If name or extensions already registered, and not force. 
+ + """ + if isinstance(extensions, str): + extensions = [extensions] + + # Validate registration + if name in compr and not force: + raise ValueError(f"Duplicate compression registration: {name}") + + for ext in extensions: + if ext in fsspec.utils.compressions and not force: + raise ValueError(f"Duplicate compression file extension: {ext} ({name})") + + compr[name] = callback + + for ext in extensions: + fsspec.utils.compressions[ext] = name + + +def unzip(infile, mode="rb", filename=None, **kwargs): + if "r" not in mode: + filename = filename or "file" + z = ZipFile(infile, mode="w", **kwargs) + fo = z.open(filename, mode="w") + fo.close = lambda closer=fo.close: closer() or z.close() + return fo + z = ZipFile(infile) + if filename is None: + filename = z.namelist()[0] + return z.open(filename, mode="r", **kwargs) + + +register_compression("zip", unzip, "zip") + +try: + from bz2 import BZ2File +except ImportError: + pass +else: + register_compression("bz2", BZ2File, "bz2") + +try: # pragma: no cover + from isal import igzip + + def isal(infile, mode="rb", **kwargs): + return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs) + + register_compression("gzip", isal, "gz") +except ImportError: + from gzip import GzipFile + + register_compression( + "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz" + ) + +try: + from lzma import LZMAFile + + register_compression("lzma", LZMAFile, "lzma") + register_compression("xz", LZMAFile, "xz") +except ImportError: + pass + +try: + import lzmaffi + + register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True) + register_compression("xz", lzmaffi.LZMAFile, "xz", force=True) +except ImportError: + pass + + +class SnappyFile(AbstractBufferedFile): + def __init__(self, infile, mode, **kwargs): + import snappy + + super().__init__( + fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs + ) + self.infile = infile + if "r" in mode: + self.codec = snappy.StreamDecompressor() + else: + self.codec = snappy.StreamCompressor() + + def _upload_chunk(self, final=False): + self.buffer.seek(0) + out = self.codec.add_chunk(self.buffer.read()) + self.infile.write(out) + return True + + def seek(self, loc, whence=0): + raise NotImplementedError("SnappyFile is not seekable") + + def seekable(self): + return False + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + data = self.infile.read(end - start) + return self.codec.decompress(data) + + +try: + import snappy + + snappy.compress(b"") + # Snappy may use the .sz file extension, but this is not part of the + # standard implementation. 
+ register_compression("snappy", SnappyFile, []) + +except (ImportError, NameError, AttributeError): + pass + +try: + import lz4.frame + + register_compression("lz4", lz4.frame.open, "lz4") +except ImportError: + pass + +try: + import zstandard as zstd + + def zstandard_file(infile, mode="rb"): + if "r" in mode: + cctx = zstd.ZstdDecompressor() + return cctx.stream_reader(infile) + else: + cctx = zstd.ZstdCompressor(level=10) + return cctx.stream_writer(infile) + + register_compression("zstd", zstandard_file, "zst") +except ImportError: + pass + + +def available_compressions(): + """Return a list of the implemented compressions.""" + return list(compr) diff --git a/MLPY/Lib/site-packages/fsspec/config.py b/MLPY/Lib/site-packages/fsspec/config.py new file mode 100644 index 0000000000000000000000000000000000000000..76d9af14aaf7df47c4551c169f27b05abf9c269e --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/config.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import configparser +import json +import os +import warnings +from typing import Any + +conf: dict[str, dict[str, Any]] = {} +default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec") +conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir) + + +def set_conf_env(conf_dict, envdict=os.environ): + """Set config values from environment variables + + Looks for variables of the form ``FSSPEC_`` and + ``FSSPEC__``. For ``FSSPEC_`` the value is parsed + as a json dictionary and used to ``update`` the config of the + corresponding protocol. For ``FSSPEC__`` there is no + attempt to convert the string value, but the kwarg keys will be lower-cased. + + The ``FSSPEC__`` variables are applied after the + ``FSSPEC_`` ones. + + Parameters + ---------- + conf_dict : dict(str, dict) + This dict will be mutated + envdict : dict-like(str, str) + Source for the values - usually the real environment + """ + kwarg_keys = [] + for key in envdict: + if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_": + if key.count("_") > 1: + kwarg_keys.append(key) + continue + try: + value = json.loads(envdict[key]) + except json.decoder.JSONDecodeError as ex: + warnings.warn( + f"Ignoring environment variable {key} due to a parse failure: {ex}" + ) + else: + if isinstance(value, dict): + _, proto = key.split("_", 1) + conf_dict.setdefault(proto.lower(), {}).update(value) + else: + warnings.warn( + f"Ignoring environment variable {key} due to not being a dict:" + f" {type(value)}" + ) + elif key.startswith("FSSPEC"): + warnings.warn( + f"Ignoring environment variable {key} due to having an unexpected name" + ) + + for key in kwarg_keys: + _, proto, kwarg = key.split("_", 2) + conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key] + + +def set_conf_files(cdir, conf_dict): + """Set config values from files + + Scans for INI and JSON files in the given dictionary, and uses their + contents to set the config. In case of repeated values, later values + win. + + In the case of INI files, all values are strings, and these will not + be converted. 
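+
+    As an illustration, a hypothetical ``conf.json`` in ``cdir`` containing
+    ``{"s3": {"anon": true}}`` would make ``anon=True`` a default keyword
+    argument for the "s3" protocol.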
+ + Parameters + ---------- + cdir : str + Directory to search + conf_dict : dict(str, dict) + This dict will be mutated + """ + if not os.path.isdir(cdir): + return + allfiles = sorted(os.listdir(cdir)) + for fn in allfiles: + if fn.endswith(".ini"): + ini = configparser.ConfigParser() + ini.read(os.path.join(cdir, fn)) + for key in ini: + if key == "DEFAULT": + continue + conf_dict.setdefault(key, {}).update(dict(ini[key])) + if fn.endswith(".json"): + with open(os.path.join(cdir, fn)) as f: + js = json.load(f) + for key in js: + conf_dict.setdefault(key, {}).update(dict(js[key])) + + +def apply_config(cls, kwargs, conf_dict=None): + """Supply default values for kwargs when instantiating class + + Augments the passed kwargs, by finding entries in the config dict + which match the classes ``.protocol`` attribute (one or more str) + + Parameters + ---------- + cls : file system implementation + kwargs : dict + conf_dict : dict of dict + Typically this is the global configuration + + Returns + ------- + dict : the modified set of kwargs + """ + if conf_dict is None: + conf_dict = conf + protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol] + kw = {} + for proto in protos: + # default kwargs from the current state of the config + if proto in conf_dict: + kw.update(conf_dict[proto]) + # explicit kwargs always win + kw.update(**kwargs) + kwargs = kw + return kwargs + + +set_conf_files(conf_dir, conf) +set_conf_env(conf) diff --git a/MLPY/Lib/site-packages/fsspec/conftest.py b/MLPY/Lib/site-packages/fsspec/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6874a42c4895c3c7b973dc5d63fd4488a4e60b44 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/conftest.py @@ -0,0 +1,55 @@ +import os +import shutil +import subprocess +import sys +import time + +import pytest + +import fsspec +from fsspec.implementations.cached import CachingFileSystem + + +@pytest.fixture() +def m(): + """ + Fixture providing a memory filesystem. + """ + m = fsspec.filesystem("memory") + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + try: + yield m + finally: + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + + +@pytest.fixture +def ftp_writable(tmpdir): + """ + Fixture providing a writable FTP filesystem. 
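+
+    Yields ``(host, port, username, password)`` for a throw-away pyftpdlib
+    server rooted at ``tmpdir``.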
+ """ + pytest.importorskip("pyftpdlib") + from fsspec.implementations.ftp import FTPFileSystem + + FTPFileSystem.clear_instance_cache() # remove lingering connections + CachingFileSystem.clear_instance_cache() + d = str(tmpdir) + with open(os.path.join(d, "out"), "wb") as f: + f.write(b"hello" * 10000) + P = subprocess.Popen( + [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"] + ) + try: + time.sleep(1) + yield "localhost", 2121, "user", "pass" + finally: + P.terminate() + P.wait() + try: + shutil.rmtree(tmpdir) + except Exception: + pass diff --git a/MLPY/Lib/site-packages/fsspec/core.py b/MLPY/Lib/site-packages/fsspec/core.py new file mode 100644 index 0000000000000000000000000000000000000000..bd4f98d13ab69a7eb85dc60f8c9b2a977072d908 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/core.py @@ -0,0 +1,738 @@ +from __future__ import annotations + +import io +import logging +import os +import re +from glob import has_magic +from pathlib import Path + +# for backwards compat, we export cache things from here too +from fsspec.caching import ( # noqa: F401 + BaseCache, + BlockCache, + BytesCache, + MMapCache, + ReadAheadCache, + caches, +) +from fsspec.compression import compr +from fsspec.config import conf +from fsspec.registry import filesystem, get_filesystem_class +from fsspec.utils import ( + _unstrip_protocol, + build_name_function, + infer_compression, + stringify_path, +) + +logger = logging.getLogger("fsspec") + + +class OpenFile: + """ + File-like object to be used in a context + + Can layer (buffered) text-mode and compression over any file-system, which + are typically binary-only. + + These instances are safe to serialize, as the low-level file object + is not created until invoked using ``with``. + + Parameters + ---------- + fs: FileSystem + The file system to use for opening the file. Should be a subclass or duck-type + with ``fsspec.spec.AbstractFileSystem`` + path: str + Location to open + mode: str like 'rb', optional + Mode of the opened file + compression: str or None, optional + Compression to apply + encoding: str or None, optional + The encoding to use if opened in text mode. + errors: str or None, optional + How to handle encoding errors if opened in text mode. + newline: None or str + Passed to TextIOWrapper in text mode, how to handle line endings. + autoopen: bool + If True, calls open() immediately. Mostly used by pickle + pos: int + If given and autoopen is True, seek to this location immediately + """ + + def __init__( + self, + fs, + path, + mode="rb", + compression=None, + encoding=None, + errors=None, + newline=None, + ): + self.fs = fs + self.path = path + self.mode = mode + self.compression = get_compression(path, compression) + self.encoding = encoding + self.errors = errors + self.newline = newline + self.fobjects = [] + + def __reduce__(self): + return ( + OpenFile, + ( + self.fs, + self.path, + self.mode, + self.compression, + self.encoding, + self.errors, + self.newline, + ), + ) + + def __repr__(self): + return f"" + + def __enter__(self): + mode = self.mode.replace("t", "").replace("b", "") + "b" + + try: + f = self.fs.open(self.path, mode=mode) + except FileNotFoundError as e: + if has_magic(self.path): + raise FileNotFoundError( + "%s not found. The URL contains glob characters: you maybe needed\n" + "to pass expand=True in fsspec.open() or the storage_options of \n" + "your library. 
You can also set the config value 'open_expand'\n" + "before import, or fsspec.core.DEFAULT_EXPAND at runtime, to True.", + self.path, + ) from e + raise + + self.fobjects = [f] + + if self.compression is not None: + compress = compr[self.compression] + f = compress(f, mode=mode[0]) + self.fobjects.append(f) + + if "b" not in self.mode: + # assume, for example, that 'r' is equivalent to 'rt' as in builtin + f = PickleableTextIOWrapper( + f, encoding=self.encoding, errors=self.errors, newline=self.newline + ) + self.fobjects.append(f) + + return self.fobjects[-1] + + def __exit__(self, *args): + self.close() + + @property + def full_name(self): + return _unstrip_protocol(self.path, self.fs) + + def open(self): + """Materialise this as a real open file without context + + The OpenFile object should be explicitly closed to avoid enclosed file + instances persisting. You must, therefore, keep a reference to the OpenFile + during the life of the file-like it generates. + """ + return self.__enter__() + + def close(self): + """Close all encapsulated file objects""" + for f in reversed(self.fobjects): + if "r" not in self.mode and not f.closed: + f.flush() + f.close() + self.fobjects.clear() + + +class OpenFiles(list): + """List of OpenFile instances + + Can be used in a single context, which opens and closes all of the + contained files. Normal list access to get the elements works as + normal. + + A special case is made for caching filesystems - the files will + be down/uploaded together at the start or end of the context, and + this may happen concurrently, if the target filesystem supports it. + """ + + def __init__(self, *args, mode="rb", fs=None): + self.mode = mode + self.fs = fs + self.files = [] + super().__init__(*args) + + def __enter__(self): + if self.fs is None: + raise ValueError("Context has already been used") + + fs = self.fs + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache download; or set up for upload + self.files = fs.open_many(self) + return self.files + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + return [s.__enter__() for s in self] + + def __exit__(self, *args): + fs = self.fs + [s.__exit__(*args) for s in self] + if "r" not in self.mode: + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache upload + fs.commit_many(self.files) + return + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + + def __getitem__(self, item): + out = super().__getitem__(item) + if isinstance(item, slice): + return OpenFiles(out, mode=self.mode, fs=self.fs) + return out + + def __repr__(self): + return f"" + + +def open_files( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + name_function=None, + num=1, + protocol=None, + newline=None, + auto_mkdir=True, + expand=True, + **kwargs, +): + """Given a path or paths, return a list of ``OpenFile`` objects. + + For writing, a str path must contain the "*" character, which will be filled + in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2. + + For either reading or writing, can instead provide explicit list of paths. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath(s). Prefix with a protocol like ``s3://`` + to read from alternative filesystems. To read from multiple files you + can pass a globstring or a list of paths, with the caveat that they + must all have the same protocol. + mode: 'rb', 'wt', etc. 
+ compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + name_function: function or None + if opening a set of files for writing, those files do not yet exist, + so we need to generate their names by formatting the urlpath for + each sequence number + num: int [1] + if writing mode, number of files we expect to create (passed to + name+function) + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + auto_mkdir: bool (True) + If in write mode, this will ensure the target directory exists before + writing, by calling ``fs.mkdirs(exist_ok=True)``. + expand: bool + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Examples + -------- + >>> files = open_files('2015-*-*.csv') # doctest: +SKIP + >>> files = open_files( + ... 's3://bucket/2015-*-*.csv.gz', compression='gzip' + ... ) # doctest: +SKIP + + Returns + ------- + An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can + be used as a single context + + Notes + ----- + For a full list of the available protocols and the implementations that + they map across to see the latest online documentation: + + - For implementations built into ``fsspec`` see + https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations + - For implementations in separate packages see + https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations + """ + fs, fs_token, paths = get_fs_token_paths( + urlpath, + mode, + num=num, + name_function=name_function, + storage_options=kwargs, + protocol=protocol, + expand=expand, + ) + if fs.protocol == "file": + fs.auto_mkdir = auto_mkdir + elif "r" not in mode and auto_mkdir: + parents = {fs._parent(path) for path in paths} + for parent in parents: + try: + fs.makedirs(parent, exist_ok=True) + except PermissionError: + pass + return OpenFiles( + [ + OpenFile( + fs, + path, + mode=mode, + compression=compression, + encoding=encoding, + errors=errors, + newline=newline, + ) + for path in paths + ], + mode=mode, + fs=fs, + ) + + +def _un_chain(path, kwargs): + x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word + bits = ( + [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")] + if "::" in path + else [path] + ) + # [[url, protocol, kwargs], ...] 
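+    # For instance (illustrative), "filecache::s3://bucket/key" produces one
+    # entry for "s3" and one for "filecache"; caching layers given no explicit
+    # target re-use the path of the layer below them.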
+ out = [] + previous_bit = None + kwargs = kwargs.copy() + for bit in reversed(bits): + protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file" + cls = get_filesystem_class(protocol) + extra_kwargs = cls._get_kwargs_from_urls(bit) + kws = kwargs.pop(protocol, {}) + if bit is bits[0]: + kws.update(kwargs) + kw = dict(**extra_kwargs, **kws) + bit = cls._strip_protocol(bit) + if ( + protocol in {"blockcache", "filecache", "simplecache"} + and "target_protocol" not in kw + ): + bit = previous_bit + out.append((bit, protocol, kw)) + previous_bit = bit + out.reverse() + return out + + +def url_to_fs(url, **kwargs): + """ + Turn fully-qualified and potentially chained URL into filesystem instance + + Parameters + ---------- + url : str + The fsspec-compatible URL + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Returns + ------- + filesystem : FileSystem + The new filesystem discovered from ``url`` and created with + ``**kwargs``. + urlpath : str + The file-systems-specific URL for ``url``. + """ + url = stringify_path(url) + # non-FS arguments that appear in fsspec.open() + # inspect could keep this in sync with open()'s signature + known_kwargs = { + "compression", + "encoding", + "errors", + "expand", + "mode", + "name_function", + "newline", + "num", + } + kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs} + chain = _un_chain(url, kwargs) + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = protocol + inkwargs["fo"] = urls + urlpath, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + return fs, urlpath + + +DEFAULT_EXPAND = conf.get("open_expand", False) + + +def open( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + protocol=None, + newline=None, + expand=None, + **kwargs, +): + """Given a path or paths, return one ``OpenFile`` object. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath. Prefix with a protocol like ``s3://`` + to read from alternative filesystems. Should not include glob + character(s). + mode: 'rb', 'wt', etc. + compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + expand: bool or Nonw + Whether to regard file paths containing special glob characters as needing + expansion (finding the first match) or absolute. Setting False allows using + paths which do embed such characters. If None (default), this argument + takes its value from the DEFAULT_EXPAND module variable, which takes + its initial value from the "open_expand" config value at startup, which will + be False if not set. + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. 
+ + Examples + -------- + >>> openfile = open('2015-01-01.csv') # doctest: +SKIP + >>> openfile = open( + ... 's3://bucket/2015-01-01.csv.gz', compression='gzip' + ... ) # doctest: +SKIP + >>> with openfile as f: + ... df = pd.read_csv(f) # doctest: +SKIP + ... + + Returns + ------- + ``OpenFile`` object. + + Notes + ----- + For a full list of the available protocols and the implementations that + they map across to see the latest online documentation: + + - For implementations built into ``fsspec`` see + https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations + - For implementations in separate packages see + https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations + """ + expand = DEFAULT_EXPAND if expand is None else expand + out = open_files( + urlpath=[urlpath], + mode=mode, + compression=compression, + encoding=encoding, + errors=errors, + protocol=protocol, + newline=newline, + expand=expand, + **kwargs, + ) + if not out: + raise FileNotFoundError(urlpath) + return out[0] + + +def open_local( + url: str | list[str] | Path | list[Path], + mode: str = "rb", + **storage_options: dict, +) -> str | list[str]: + """Open file(s) which can be resolved to local + + For files which either are local, or get downloaded upon open + (e.g., by file caching) + + Parameters + ---------- + url: str or list(str) + mode: str + Must be read mode + storage_options: + passed on to FS for or used by open_files (e.g., compression) + """ + if "r" not in mode: + raise ValueError("Can only ensure local files when reading") + of = open_files(url, mode=mode, **storage_options) + if not getattr(of[0].fs, "local_file", False): + raise ValueError( + "open_local can only be used on a filesystem which" + " has attribute local_file=True" + ) + with of as files: + paths = [f.name for f in files] + if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path): + return paths[0] + return paths + + +def get_compression(urlpath, compression): + if compression == "infer": + compression = infer_compression(urlpath) + if compression is not None and compression not in compr: + raise ValueError(f"Compression type {compression} not supported") + return compression + + +def split_protocol(urlpath): + """Return protocol, path pair""" + urlpath = stringify_path(urlpath) + if "://" in urlpath: + protocol, path = urlpath.split("://", 1) + if len(protocol) > 1: + # excludes Windows paths + return protocol, path + if urlpath.startswith("data:"): + return urlpath.split(":", 1) + return None, urlpath + + +def strip_protocol(urlpath): + """Return only path part of full URL, according to appropriate backend""" + protocol, _ = split_protocol(urlpath) + cls = get_filesystem_class(protocol) + return cls._strip_protocol(urlpath) + + +def expand_paths_if_needed(paths, mode, num, fs, name_function): + """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]`` + in them (read mode). + + :param paths: list of paths + mode: str + Mode in which to open files. + num: int + If opening in writing mode, number of files we expect to create. + fs: filesystem object + name_function: callable + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. 
+ :return: list of paths + """ + expanded_paths = [] + paths = list(paths) + + if "w" in mode: # read mode + if sum([1 for p in paths if "*" in p]) > 1: + raise ValueError( + "When writing data, only one filename mask can be specified." + ) + num = max(num, len(paths)) + + for curr_path in paths: + if "*" in curr_path: + # expand using name_function + expanded_paths.extend(_expand_paths(curr_path, name_function, num)) + else: + expanded_paths.append(curr_path) + # if we generated more paths that asked for, trim the list + if len(expanded_paths) > num: + expanded_paths = expanded_paths[:num] + + else: # read mode + for curr_path in paths: + if has_magic(curr_path): + # expand using glob + expanded_paths.extend(fs.glob(curr_path)) + else: + expanded_paths.append(curr_path) + + return expanded_paths + + +def get_fs_token_paths( + urlpath, + mode="rb", + num=1, + name_function=None, + storage_options=None, + protocol=None, + expand=True, +): + """Filesystem, deterministic token, and paths from a urlpath and options. + + Parameters + ---------- + urlpath: string or iterable + Absolute or relative filepath, URL (may include protocols like + ``s3://``), or globstring pointing to data. + mode: str, optional + Mode in which to open files. + num: int, optional + If opening in writing mode, number of files we expect to create. + name_function: callable, optional + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. + storage_options: dict, optional + Additional keywords to pass to the filesystem class. + protocol: str or None + To override the protocol specifier in the URL + expand: bool + Expand string paths for writing, assuming the path is a directory + """ + if isinstance(urlpath, (list, tuple, set)): + if not urlpath: + raise ValueError("empty urlpath sequence") + urlpath0 = stringify_path(list(urlpath)[0]) + else: + urlpath0 = stringify_path(urlpath) + storage_options = storage_options or {} + if protocol: + storage_options["protocol"] = protocol + chain = _un_chain(urlpath0, storage_options or {}) + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, nested_protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = nested_protocol + inkwargs["fo"] = urls + paths, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + if isinstance(urlpath, (list, tuple, set)): + pchains = [ + _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath + ] + if len({pc[1] for pc in pchains}) > 1: + raise ValueError("Protocol mismatch getting fs from %s", urlpath) + paths = [pc[0] for pc in pchains] + else: + paths = fs._strip_protocol(paths) + if isinstance(paths, (list, tuple, set)): + if expand: + paths = expand_paths_if_needed(paths, mode, num, fs, name_function) + elif not isinstance(paths, list): + paths = list(paths) + else: + if "w" in mode and expand: + paths = _expand_paths(paths, name_function, num) + elif "x" in mode and expand: + paths = _expand_paths(paths, name_function, num) + elif "*" in paths: + paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)] + else: + paths = [paths] + + return fs, fs._fs_token, paths + + +def _expand_paths(path, name_function, num): + if isinstance(path, str): + if path.count("*") > 1: + raise ValueError("Output path 
spec must contain exactly one '*'.") + elif "*" not in path: + path = os.path.join(path, "*.part") + + if name_function is None: + name_function = build_name_function(num - 1) + + paths = [path.replace("*", name_function(i)) for i in range(num)] + if paths != sorted(paths): + logger.warning( + "In order to preserve order between partitions" + " paths created with ``name_function`` should " + "sort to partition order" + ) + elif isinstance(path, (tuple, list)): + assert len(path) == num + paths = list(path) + else: + raise ValueError( + "Path should be either\n" + "1. A list of paths: ['foo.json', 'bar.json', ...]\n" + "2. A directory: 'foo/\n" + "3. A path with a '*' in it: 'foo.*.json'" + ) + return paths + + +class PickleableTextIOWrapper(io.TextIOWrapper): + """TextIOWrapper cannot be pickled. This solves it. + + Requires that ``buffer`` be pickleable, which all instances of + AbstractBufferedFile are. + """ + + def __init__( + self, + buffer, + encoding=None, + errors=None, + newline=None, + line_buffering=False, + write_through=False, + ): + self.args = buffer, encoding, errors, newline, line_buffering, write_through + super().__init__(*self.args) + + def __reduce__(self): + return PickleableTextIOWrapper, self.args diff --git a/MLPY/Lib/site-packages/fsspec/dircache.py b/MLPY/Lib/site-packages/fsspec/dircache.py new file mode 100644 index 0000000000000000000000000000000000000000..eca19566b135e5a7a4f6e7407d56411ec58bfe44 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/dircache.py @@ -0,0 +1,98 @@ +import time +from collections.abc import MutableMapping +from functools import lru_cache + + +class DirCache(MutableMapping): + """ + Caching of directory listings, in a structure like:: + + {"path0": [ + {"name": "path0/file0", + "size": 123, + "type": "file", + ... + }, + {"name": "path0/file1", + }, + ... + ], + "path1": [...] + } + + Parameters to this class control listing expiry or indeed turn + caching off + """ + + def __init__( + self, + use_listings_cache=True, + listings_expiry_time=None, + max_paths=None, + **kwargs, + ): + """ + + Parameters + ---------- + use_listings_cache: bool + If False, this cache never returns items, but always reports KeyError, + and setting items has no effect + listings_expiry_time: int or float (optional) + Time in seconds that a listing is considered valid. If None, + listings do not expire. + max_paths: int (optional) + The number of most recent listings that are considered valid; 'recent' + refers to when the entry was set. 
+ """ + self._cache = {} + self._times = {} + if max_paths: + self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None)) + self.use_listings_cache = use_listings_cache + self.listings_expiry_time = listings_expiry_time + self.max_paths = max_paths + + def __getitem__(self, item): + if self.listings_expiry_time is not None: + if self._times.get(item, 0) - time.time() < -self.listings_expiry_time: + del self._cache[item] + if self.max_paths: + self._q(item) + return self._cache[item] # maybe raises KeyError + + def clear(self): + self._cache.clear() + + def __len__(self): + return len(self._cache) + + def __contains__(self, item): + try: + self[item] + return True + except KeyError: + return False + + def __setitem__(self, key, value): + if not self.use_listings_cache: + return + if self.max_paths: + self._q(key) + self._cache[key] = value + if self.listings_expiry_time is not None: + self._times[key] = time.time() + + def __delitem__(self, key): + del self._cache[key] + + def __iter__(self): + entries = list(self._cache) + + return (k for k in entries if k in self) + + def __reduce__(self): + return ( + DirCache, + (self.use_listings_cache, self.listings_expiry_time, self.max_paths), + ) diff --git a/MLPY/Lib/site-packages/fsspec/exceptions.py b/MLPY/Lib/site-packages/fsspec/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ae8905475f02655f4fc5863931d99ca9da55db78 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/exceptions.py @@ -0,0 +1,18 @@ +""" +fsspec user-defined exception classes +""" + +import asyncio + + +class BlocksizeMismatchError(ValueError): + """ + Raised when a cached file is opened with a different blocksize than it was + written with + """ + + +class FSTimeoutError(asyncio.TimeoutError): + """ + Raised when a fsspec function timed out occurs + """ diff --git a/MLPY/Lib/site-packages/fsspec/fuse.py b/MLPY/Lib/site-packages/fsspec/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca8c973c1993ac00016bb46e3ae7a3e44bc8d15 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/fuse.py @@ -0,0 +1,324 @@ +import argparse +import logging +import os +import stat +import threading +import time +from errno import EIO, ENOENT + +from fuse import FUSE, FuseOSError, LoggingMixIn, Operations + +from fsspec import __version__ +from fsspec.core import url_to_fs + +logger = logging.getLogger("fsspec.fuse") + + +class FUSEr(Operations): + def __init__(self, fs, path, ready_file=False): + self.fs = fs + self.cache = {} + self.root = path.rstrip("/") + "/" + self.counter = 0 + logger.info("Starting FUSE at %s", path) + self._ready_file = ready_file + + def getattr(self, path, fh=None): + logger.debug("getattr %s", path) + if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]: + return {"type": "file", "st_size": 5} + + path = "".join([self.root, path.lstrip("/")]).rstrip("/") + try: + info = self.fs.info(path) + except FileNotFoundError: + raise FuseOSError(ENOENT) + + data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)} + perm = info.get("mode", 0o777) + + if info["type"] != "file": + data["st_mode"] = stat.S_IFDIR | perm + data["st_size"] = 0 + data["st_blksize"] = 0 + else: + data["st_mode"] = stat.S_IFREG | perm + data["st_size"] = info["size"] + data["st_blksize"] = 5 * 2**20 + data["st_nlink"] = 1 + data["st_atime"] = info["atime"] if "atime" in info else time.time() + data["st_ctime"] = info["ctime"] if "ctime" in info else time.time() + data["st_mtime"] = info["mtime"] if "mtime" in 
info else time.time() + return data + + def readdir(self, path, fh): + logger.debug("readdir %s", path) + path = "".join([self.root, path.lstrip("/")]) + files = self.fs.ls(path, False) + files = [os.path.basename(f.rstrip("/")) for f in files] + return [".", ".."] + files + + def mkdir(self, path, mode): + path = "".join([self.root, path.lstrip("/")]) + self.fs.mkdir(path) + return 0 + + def rmdir(self, path): + path = "".join([self.root, path.lstrip("/")]) + self.fs.rmdir(path) + return 0 + + def read(self, path, size, offset, fh): + logger.debug("read %s", (path, size, offset)) + if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]: + # status indicator + return b"ready" + + f = self.cache[fh] + f.seek(offset) + out = f.read(size) + return out + + def write(self, path, data, offset, fh): + logger.debug("write %s", (path, offset)) + f = self.cache[fh] + f.seek(offset) + f.write(data) + return len(data) + + def create(self, path, flags, fi=None): + logger.debug("create %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + self.fs.touch(fn) # OS will want to get attributes immediately + f = self.fs.open(fn, "wb") + self.cache[self.counter] = f + self.counter += 1 + return self.counter - 1 + + def open(self, path, flags): + logger.debug("open %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + if flags % 2 == 0: + # read + mode = "rb" + else: + # write/create + mode = "wb" + self.cache[self.counter] = self.fs.open(fn, mode) + self.counter += 1 + return self.counter - 1 + + def truncate(self, path, length, fh=None): + fn = "".join([self.root, path.lstrip("/")]) + if length != 0: + raise NotImplementedError + # maybe should be no-op since open with write sets size to zero anyway + self.fs.touch(fn) + + def unlink(self, path): + fn = "".join([self.root, path.lstrip("/")]) + try: + self.fs.rm(fn, False) + except (OSError, FileNotFoundError): + raise FuseOSError(EIO) + + def release(self, path, fh): + try: + if fh in self.cache: + f = self.cache[fh] + f.close() + self.cache.pop(fh) + except Exception as e: + print(e) + return 0 + + def chmod(self, path, mode): + if hasattr(self.fs, "chmod"): + path = "".join([self.root, path.lstrip("/")]) + return self.fs.chmod(path, mode) + raise NotImplementedError + + +def run( + fs, + path, + mount_point, + foreground=True, + threads=False, + ready_file=False, + ops_class=FUSEr, +): + """Mount stuff in a local directory + + This uses fusepy to make it appear as if a given path on an fsspec + instance is in fact resident within the local file-system. + + This requires that fusepy by installed, and that FUSE be available on + the system (typically requiring a package to be installed with + apt, yum, brew, etc.). + + Parameters + ---------- + fs: file-system instance + From one of the compatible implementations + path: str + Location on that file-system to regard as the root directory to + mount. Note that you typically should include the terminating "/" + character. + mount_point: str + An empty directory on the local file-system where the contents of + the remote path will appear. + foreground: bool + Whether or not calling this function will block. Operation will + typically be more stable if True. + threads: bool + Whether or not to create threads when responding to file operations + within the mounter directory. Operation will typically be more + stable if False. + ready_file: bool + Whether the FUSE process is ready. The ``.fuse_ready`` file will + exist in the ``mount_point`` directory if True. Debugging purpose. 
+ ops_class: FUSEr or Subclass of FUSEr + To override the default behavior of FUSEr. For Example, logging + to file. + + """ + func = lambda: FUSE( + ops_class(fs, path, ready_file=ready_file), + mount_point, + nothreads=not threads, + foreground=foreground, + ) + if not foreground: + th = threading.Thread(target=func) + th.daemon = True + th.start() + return th + else: # pragma: no cover + try: + func() + except KeyboardInterrupt: + pass + + +def main(args): + """Mount filesystem from chained URL to MOUNT_POINT. + + Examples: + + python3 -m fsspec.fuse memory /usr/share /tmp/mem + + python3 -m fsspec.fuse local /tmp/source /tmp/local \\ + -l /tmp/fsspecfuse.log + + You can also mount chained-URLs and use special settings: + + python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\ + / /tmp/zip \\ + -o 'filecache-cache_storage=/tmp/simplecache' + + You can specify the type of the setting by using `[int]` or `[bool]`, + (`true`, `yes`, `1` represents the Boolean value `True`): + + python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\ + /historic/packages/RPMS /tmp/ftp \\ + -o 'simplecache-cache_storage=/tmp/simplecache' \\ + -o 'simplecache-check_files=false[bool]' \\ + -o 'ftp-listings_expiry_time=60[int]' \\ + -o 'ftp-username=anonymous' \\ + -o 'ftp-password=xieyanbo' + """ + + class RawDescriptionArgumentParser(argparse.ArgumentParser): + def format_help(self): + usage = super().format_help() + parts = usage.split("\n\n") + parts[1] = self.description.rstrip() + return "\n\n".join(parts) + + parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__) + parser.add_argument("--version", action="version", version=__version__) + parser.add_argument("url", type=str, help="fs url") + parser.add_argument("source_path", type=str, help="source directory in fs") + parser.add_argument("mount_point", type=str, help="local directory") + parser.add_argument( + "-o", + "--option", + action="append", + help="Any options of protocol included in the chained URL", + ) + parser.add_argument( + "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')" + ) + parser.add_argument( + "-f", + "--foreground", + action="store_false", + help="Running in foreground or not (Default: False)", + ) + parser.add_argument( + "-t", + "--threads", + action="store_false", + help="Running with threads support (Default: False)", + ) + parser.add_argument( + "-r", + "--ready-file", + action="store_false", + help="The `.fuse_ready` file will exist after FUSE is ready. 
" + "(Debugging purpose, Default: False)", + ) + args = parser.parse_args(args) + + kwargs = {} + for item in args.option or []: + key, sep, value = item.partition("=") + if not sep: + parser.error(message=f"Wrong option: {item!r}") + val = value.lower() + if val.endswith("[int]"): + value = int(value[: -len("[int]")]) + elif val.endswith("[bool]"): + value = val[: -len("[bool]")] in ["1", "yes", "true"] + + if "-" in key: + fs_name, setting_name = key.split("-", 1) + if fs_name in kwargs: + kwargs[fs_name][setting_name] = value + else: + kwargs[fs_name] = {setting_name: value} + else: + kwargs[key] = value + + if args.log_file: + logging.basicConfig( + level=logging.DEBUG, + filename=args.log_file, + format="%(asctime)s %(message)s", + ) + + class LoggingFUSEr(FUSEr, LoggingMixIn): + pass + + fuser = LoggingFUSEr + else: + fuser = FUSEr + + fs, url_path = url_to_fs(args.url, **kwargs) + logger.debug("Mounting %s to %s", url_path, str(args.mount_point)) + run( + fs, + args.source_path, + args.mount_point, + foreground=args.foreground, + threads=args.threads, + ready_file=args.ready_file, + ops_class=fuser, + ) + + +if __name__ == "__main__": + import sys + + main(sys.argv[1:]) diff --git a/MLPY/Lib/site-packages/fsspec/generic.py b/MLPY/Lib/site-packages/fsspec/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..9bad0f048f737400b4cd919f7699d15fbfbf7c62 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/generic.py @@ -0,0 +1,411 @@ +from __future__ import annotations + +import inspect +import logging +import os +import shutil +import uuid +from typing import Optional + +from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper +from .callbacks import DEFAULT_CALLBACK +from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs + +_generic_fs = {} +logger = logging.getLogger("fsspec.generic") + + +def set_generic_fs(protocol, **storage_options): + _generic_fs[protocol] = filesystem(protocol, **storage_options) + + +default_method = "default" + + +def _resolve_fs(url, method=None, protocol=None, storage_options=None): + """Pick instance of backend FS""" + method = method or default_method + protocol = protocol or split_protocol(url)[0] + storage_options = storage_options or {} + if method == "default": + return filesystem(protocol) + if method == "generic": + return _generic_fs[protocol] + if method == "current": + cls = get_filesystem_class(protocol) + return cls.current() + if method == "options": + fs, _ = url_to_fs(url, **storage_options.get(protocol, {})) + return fs + raise ValueError(f"Unknown FS resolution method: {method}") + + +def rsync( + source, + destination, + delete_missing=False, + source_field="size", + dest_field="size", + update_cond="different", + inst_kwargs=None, + fs=None, + **kwargs, +): + """Sync files between two directory trees + + (experimental) + + Parameters + ---------- + source: str + Root of the directory tree to take files from. This must be a directory, but + do not include any terminating "/" character + destination: str + Root path to copy into. The contents of this location should be + identical to the contents of ``source`` when done. This will be made a + directory, and the terminal "/" should not be included. + delete_missing: bool + If there are paths in the destination that don't exist in the + source and this is True, delete them. Otherwise, leave them alone. 
+ source_field: str | callable + If ``update_field`` is "different", this is the key in the info + of source files to consider for difference. Maybe a function of the + info dict. + dest_field: str | callable + If ``update_field`` is "different", this is the key in the info + of destination files to consider for difference. May be a function of + the info dict. + update_cond: "different"|"always"|"never" + If "always", every file is copied, regardless of whether it exists in + the destination. If "never", files that exist in the destination are + not copied again. If "different" (default), only copy if the info + fields given by ``source_field`` and ``dest_field`` (usually "size") + are different. Other comparisons may be added in the future. + inst_kwargs: dict|None + If ``fs`` is None, use this set of keyword arguments to make a + GenericFileSystem instance + fs: GenericFileSystem|None + Instance to use if explicitly given. The instance defines how to + to make downstream file system instances from paths. + + Returns + ------- + dict of the copy operations that were performed, {source: destination} + """ + fs = fs or GenericFileSystem(**(inst_kwargs or {})) + source = fs._strip_protocol(source) + destination = fs._strip_protocol(destination) + allfiles = fs.find(source, withdirs=True, detail=True) + if not fs.isdir(source): + raise ValueError("Can only rsync on a directory") + otherfiles = fs.find(destination, withdirs=True, detail=True) + dirs = [ + a + for a, v in allfiles.items() + if v["type"] == "directory" and a.replace(source, destination) not in otherfiles + ] + logger.debug(f"{len(dirs)} directories to create") + if dirs: + fs.make_many_dirs( + [dirn.replace(source, destination) for dirn in dirs], exist_ok=True + ) + allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"} + logger.debug(f"{len(allfiles)} files to consider for copy") + to_delete = [ + o + for o, v in otherfiles.items() + if o.replace(destination, source) not in allfiles and v["type"] == "file" + ] + for k, v in allfiles.copy().items(): + otherfile = k.replace(source, destination) + if otherfile in otherfiles: + if update_cond == "always": + allfiles[k] = otherfile + elif update_cond == "different": + inf1 = source_field(v) if callable(source_field) else v[source_field] + v2 = otherfiles[otherfile] + inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field] + if inf1 != inf2: + # details mismatch, make copy + allfiles[k] = otherfile + else: + # details match, don't copy + allfiles.pop(k) + else: + # file not in target yet + allfiles[k] = otherfile + logger.debug(f"{len(allfiles)} files to copy") + if allfiles: + source_files, target_files = zip(*allfiles.items()) + fs.cp(source_files, target_files, **kwargs) + logger.debug(f"{len(to_delete)} files to delete") + if delete_missing and to_delete: + fs.rm(to_delete) + return allfiles + + +class GenericFileSystem(AsyncFileSystem): + """Wrapper over all other FS types + + + + This implementation is a single unified interface to be able to run FS operations + over generic URLs, and dispatch to the specific implementations using the URL + protocol prefix. + + Note: instances of this FS are always async, even if you never use it with any async + backend. + """ + + protocol = "generic" # there is no real reason to ever use a protocol with this FS + + def __init__(self, default_method="default", **kwargs): + """ + + Parameters + ---------- + default_method: str (optional) + Defines how to configure backend FS instances. 
Options are: + - "default": instantiate like FSClass(), with no + extra arguments; this is the default instance of that FS, and can be + configured via the config system + - "generic": takes instances from the `_generic_fs` dict in this module, + which you must populate before use. Keys are by protocol + - "current": takes the most recently instantiated version of each FS + """ + self.method = default_method + super().__init__(**kwargs) + + def _parent(self, path): + fs = _resolve_fs(path, self.method) + return fs.unstrip_protocol(fs._parent(path)) + + def _strip_protocol(self, path): + # normalization only + fs = _resolve_fs(path, self.method) + return fs.unstrip_protocol(fs._strip_protocol(path)) + + async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + fs = _resolve_fs(path, self.method) + if fs.async_impl: + out = await fs._find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs + ) + else: + out = fs.find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs + ) + result = {} + for k, v in out.items(): + v = v.copy() # don't corrupt target FS dircache + name = fs.unstrip_protocol(k) + v["name"] = name + result[name] = v + if detail: + return result + return list(result) + + async def _info(self, url, **kwargs): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + out = await fs._info(url, **kwargs) + else: + out = fs.info(url, **kwargs) + out = out.copy() # don't edit originals + out["name"] = fs.unstrip_protocol(out["name"]) + return out + + async def _ls( + self, + url, + detail=True, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + out = await fs._ls(url, detail=True, **kwargs) + else: + out = fs.ls(url, detail=True, **kwargs) + out = [o.copy() for o in out] # don't edit originals + for o in out: + o["name"] = fs.unstrip_protocol(o["name"]) + if detail: + return out + else: + return [o["name"] for o in out] + + async def _cat_file( + self, + url, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + return await fs._cat_file(url, **kwargs) + else: + return fs.cat_file(url, **kwargs) + + async def _pipe_file( + self, + path, + value, + **kwargs, + ): + fs = _resolve_fs(path, self.method) + if fs.async_impl: + return await fs._pipe_file(path, value, **kwargs) + else: + return fs.pipe_file(path, value, **kwargs) + + async def _rm(self, url, **kwargs): + urls = url + if isinstance(urls, str): + urls = [urls] + fs = _resolve_fs(urls[0], self.method) + if fs.async_impl: + await fs._rm(urls, **kwargs) + else: + fs.rm(url, **kwargs) + + async def _makedirs(self, path, exist_ok=False): + logger.debug("Make dir %s", path) + fs = _resolve_fs(path, self.method) + if fs.async_impl: + await fs._makedirs(path, exist_ok=exist_ok) + else: + fs.makedirs(path, exist_ok=exist_ok) + + def rsync(self, source, destination, **kwargs): + """Sync files between two directory trees + + See `func:rsync` for more details. 
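        A minimal sketch of the protocol dispatch described above; the memory
        backend is used purely as an example and the exact name formatting of
        the listing may vary:

            from fsspec.generic import GenericFileSystem

            fs = GenericFileSystem()                        # default_method="default"
            fs.pipe_file("memory://demo/hello.txt", b"hi")  # routed to the memory backend
            print(fs.cat_file("memory://demo/hello.txt"))   # b'hi'
            print(fs.ls("memory://demo", detail=False))     # URLs with the protocol re-attached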
+ """ + rsync(source, destination, fs=self, **kwargs) + + async def _cp_file( + self, + url, + url2, + blocksize=2**20, + callback=DEFAULT_CALLBACK, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + fs2 = _resolve_fs(url2, self.method) + if fs is fs2: + # pure remote + if fs.async_impl: + return await fs._cp_file(url, url2, **kwargs) + else: + return fs.cp_file(url, url2, **kwargs) + kw = {"blocksize": 0, "cache_type": "none"} + try: + f1 = ( + await fs.open_async(url, "rb") + if hasattr(fs, "open_async") + else fs.open(url, "rb", **kw) + ) + callback.set_size(await maybe_await(f1.size)) + f2 = ( + await fs2.open_async(url2, "wb") + if hasattr(fs2, "open_async") + else fs2.open(url2, "wb", **kw) + ) + while f1.size is None or f2.tell() < f1.size: + data = await maybe_await(f1.read(blocksize)) + if f1.size is None and not data: + break + await maybe_await(f2.write(data)) + callback.absolute_update(f2.tell()) + finally: + try: + await maybe_await(f2.close()) + await maybe_await(f1.close()) + except NameError: + # fail while opening f1 or f2 + pass + + async def _make_many_dirs(self, urls, exist_ok=True): + fs = _resolve_fs(urls[0], self.method) + if fs.async_impl: + coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls] + await _run_coros_in_chunks(coros) + else: + for u in urls: + fs.makedirs(u, exist_ok=exist_ok) + + make_many_dirs = sync_wrapper(_make_many_dirs) + + async def _copy( + self, + path1: list[str], + path2: list[str], + recursive: bool = False, + on_error: str = "ignore", + maxdepth: Optional[int] = None, + batch_size: Optional[int] = None, + tempdir: Optional[str] = None, + **kwargs, + ): + if recursive: + raise NotImplementedError + fs = _resolve_fs(path1[0], self.method) + fs2 = _resolve_fs(path2[0], self.method) + # not expanding paths atm., assume call is from rsync() + if fs is fs2: + # pure remote + if fs.async_impl: + return await fs._copy(path1, path2, **kwargs) + else: + return fs.copy(path1, path2, **kwargs) + await copy_file_op( + fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error + ) + + +async def copy_file_op( + fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore" +): + import tempfile + + tempdir = tempdir or tempfile.mkdtemp() + try: + coros = [ + _copy_file_op( + fs1, + u1, + fs2, + u2, + os.path.join(tempdir, uuid.uuid4().hex), + on_error=on_error, + ) + for u1, u2 in zip(url1, url2) + ] + await _run_coros_in_chunks(coros, batch_size=batch_size) + finally: + shutil.rmtree(tempdir) + + +async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"): + ex = () if on_error == "raise" else Exception + logger.debug("Copy %s -> %s", url1, url2) + try: + if fs1.async_impl: + await fs1._get_file(url1, local) + else: + fs1.get_file(url1, local) + if fs2.async_impl: + await fs2._put_file(local, url2) + else: + fs2.put_file(local, url2) + os.unlink(local) + logger.debug("Copy %s -> %s; done", url1, url2) + except ex as e: + logger.debug("ignoring cp exception for %s: %s", url1, e) + + +async def maybe_await(cor): + if inspect.iscoroutine(cor): + return await cor + else: + return cor diff --git a/MLPY/Lib/site-packages/fsspec/gui.py b/MLPY/Lib/site-packages/fsspec/gui.py new file mode 100644 index 0000000000000000000000000000000000000000..113317e5a48c4d5fac062bc84019e97a05e16a0f --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/gui.py @@ -0,0 +1,414 @@ +import ast +import contextlib +import logging +import os +import re +from typing import ClassVar, Sequence + +import panel as pn + +from .core import OpenFile, 
get_filesystem_class, split_protocol +from .registry import known_implementations + +pn.extension() +logger = logging.getLogger("fsspec.gui") + + +class SigSlot: + """Signal-slot mixin, for Panel event passing + + Include this class in a widget manager's superclasses to be able to + register events and callbacks on Panel widgets managed by that class. + + The method ``_register`` should be called as widgets are added, and external + code should call ``connect`` to associate callbacks. + + By default, all signals emit a DEBUG logging statement. + """ + + # names of signals that this class may emit each of which must be + # set by _register for any new instance + signals: ClassVar[Sequence[str]] = [] + # names of actions that this class may respond to + slots: ClassVar[Sequence[str]] = [] + + # each of which must be a method name + + def __init__(self): + self._ignoring_events = False + self._sigs = {} + self._map = {} + self._setup() + + def _setup(self): + """Create GUI elements and register signals""" + self.panel = pn.pane.PaneBase() + # no signals to set up in the base class + + def _register( + self, widget, name, thing="value", log_level=logging.DEBUG, auto=False + ): + """Watch the given attribute of a widget and assign it a named event + + This is normally called at the time a widget is instantiated, in the + class which owns it. + + Parameters + ---------- + widget : pn.layout.Panel or None + Widget to watch. If None, an anonymous signal not associated with + any widget. + name : str + Name of this event + thing : str + Attribute of the given widget to watch + log_level : int + When the signal is triggered, a logging event of the given level + will be fired in the dfviz logger. + auto : bool + If True, automatically connects with a method in this class of the + same name. + """ + if name not in self.signals: + raise ValueError(f"Attempt to assign an undeclared signal: {name}") + self._sigs[name] = { + "widget": widget, + "callbacks": [], + "thing": thing, + "log": log_level, + } + wn = "-".join( + [ + getattr(widget, "name", str(widget)) if widget is not None else "none", + thing, + ] + ) + self._map[wn] = name + if widget is not None: + widget.param.watch(self._signal, thing, onlychanged=True) + if auto and hasattr(self, name): + self.connect(name, getattr(self, name)) + + def _repr_mimebundle_(self, *args, **kwargs): + """Display in a notebook or a server""" + try: + return self.panel._repr_mimebundle_(*args, **kwargs) + except (ValueError, AttributeError): + raise NotImplementedError("Panel does not seem to be set up properly") + + def connect(self, signal, slot): + """Associate call back with given event + + The callback must be a function which takes the "new" value of the + watched attribute as the only parameter. If the callback return False, + this cancels any further processing of the given event. + + Alternatively, the callback can be a string, in which case it means + emitting the correspondingly-named event (i.e., connect to self) + """ + self._sigs[signal]["callbacks"].append(slot) + + def _signal(self, event): + """This is called by a an action on a widget + + Within an self.ignore_events context, nothing happens. + + Tests can execute this method by directly changing the values of + widget components. 
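        For orientation, a minimal hypothetical subclass (not part of fsspec)
        showing the register/connect flow; it assumes panel is installed and
        the widget and signal names are illustrative:

            import panel as pn
            from fsspec.gui import SigSlot

            class Demo(SigSlot):
                signals = ["text_changed"]  # events this manager may emit

                def _setup(self):
                    self.box = pn.widgets.TextInput(name="demo")
                    # watch the widget's "value" attribute and emit "text_changed" on change
                    self._register(self.box, "text_changed", "value")
                    self.panel = pn.Row(self.box)

            d = Demo()
            d.connect("text_changed", lambda new: print("got:", new))
            d.box.value = "hello"  # the watcher fires and the callback prints "got: hello"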
+ """ + if not self._ignoring_events: + wn = "-".join([event.obj.name, event.name]) + if wn in self._map and self._map[wn] in self._sigs: + self._emit(self._map[wn], event.new) + + @contextlib.contextmanager + def ignore_events(self): + """Temporarily turn off events processing in this instance + + (does not propagate to children) + """ + self._ignoring_events = True + try: + yield + finally: + self._ignoring_events = False + + def _emit(self, sig, value=None): + """An event happened, call its callbacks + + This method can be used in tests to simulate message passing without + directly changing visual elements. + + Calling of callbacks will halt whenever one returns False. + """ + logger.log(self._sigs[sig]["log"], f"{sig}: {value}") + for callback in self._sigs[sig]["callbacks"]: + if isinstance(callback, str): + self._emit(callback) + else: + try: + # running callbacks should not break the interface + ret = callback(value) + if ret is False: + break + except Exception as e: + logger.exception( + "Exception (%s) while executing callback for signal: %s", + e, + sig, + ) + + def show(self, threads=False): + """Open a new browser tab and display this instance's interface""" + self.panel.show(threads=threads, verbose=False) + return self + + +class SingleSelect(SigSlot): + """A multiselect which only allows you to select one item for an event""" + + signals = ["_selected", "selected"] # the first is internal + slots = ["set_options", "set_selection", "add", "clear", "select"] + + def __init__(self, **kwargs): + self.kwargs = kwargs + super().__init__() + + def _setup(self): + self.panel = pn.widgets.MultiSelect(**self.kwargs) + self._register(self.panel, "_selected", "value") + self._register(None, "selected") + self.connect("_selected", self.select_one) + + def _signal(self, *args, **kwargs): + super()._signal(*args, **kwargs) + + def select_one(self, *_): + with self.ignore_events(): + val = [self.panel.value[-1]] if self.panel.value else [] + self.panel.value = val + self._emit("selected", self.panel.value) + + def set_options(self, options): + self.panel.options = options + + def clear(self): + self.panel.options = [] + + @property + def value(self): + return self.panel.value + + def set_selection(self, selection): + self.panel.value = [selection] + + +class FileSelector(SigSlot): + """Panel-based graphical file selector widget + + Instances of this widget are interactive and can be displayed in jupyter by having + them as the output of a cell, or in a separate browser tab using ``.show()``. + """ + + signals = [ + "protocol_changed", + "selection_changed", + "directory_entered", + "home_clicked", + "up_clicked", + "go_clicked", + "filters_changed", + ] + slots = ["set_filters", "go_home"] + + def __init__(self, url=None, filters=None, ignore=None, kwargs=None): + """ + + Parameters + ---------- + url : str (optional) + Initial value of the URL to populate the dialog; should include protocol + filters : list(str) (optional) + File endings to include in the listings. If not included, all files are + allowed. Does not affect directories. + If given, the endings will appear as checkboxes in the interface + ignore : list(str) (optional) + Regex(s) of file basename patterns to ignore, e.g., "\\." 
for typical + hidden files on posix + kwargs : dict (optional) + To pass to file system instance + """ + if url: + self.init_protocol, url = split_protocol(url) + else: + self.init_protocol, url = "file", os.getcwd() + self.init_url = url + self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}" + self.filters = filters + self.ignore = [re.compile(i) for i in ignore or []] + self._fs = None + super().__init__() + + def _setup(self): + self.url = pn.widgets.TextInput( + name="url", + value=self.init_url, + align="end", + sizing_mode="stretch_width", + width_policy="max", + ) + self.protocol = pn.widgets.Select( + options=sorted(known_implementations), + value=self.init_protocol, + name="protocol", + align="center", + ) + self.kwargs = pn.widgets.TextInput( + name="kwargs", value=self.init_kwargs, align="center" + ) + self.go = pn.widgets.Button(name="⇨", align="end", width=45) + self.main = SingleSelect(size=10) + self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end") + self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end") + + self._register(self.protocol, "protocol_changed", auto=True) + self._register(self.go, "go_clicked", "clicks", auto=True) + self._register(self.up, "up_clicked", "clicks", auto=True) + self._register(self.home, "home_clicked", "clicks", auto=True) + self._register(None, "selection_changed") + self.main.connect("selected", self.selection_changed) + self._register(None, "directory_entered") + self.prev_protocol = self.protocol.value + self.prev_kwargs = self.storage_options + + self.filter_sel = pn.widgets.CheckBoxGroup( + value=[], options=[], inline=False, align="end", width_policy="min" + ) + self._register(self.filter_sel, "filters_changed", auto=True) + + self.panel = pn.Column( + pn.Row(self.protocol, self.kwargs), + pn.Row(self.home, self.up, self.url, self.go, self.filter_sel), + self.main.panel, + ) + self.set_filters(self.filters) + self.go_clicked() + + def set_filters(self, filters=None): + self.filters = filters + if filters: + self.filter_sel.options = filters + self.filter_sel.value = filters + else: + self.filter_sel.options = [] + self.filter_sel.value = [] + + @property + def storage_options(self): + """Value of the kwargs box as a dictionary""" + return ast.literal_eval(self.kwargs.value) or {} + + @property + def fs(self): + """Current filesystem instance""" + if self._fs is None: + cls = get_filesystem_class(self.protocol.value) + self._fs = cls(**self.storage_options) + return self._fs + + @property + def urlpath(self): + """URL of currently selected item""" + return ( + (f"{self.protocol.value}://{self.main.value[0]}") + if self.main.value + else None + ) + + def open_file(self, mode="rb", compression=None, encoding=None): + """Create OpenFile instance for the currently selected item + + For example, in a notebook you might do something like + + .. code-block:: + + [ ]: sel = FileSelector(); sel + + # user selects their file + + [ ]: with sel.open_file('rb') as f: + ... out = f.read() + + Parameters + ---------- + mode: str (optional) + Open mode for the file. + compression: str (optional) + The interact with the file as compressed. Set to 'infer' to guess + compression from the file ending + encoding: str (optional) + If using text mode, use this encoding; defaults to UTF8. 
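        A slightly fuller sketch than the one above, assuming a posix machine
        where ``/tmp`` exists (the URL and filter endings are illustrative):

            from fsspec.gui import FileSelector

            sel = FileSelector(url="file:///tmp", filters=[".csv", ".parquet"])
            sel.show()          # open the picker in a browser tab
            # ... the user navigates and selects a file ...
            print(sel.urlpath)  # e.g. "file:///tmp/results.csv"
            with sel.open_file("rb") as f:
                head = f.read(1024)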
+ """ + if self.urlpath is None: + raise ValueError("No file selected") + return OpenFile(self.fs, self.urlpath, mode, compression, encoding) + + def filters_changed(self, values): + self.filters = values + self.go_clicked() + + def selection_changed(self, *_): + if self.urlpath is None: + return + if self.fs.isdir(self.urlpath): + self.url.value = self.fs._strip_protocol(self.urlpath) + self.go_clicked() + + def go_clicked(self, *_): + if ( + self.prev_protocol != self.protocol.value + or self.prev_kwargs != self.storage_options + ): + self._fs = None # causes fs to be recreated + self.prev_protocol = self.protocol.value + self.prev_kwargs = self.storage_options + listing = sorted( + self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"] + ) + listing = [ + l + for l in listing + if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore) + ] + folders = { + "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "directory" + } + files = { + "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "file" + } + if self.filters: + files = { + k: v + for k, v in files.items() + if any(v.endswith(ext) for ext in self.filters) + } + self.main.set_options(dict(**folders, **files)) + + def protocol_changed(self, *_): + self._fs = None + self.main.options = [] + self.url.value = "" + + def home_clicked(self, *_): + self.protocol.value = self.init_protocol + self.kwargs.value = self.init_kwargs + self.url.value = self.init_url + self.go_clicked() + + def up_clicked(self, *_): + self.url.value = self.fs._parent(self.url.value) + self.go_clicked() diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__init__.py b/MLPY/Lib/site-packages/fsspec/implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97774ce7f805649ec0c2029014b5587f089d592a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/arrow.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/arrow.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..454da7057d3b816653b724510912a8ad9a0e8d9a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/arrow.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4fd8d99cc4fc50badd42023caee39cddedceb55 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99872a6db80987963adff8d8f3a364034d6ae908 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cached.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cached.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76e3f0cc49747031e695d407bb287e3827106a6c Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/cached.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dask.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dask.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbdd25f592e39c09e259096aef3f3f375bf479ea Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dask.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/data.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/data.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..912eea929b1715c5a977b8cbd146adb17f983324 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/data.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fff62253f2be90b98b7dff684cea0297b48b200 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94d8d4c653a9d52fd3ee90ddf1d690bebc8a6f03 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/ftp.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/ftp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47e4409020205093476a0857959695d9f94a938a Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/ftp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/git.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/git.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46bd0c3df7e2bb61dfc0044ff6b7cd1bb976f95e Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/git.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/github.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/github.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..826b01d43d109649c8ecf81e57ece5bef431ddb5 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/github.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/http.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/http.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a238c7379bd348f0d9411fa97bb41296536871c Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/http.cpython-39.pyc differ 
diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..289b870642316fa41062b3b806166de73810ee90 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba5fb2f600645c556b3ab6a45a7affb282b9e1a2 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/local.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/local.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fe337f898fed154f898887a460e11975c636253 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/local.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/memory.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/memory.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12e523f367f1b2af6255961fe628dcbbc0394837 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/memory.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/reference.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/reference.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a426c6b5264668ad6d59b541eb9b38d8519dcb8b Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/reference.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/sftp.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/sftp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dfc132984c5ecea5e9f74b85e30c45a5671c26c Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/sftp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/smb.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/smb.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5588a2fc4fa5928d92ad11f855c641a2b6c7ea6b Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/smb.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/tar.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/tar.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92fd035df89815b46d731222426e14c598518dca Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/tar.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..905a2c04d2e967713d734903171e4bef0ba517e2 Binary files /dev/null and 
b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/zip.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/zip.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33ede88de9da75fbae4191d5478a51fd57695920 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/implementations/__pycache__/zip.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/implementations/arrow.py b/MLPY/Lib/site-packages/fsspec/implementations/arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..f9fea70d25fe2974e19d35186eee5de60a008eb4 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/arrow.py @@ -0,0 +1,304 @@ +import errno +import io +import os +import secrets +import shutil +from contextlib import suppress +from functools import cached_property, wraps +from urllib.parse import parse_qs + +from fsspec.spec import AbstractFileSystem +from fsspec.utils import ( + get_package_version_without_import, + infer_storage_options, + mirror_from, + tokenize, +) + + +def wrap_exceptions(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except OSError as exception: + if not exception.args: + raise + + message, *args = exception.args + if isinstance(message, str) and "does not exist" in message: + raise FileNotFoundError(errno.ENOENT, message) from exception + else: + raise + + return wrapper + + +PYARROW_VERSION = None + + +class ArrowFSWrapper(AbstractFileSystem): + """FSSpec-compatible wrapper of pyarrow.fs.FileSystem. + + Parameters + ---------- + fs : pyarrow.fs.FileSystem + + """ + + root_marker = "/" + + def __init__(self, fs, **kwargs): + global PYARROW_VERSION + PYARROW_VERSION = get_package_version_without_import("pyarrow") + self.fs = fs + super().__init__(**kwargs) + + @property + def protocol(self): + return self.fs.type_name + + @cached_property + def fsid(self): + return "hdfs_" + tokenize(self.fs.host, self.fs.port) + + @classmethod + def _strip_protocol(cls, path): + ops = infer_storage_options(path) + path = ops["path"] + if path.startswith("//"): + # special case for "hdfs://path" (without the triple slash) + path = path[1:] + return path + + def ls(self, path, detail=False, **kwargs): + path = self._strip_protocol(path) + from pyarrow.fs import FileSelector + + entries = [ + self._make_entry(entry) + for entry in self.fs.get_file_info(FileSelector(path)) + ] + if detail: + return entries + else: + return [entry["name"] for entry in entries] + + def info(self, path, **kwargs): + path = self._strip_protocol(path) + [info] = self.fs.get_file_info([path]) + return self._make_entry(info) + + def exists(self, path): + path = self._strip_protocol(path) + try: + self.info(path) + except FileNotFoundError: + return False + else: + return True + + def _make_entry(self, info): + from pyarrow.fs import FileType + + if info.type is FileType.Directory: + kind = "directory" + elif info.type is FileType.File: + kind = "file" + elif info.type is FileType.NotFound: + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path) + else: + kind = "other" + + return { + "name": info.path, + "size": info.size, + "type": kind, + "mtime": info.mtime, + } + + @wrap_exceptions + def cp_file(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1).rstrip("/") + path2 = self._strip_protocol(path2).rstrip("/") + + with self._open(path1, "rb") as lstream: 
+ tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}" + try: + with self.open(tmp_fname, "wb") as rstream: + shutil.copyfileobj(lstream, rstream) + self.fs.move(tmp_fname, path2) + except BaseException: # noqa + with suppress(FileNotFoundError): + self.fs.delete_file(tmp_fname) + raise + + @wrap_exceptions + def mv(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1).rstrip("/") + path2 = self._strip_protocol(path2).rstrip("/") + self.fs.move(path1, path2) + + @wrap_exceptions + def rm_file(self, path): + path = self._strip_protocol(path) + self.fs.delete_file(path) + + @wrap_exceptions + def rm(self, path, recursive=False, maxdepth=None): + path = self._strip_protocol(path).rstrip("/") + if self.isdir(path): + if recursive: + self.fs.delete_dir(path) + else: + raise ValueError("Can't delete directories without recursive=False") + else: + self.fs.delete_file(path) + + @wrap_exceptions + def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs): + if mode == "rb": + if seekable: + method = self.fs.open_input_file + else: + method = self.fs.open_input_stream + elif mode == "wb": + method = self.fs.open_output_stream + elif mode == "ab": + method = self.fs.open_append_stream + else: + raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}") + + _kwargs = {} + if mode != "rb" or not seekable: + if int(PYARROW_VERSION.split(".")[0]) >= 4: + # disable compression auto-detection + _kwargs["compression"] = None + stream = method(path, **_kwargs) + + return ArrowFile(self, stream, path, mode, block_size, **kwargs) + + @wrap_exceptions + def mkdir(self, path, create_parents=True, **kwargs): + path = self._strip_protocol(path) + if create_parents: + self.makedirs(path, exist_ok=True) + else: + self.fs.create_dir(path, recursive=False) + + @wrap_exceptions + def makedirs(self, path, exist_ok=False): + path = self._strip_protocol(path) + self.fs.create_dir(path, recursive=True) + + @wrap_exceptions + def rmdir(self, path): + path = self._strip_protocol(path) + self.fs.delete_dir(path) + + @wrap_exceptions + def modified(self, path): + path = self._strip_protocol(path) + return self.fs.get_file_info(path).mtime + + def cat_file(self, path, start=None, end=None, **kwargs): + kwargs["seekable"] = start not in [None, 0] + return super().cat_file(path, start=None, end=None, **kwargs) + + def get_file(self, rpath, lpath, **kwargs): + kwargs["seekable"] = False + super().get_file(rpath, lpath, **kwargs) + + +@mirror_from( + "stream", + [ + "read", + "seek", + "tell", + "write", + "readable", + "writable", + "close", + "size", + "seekable", + ], +) +class ArrowFile(io.IOBase): + def __init__(self, fs, stream, path, mode, block_size=None, **kwargs): + self.path = path + self.mode = mode + + self.fs = fs + self.stream = stream + + self.blocksize = self.block_size = block_size + self.kwargs = kwargs + + def __enter__(self): + return self + + def __exit__(self, *args): + return self.close() + + +class HadoopFileSystem(ArrowFSWrapper): + """A wrapper on top of the pyarrow.fs.HadoopFileSystem + to connect it's interface with fsspec""" + + protocol = "hdfs" + + def __init__( + self, + host="default", + port=0, + user=None, + kerb_ticket=None, + replication=3, + extra_conf=None, + **kwargs, + ): + """ + + Parameters + ---------- + host: str + Hostname, IP or "default" to try to read from Hadoop config + port: int + Port to connect on, or default from Hadoop config if 0 + user: str or None + If given, connect as this username + kerb_ticket: str or None + If given, use this 
ticket for authentication + replication: int + set replication factor of file for write operations. default value is 3. + extra_conf: None or dict + Passed on to HadoopFileSystem + """ + from pyarrow.fs import HadoopFileSystem + + fs = HadoopFileSystem( + host=host, + port=port, + user=user, + kerb_ticket=kerb_ticket, + replication=replication, + extra_conf=extra_conf, + ) + super().__init__(fs=fs, **kwargs) + + @staticmethod + def _get_kwargs_from_urls(path): + ops = infer_storage_options(path) + out = {} + if ops.get("host", None): + out["host"] = ops["host"] + if ops.get("username", None): + out["user"] = ops["username"] + if ops.get("port", None): + out["port"] = ops["port"] + if ops.get("url_query", None): + queries = parse_qs(ops["url_query"]) + if queries.get("replication", None): + out["replication"] = int(queries["replication"][0]) + return out diff --git a/MLPY/Lib/site-packages/fsspec/implementations/cache_mapper.py b/MLPY/Lib/site-packages/fsspec/implementations/cache_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..6e7c7d88afdddf12f77b26bb635bd8bf1e2bd7f1 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/cache_mapper.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import abc +import hashlib + +from fsspec.implementations.local import make_path_posix + + +class AbstractCacheMapper(abc.ABC): + """Abstract super-class for mappers from remote URLs to local cached + basenames. + """ + + @abc.abstractmethod + def __call__(self, path: str) -> str: ... + + def __eq__(self, other: object) -> bool: + # Identity only depends on class. When derived classes have attributes + # they will need to be included. + return isinstance(other, type(self)) + + def __hash__(self) -> int: + # Identity only depends on class. When derived classes have attributes + # they will need to be included. + return hash(type(self)) + + +class BasenameCacheMapper(AbstractCacheMapper): + """Cache mapper that uses the basename of the remote URL and a fixed number + of directory levels above this. + + The default is zero directory levels, meaning different paths with the same + basename will have the same cached basename. + """ + + def __init__(self, directory_levels: int = 0): + if directory_levels < 0: + raise ValueError( + "BasenameCacheMapper requires zero or positive directory_levels" + ) + self.directory_levels = directory_levels + + # Separator for directories when encoded as strings. + self._separator = "_@_" + + def __call__(self, path: str) -> str: + path = make_path_posix(path) + prefix, *bits = path.rsplit("/", self.directory_levels + 1) + if bits: + return self._separator.join(bits) + else: + return prefix # No separator found, simple filename + + def __eq__(self, other: object) -> bool: + return super().__eq__(other) and self.directory_levels == other.directory_levels + + def __hash__(self) -> int: + return super().__hash__() ^ hash(self.directory_levels) + + +class HashCacheMapper(AbstractCacheMapper): + """Cache mapper that uses a hash of the remote URL.""" + + def __call__(self, path: str) -> str: + return hashlib.sha256(path.encode()).hexdigest() + + +def create_cache_mapper(same_names: bool) -> AbstractCacheMapper: + """Factory method to create cache mapper for backward compatibility with + ``CachingFileSystem`` constructor using ``same_names`` kwarg. 
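    A short sketch of how the two mappers turn a remote URL into a cached
    basename (the URL is illustrative):

        from fsspec.implementations.cache_mapper import (
            BasenameCacheMapper,
            HashCacheMapper,
            create_cache_mapper,
        )

        url = "s3://bucket/raw/data.csv"
        print(BasenameCacheMapper()(url))                    # 'data.csv'
        print(BasenameCacheMapper(directory_levels=1)(url))  # 'raw_@_data.csv'
        print(HashCacheMapper()(url))                        # sha256 hex digest of the URL
        assert isinstance(create_cache_mapper(same_names=True), BasenameCacheMapper)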
+ """ + if same_names: + return BasenameCacheMapper() + else: + return HashCacheMapper() diff --git a/MLPY/Lib/site-packages/fsspec/implementations/cache_metadata.py b/MLPY/Lib/site-packages/fsspec/implementations/cache_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..bd9b5cdd99d7f4a0a989c0f7d0c70ddcf324816a --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/cache_metadata.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import os +import pickle +import time +from typing import TYPE_CHECKING + +from fsspec.utils import atomic_write + +try: + import ujson as json +except ImportError: + if not TYPE_CHECKING: + import json + +if TYPE_CHECKING: + from typing import Any, Dict, Iterator, Literal + + from typing_extensions import TypeAlias + + from .cached import CachingFileSystem + + Detail: TypeAlias = Dict[str, Any] + + +class CacheMetadata: + """Cache metadata. + + All reading and writing of cache metadata is performed by this class, + accessing the cached files and blocks is not. + + Metadata is stored in a single file per storage directory in JSON format. + For backward compatibility, also reads metadata stored in pickle format + which is converted to JSON when next saved. + """ + + def __init__(self, storage: list[str]): + """ + + Parameters + ---------- + storage: list[str] + Directories containing cached files, must be at least one. Metadata + is stored in the last of these directories by convention. + """ + if not storage: + raise ValueError("CacheMetadata expects at least one storage location") + + self._storage = storage + self.cached_files: list[Detail] = [{}] + + # Private attribute to force saving of metadata in pickle format rather than + # JSON for use in tests to confirm can read both pickle and JSON formats. + self._force_save_pickle = False + + def _load(self, fn: str) -> Detail: + """Low-level function to load metadata from specific file""" + try: + with open(fn, "r") as f: + loaded = json.load(f) + except ValueError: + with open(fn, "rb") as f: + loaded = pickle.load(f) + for c in loaded.values(): + if isinstance(c.get("blocks"), list): + c["blocks"] = set(c["blocks"]) + return loaded + + def _save(self, metadata_to_save: Detail, fn: str) -> None: + """Low-level function to save metadata to specific file""" + if self._force_save_pickle: + with atomic_write(fn) as f: + pickle.dump(metadata_to_save, f) + else: + with atomic_write(fn, mode="w") as f: + json.dump(metadata_to_save, f) + + def _scan_locations( + self, writable_only: bool = False + ) -> Iterator[tuple[str, str, bool]]: + """Yield locations (filenames) where metadata is stored, and whether + writable or not. + + Parameters + ---------- + writable: bool + Set to True to only yield writable locations. + + Returns + ------- + Yields (str, str, bool) + """ + n = len(self._storage) + for i, storage in enumerate(self._storage): + writable = i == n - 1 + if writable_only and not writable: + continue + yield os.path.join(storage, "cache"), storage, writable + + def check_file( + self, path: str, cfs: CachingFileSystem | None + ) -> Literal[False] | tuple[Detail, str]: + """If path is in cache return its details, otherwise return ``False``. + + If the optional CachingFileSystem is specified then it is used to + perform extra checks to reject possible matches, such as if they are + too old. 
+ """ + for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files): + if path not in cache: + continue + detail = cache[path].copy() + + if cfs is not None: + if cfs.check_files and detail["uid"] != cfs.fs.ukey(path): + # Wrong file as determined by hash of file properties + continue + if cfs.expiry and time.time() - detail["time"] > cfs.expiry: + # Cached file has expired + continue + + fn = os.path.join(base, detail["fn"]) + if os.path.exists(fn): + return detail, fn + return False + + def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]: + """Remove expired metadata from the cache. + + Returns names of files corresponding to expired metadata and a boolean + flag indicating whether the writable cache is empty. Caller is + responsible for deleting the expired files. + """ + expired_files = [] + for path, detail in self.cached_files[-1].copy().items(): + if time.time() - detail["time"] > expiry_time: + fn = detail.get("fn", "") + if not fn: + raise RuntimeError( + f"Cache metadata does not contain 'fn' for {path}" + ) + fn = os.path.join(self._storage[-1], fn) + expired_files.append(fn) + self.cached_files[-1].pop(path) + + if self.cached_files[-1]: + cache_path = os.path.join(self._storage[-1], "cache") + self._save(self.cached_files[-1], cache_path) + + writable_cache_empty = not self.cached_files[-1] + return expired_files, writable_cache_empty + + def load(self) -> None: + """Load all metadata from disk and store in ``self.cached_files``""" + cached_files = [] + for fn, _, _ in self._scan_locations(): + if os.path.exists(fn): + # TODO: consolidate blocks here + cached_files.append(self._load(fn)) + else: + cached_files.append({}) + self.cached_files = cached_files or [{}] + + def on_close_cached_file(self, f: Any, path: str) -> None: + """Perform side-effect actions on closing a cached file. + + The actual closing of the file is the responsibility of the caller. + """ + # File must be writeble, so in self.cached_files[-1] + c = self.cached_files[-1][path] + if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size: + c["blocks"] = True + + def pop_file(self, path: str) -> str | None: + """Remove metadata of cached file. + + If path is in the cache, return the filename of the cached file, + otherwise return ``None``. Caller is responsible for deleting the + cached file. + """ + details = self.check_file(path, None) + if not details: + return None + _, fn = details + if fn.startswith(self._storage[-1]): + self.cached_files[-1].pop(path) + self.save() + else: + raise PermissionError( + "Can only delete cached file in last, writable cache location" + ) + return fn + + def save(self) -> None: + """Save metadata to disk""" + for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files): + if not writable: + continue + + if os.path.exists(fn): + cached_files = self._load(fn) + for k, c in cached_files.items(): + if k in cache: + if c["blocks"] is True or cache[k]["blocks"] is True: + c["blocks"] = True + else: + # self.cached_files[*][*]["blocks"] must continue to + # point to the same set object so that updates + # performed by MMapCache are propagated back to + # self.cached_files. 
+ blocks = cache[k]["blocks"] + blocks.update(c["blocks"]) + c["blocks"] = blocks + c["time"] = max(c["time"], cache[k]["time"]) + c["uid"] = cache[k]["uid"] + + # Files can be added to cache after it was written once + for k, c in cache.items(): + if k not in cached_files: + cached_files[k] = c + else: + cached_files = cache + cache = {k: v.copy() for k, v in cached_files.items()} + for c in cache.values(): + if isinstance(c["blocks"], set): + c["blocks"] = list(c["blocks"]) + self._save(cache, fn) + self.cached_files[-1] = cached_files + + def update_file(self, path: str, detail: Detail) -> None: + """Update metadata for specific file in memory, do not save""" + self.cached_files[-1][path] = detail diff --git a/MLPY/Lib/site-packages/fsspec/implementations/cached.py b/MLPY/Lib/site-packages/fsspec/implementations/cached.py new file mode 100644 index 0000000000000000000000000000000000000000..447e4f26770108c09d92f4fee74340ecbaa211e4 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/cached.py @@ -0,0 +1,929 @@ +from __future__ import annotations + +import inspect +import logging +import os +import tempfile +import time +import weakref +from shutil import rmtree +from typing import TYPE_CHECKING, Any, Callable, ClassVar + +from fsspec import AbstractFileSystem, filesystem +from fsspec.callbacks import DEFAULT_CALLBACK +from fsspec.compression import compr +from fsspec.core import BaseCache, MMapCache +from fsspec.exceptions import BlocksizeMismatchError +from fsspec.implementations.cache_mapper import create_cache_mapper +from fsspec.implementations.cache_metadata import CacheMetadata +from fsspec.spec import AbstractBufferedFile +from fsspec.transaction import Transaction +from fsspec.utils import infer_compression + +if TYPE_CHECKING: + from fsspec.implementations.cache_mapper import AbstractCacheMapper + +logger = logging.getLogger("fsspec.cached") + + +class WriteCachedTransaction(Transaction): + def complete(self, commit=True): + rpaths = [f.path for f in self.files] + lpaths = [f.fn for f in self.files] + if commit: + self.fs.put(lpaths, rpaths) + self.files.clear() + self.fs._intrans = False + self.fs._transaction = None + self.fs = None # break cycle + + +class CachingFileSystem(AbstractFileSystem): + """Locally caching filesystem, layer over any other FS + + This class implements chunk-wise local storage of remote files, for quick + access after the initial download. The files are stored in a given + directory with hashes of URLs for the filenames. If no directory is given, + a temporary one is used, which should be cleaned up by the OS after the + process ends. The files themselves are sparse (as implemented in + :class:`~fsspec.caching.MMapCache`), so only the data which is accessed + takes up space. + + Restrictions: + + - the block-size must be the same for each access of a given file, unless + all blocks of the file have already been read + - caching can only be applied to file-systems which produce files + derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also + allowed, for testing + """ + + protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached") + + def __init__( + self, + target_protocol=None, + cache_storage="TMP", + cache_check=10, + check_files=False, + expiry_time=604800, + target_options=None, + fs=None, + same_names: bool | None = None, + compression=None, + cache_mapper: AbstractCacheMapper | None = None, + **kwargs, + ): + """ + + Parameters + ---------- + target_protocol: str (optional) + Target filesystem protocol. 
Provide either this or ``fs``. + cache_storage: str or list(str) + Location to store files. If "TMP", this is a temporary directory, + and will be cleaned up by the OS when this process ends (or later). + If a list, each location will be tried in the order given, but + only the last will be considered writable. + cache_check: int + Number of seconds between reload of cache metadata + check_files: bool + Whether to explicitly see if the UID of the remote file matches + the stored one before using. Warning: some file systems such as + HTTP cannot reliably give a unique hash of the contents of some + path, so be sure to set this option to False. + expiry_time: int + The time in seconds after which a local copy is considered useless. + Set to falsy to prevent expiry. The default is equivalent to one + week. + target_options: dict or None + Passed to the instantiation of the FS, if fs is None. + fs: filesystem instance + The target filesystem to run against. Provide this or ``protocol``. + same_names: bool (optional) + By default, target URLs are hashed using a ``HashCacheMapper`` so + that files from different backends with the same basename do not + conflict. If this argument is ``true``, a ``BasenameCacheMapper`` + is used instead. Other cache mapper options are available by using + the ``cache_mapper`` keyword argument. Only one of this and + ``cache_mapper`` should be specified. + compression: str (optional) + To decompress on download. Can be 'infer' (guess from the URL name), + one of the entries in ``fsspec.compression.compr``, or None for no + decompression. + cache_mapper: AbstractCacheMapper (optional) + The object use to map from original filenames to cached filenames. + Only one of this and ``same_names`` should be specified. + """ + super().__init__(**kwargs) + if fs is None and target_protocol is None: + raise ValueError( + "Please provide filesystem instance(fs) or target_protocol" + ) + if not (fs is None) ^ (target_protocol is None): + raise ValueError( + "Both filesystems (fs) and target_protocol may not be both given." + ) + if cache_storage == "TMP": + tempdir = tempfile.mkdtemp() + storage = [tempdir] + weakref.finalize(self, self._remove_tempdir, tempdir) + else: + if isinstance(cache_storage, str): + storage = [cache_storage] + else: + storage = cache_storage + os.makedirs(storage[-1], exist_ok=True) + self.storage = storage + self.kwargs = target_options or {} + self.cache_check = cache_check + self.check_files = check_files + self.expiry = expiry_time + self.compression = compression + + # Size of cache in bytes. If None then the size is unknown and will be + # recalculated the next time cache_size() is called. On writes to the + # cache this is reset to None. 
+ self._cache_size = None + + if same_names is not None and cache_mapper is not None: + raise ValueError( + "Cannot specify both same_names and cache_mapper in " + "CachingFileSystem.__init__" + ) + if cache_mapper is not None: + self._mapper = cache_mapper + else: + self._mapper = create_cache_mapper( + same_names if same_names is not None else False + ) + + self.target_protocol = ( + target_protocol + if isinstance(target_protocol, str) + else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]) + ) + self._metadata = CacheMetadata(self.storage) + self.load_cache() + self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs) + + def _strip_protocol(path): + # acts as a method, since each instance has a difference target + return self.fs._strip_protocol(type(self)._strip_protocol(path)) + + self._strip_protocol: Callable = _strip_protocol + + @staticmethod + def _remove_tempdir(tempdir): + try: + rmtree(tempdir) + except Exception: + pass + + def _mkcache(self): + os.makedirs(self.storage[-1], exist_ok=True) + + def cache_size(self): + """Return size of cache in bytes. + + If more than one cache directory is in use, only the size of the last + one (the writable cache directory) is returned. + """ + if self._cache_size is None: + cache_dir = self.storage[-1] + self._cache_size = filesystem("file").du(cache_dir, withdirs=True) + return self._cache_size + + def load_cache(self): + """Read set of stored blocks from file""" + self._metadata.load() + self._mkcache() + self.last_cache = time.time() + + def save_cache(self): + """Save set of stored blocks from file""" + self._mkcache() + self._metadata.save() + self.last_cache = time.time() + self._cache_size = None + + def _check_cache(self): + """Reload caches if time elapsed or any disappeared""" + self._mkcache() + if not self.cache_check: + # explicitly told not to bother checking + return + timecond = time.time() - self.last_cache > self.cache_check + existcond = all(os.path.exists(storage) for storage in self.storage) + if timecond or not existcond: + self.load_cache() + + def _check_file(self, path): + """Is path in cache and still valid""" + path = self._strip_protocol(path) + self._check_cache() + return self._metadata.check_file(path, self) + + def clear_cache(self): + """Remove all files and metadata from the cache + + In the case of multiple cache locations, this clears only the last one, + which is assumed to be the read/write one. + """ + rmtree(self.storage[-1]) + self.load_cache() + self._cache_size = None + + def clear_expired_cache(self, expiry_time=None): + """Remove all expired files and metadata from the cache + + In the case of multiple cache locations, this clears only the last one, + which is assumed to be the read/write one. + + Parameters + ---------- + expiry_time: int + The time in seconds after which a local copy is considered useless. + If not defined the default is equivalent to the attribute from the + file caching instantiation. + """ + + if not expiry_time: + expiry_time = self.expiry + + self._check_cache() + + expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time) + for fn in expired_files: + if os.path.exists(fn): + os.remove(fn) + + if writable_cache_empty: + rmtree(self.storage[-1]) + self.load_cache() + + self._cache_size = None + + def pop_from_cache(self, path): + """Remove cached version of given file + + Deletes local copy of the given (remote) path. 
If it is found in a cache + location which is not the last, it is assumed to be read-only, and + raises PermissionError + """ + path = self._strip_protocol(path) + fn = self._metadata.pop_file(path) + if fn is not None: + os.remove(fn) + self._cache_size = None + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + """Wrap the target _open + + If the whole file exists in the cache, just open it locally and + return that. + + Otherwise, open the file on the target FS, and make it have a mmap + cache pointing to the location which we determine, in our cache. + The ``blocks`` instance is shared, so as the mmap cache instance + updates, so does the entry in our ``cached_files`` attribute. + We monkey-patch this file, so that when it closes, we call + ``close_and_update`` to save the state of the blocks. + """ + path = self._strip_protocol(path) + + path = self.fs._strip_protocol(path) + if "r" not in mode: + return self.fs._open( + path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_options=cache_options, + **kwargs, + ) + detail = self._check_file(path) + if detail: + # file is in cache + detail, fn = detail + hash, blocks = detail["fn"], detail["blocks"] + if blocks is True: + # stored file is complete + logger.debug("Opening local copy of %s", path) + return open(fn, mode) + # TODO: action where partial file exists in read-only cache + logger.debug("Opening partially cached copy of %s", path) + else: + hash = self._mapper(path) + fn = os.path.join(self.storage[-1], hash) + blocks = set() + detail = { + "original": path, + "fn": hash, + "blocks": blocks, + "time": time.time(), + "uid": self.fs.ukey(path), + } + self._metadata.update_file(path, detail) + logger.debug("Creating local sparse file for %s", path) + + # call target filesystems open + self._mkcache() + f = self.fs._open( + path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_options=cache_options, + cache_type="none", + **kwargs, + ) + if self.compression: + comp = ( + infer_compression(path) + if self.compression == "infer" + else self.compression + ) + f = compr[comp](f, mode="rb") + if "blocksize" in detail: + if detail["blocksize"] != f.blocksize: + raise BlocksizeMismatchError( + f"Cached file must be reopened with same block" + f" size as original (old: {detail['blocksize']}," + f" new {f.blocksize})" + ) + else: + detail["blocksize"] = f.blocksize + f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks) + close = f.close + f.close = lambda: self.close_and_update(f, close) + self.save_cache() + return f + + def _parent(self, path): + return self.fs._parent(path) + + def hash_name(self, path: str, *args: Any) -> str: + # Kept for backward compatibility with downstream libraries. + # Ignores extra arguments, previously same_name boolean. 
+ return self._mapper(path) + + def close_and_update(self, f, close): + """Called when a file is closing, so store the set of blocks""" + if f.closed: + return + path = self._strip_protocol(f.path) + self._metadata.on_close_cached_file(f, path) + try: + logger.debug("going to save") + self.save_cache() + logger.debug("saved") + except OSError: + logger.debug("Cache saving failed while closing file") + except NameError: + logger.debug("Cache save failed due to interpreter shutdown") + close() + f.closed = True + + def ls(self, path, detail=True): + return self.fs.ls(path, detail) + + def __getattribute__(self, item): + if item in { + "load_cache", + "_open", + "save_cache", + "close_and_update", + "__init__", + "__getattribute__", + "__reduce__", + "_make_local_details", + "open", + "cat", + "cat_file", + "cat_ranges", + "get", + "read_block", + "tail", + "head", + "info", + "ls", + "exists", + "isfile", + "isdir", + "_check_file", + "_check_cache", + "_mkcache", + "clear_cache", + "clear_expired_cache", + "pop_from_cache", + "local_file", + "_paths_from_path", + "get_mapper", + "open_many", + "commit_many", + "hash_name", + "__hash__", + "__eq__", + "to_json", + "to_dict", + "cache_size", + "pipe_file", + "pipe", + "start_transaction", + "end_transaction", + }: + # all the methods defined in this class. Note `open` here, since + # it calls `_open`, but is actually in superclass + return lambda *args, **kw: getattr(type(self), item).__get__(self)( + *args, **kw + ) + if item in ["__reduce_ex__"]: + raise AttributeError + if item in ["transaction"]: + # property + return type(self).transaction.__get__(self) + if item in ["_cache", "transaction_type"]: + # class attributes + return getattr(type(self), item) + if item == "__class__": + return type(self) + d = object.__getattribute__(self, "__dict__") + fs = d.get("fs", None) # fs is not immediately defined + if item in d: + return d[item] + elif fs is not None: + if item in fs.__dict__: + # attribute of instance + return fs.__dict__[item] + # attributed belonging to the target filesystem + cls = type(fs) + m = getattr(cls, item) + if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and ( + not hasattr(m, "__self__") or m.__self__ is None + ): + # instance method + return m.__get__(fs, cls) + return m # class method or attribute + else: + # attributes of the superclass, while target is being set up + return super().__getattribute__(item) + + def __eq__(self, other): + """Test for equality.""" + if self is other: + return True + if not isinstance(other, type(self)): + return False + return ( + self.storage == other.storage + and self.kwargs == other.kwargs + and self.cache_check == other.cache_check + and self.check_files == other.check_files + and self.expiry == other.expiry + and self.compression == other.compression + and self._mapper == other._mapper + and self.target_protocol == other.target_protocol + ) + + def __hash__(self): + """Calculate hash.""" + return ( + hash(tuple(self.storage)) + ^ hash(str(self.kwargs)) + ^ hash(self.cache_check) + ^ hash(self.check_files) + ^ hash(self.expiry) + ^ hash(self.compression) + ^ hash(self._mapper) + ^ hash(self.target_protocol) + ) + + +class WholeFileCacheFileSystem(CachingFileSystem): + """Caches whole remote files on first access + + This class is intended as a layer over any other file system, and + will make a local copy of each file accessed, so that all subsequent + reads are local. 
This is similar to ``CachingFileSystem``, but without + the block-wise functionality and so can work even when sparse files + are not allowed. See its docstring for definition of the init + arguments. + + The class still needs access to the remote store for listing files, + and may refresh cached files. + """ + + protocol = "filecache" + local_file = True + + def open_many(self, open_files, **kwargs): + paths = [of.path for of in open_files] + if "r" in open_files.mode: + self._mkcache() + else: + return [ + LocalTempFile( + self.fs, + path, + mode=open_files.mode, + fn=os.path.join(self.storage[-1], self._mapper(path)), + **kwargs, + ) + for path in paths + ] + + if self.compression: + raise NotImplementedError + details = [self._check_file(sp) for sp in paths] + downpath = [p for p, d in zip(paths, details) if not d] + downfn0 = [ + os.path.join(self.storage[-1], self._mapper(p)) + for p, d in zip(paths, details) + ] # keep these path names for opening later + downfn = [fn for fn, d in zip(downfn0, details) if not d] + if downpath: + # skip if all files are already cached and up to date + self.fs.get(downpath, downfn) + + # update metadata - only happens when downloads are successful + newdetail = [ + { + "original": path, + "fn": self._mapper(path), + "blocks": True, + "time": time.time(), + "uid": self.fs.ukey(path), + } + for path in downpath + ] + for path, detail in zip(downpath, newdetail): + self._metadata.update_file(path, detail) + self.save_cache() + + def firstpart(fn): + # helper to adapt both whole-file and simple-cache + return fn[1] if isinstance(fn, tuple) else fn + + return [ + open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode) + for fn0, fn1 in zip(details, downfn0) + ] + + def commit_many(self, open_files): + self.fs.put([f.fn for f in open_files], [f.path for f in open_files]) + [f.close() for f in open_files] + for f in open_files: + # in case autocommit is off, and so close did not already delete + try: + os.remove(f.name) + except FileNotFoundError: + pass + self._cache_size = None + + def _make_local_details(self, path): + hash = self._mapper(path) + fn = os.path.join(self.storage[-1], hash) + detail = { + "original": path, + "fn": hash, + "blocks": True, + "time": time.time(), + "uid": self.fs.ukey(path), + } + self._metadata.update_file(path, detail) + logger.debug("Copying %s to local cache", path) + return fn + + def cat( + self, + path, + recursive=False, + on_error="raise", + callback=DEFAULT_CALLBACK, + **kwargs, + ): + paths = self.expand_path( + path, recursive=recursive, maxdepth=kwargs.get("maxdepth", None) + ) + getpaths = [] + storepaths = [] + fns = [] + out = {} + for p in paths.copy(): + try: + detail = self._check_file(p) + if not detail: + fn = self._make_local_details(p) + getpaths.append(p) + storepaths.append(fn) + else: + detail, fn = detail if isinstance(detail, tuple) else (None, detail) + fns.append(fn) + except Exception as e: + if on_error == "raise": + raise + if on_error == "return": + out[p] = e + paths.remove(p) + + if getpaths: + self.fs.get(getpaths, storepaths) + self.save_cache() + + callback.set_size(len(paths)) + for p, fn in zip(paths, fns): + with open(fn, "rb") as f: + out[p] = f.read() + callback.relative_update(1) + if isinstance(path, str) and len(paths) == 1 and recursive is False: + out = out[paths[0]] + return out + + def _open(self, path, mode="rb", **kwargs): + path = self._strip_protocol(path) + if "r" not in mode: + hash = self._mapper(path) + fn = os.path.join(self.storage[-1], hash) + 
user_specified_kwargs = { + k: v + for k, v in kwargs.items() + # those kwargs were added by open(), we don't want them + if k not in ["autocommit", "block_size", "cache_options"] + } + return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs) + detail = self._check_file(path) + if detail: + detail, fn = detail + _, blocks = detail["fn"], detail["blocks"] + if blocks is True: + logger.debug("Opening local copy of %s", path) + + # In order to support downstream filesystems to be able to + # infer the compression from the original filename, like + # the `TarFileSystem`, let's extend the `io.BufferedReader` + # fileobject protocol by adding a dedicated attribute + # `original`. + f = open(fn, mode) + f.original = detail.get("original") + return f + else: + raise ValueError( + f"Attempt to open partially cached file {path}" + f" as a wholly cached file" + ) + else: + fn = self._make_local_details(path) + kwargs["mode"] = mode + + # call target filesystems open + self._mkcache() + if self.compression: + with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: + if isinstance(f, AbstractBufferedFile): + # want no type of caching if just downloading whole thing + f.cache = BaseCache(0, f.cache.fetcher, f.size) + comp = ( + infer_compression(path) + if self.compression == "infer" + else self.compression + ) + f = compr[comp](f, mode="rb") + data = True + while data: + block = getattr(f, "blocksize", 5 * 2**20) + data = f.read(block) + f2.write(data) + else: + self.fs.get_file(path, fn) + self.save_cache() + return self._open(path, mode) + + +class SimpleCacheFileSystem(WholeFileCacheFileSystem): + """Caches whole remote files on first access + + This class is intended as a layer over any other file system, and + will make a local copy of each file accessed, so that all subsequent + reads are local. This implementation only copies whole files, and + does not keep any metadata about the download time or file details. + It is therefore safer to use in multi-threaded/concurrent situations. + + This is the only of the caching filesystems that supports write: you will + be given a real local open file, and upon close and commit, it will be + uploaded to the target filesystem; the writability or the target URL is + not checked until that time. + + """ + + protocol = "simplecache" + local_file = True + transaction_type = WriteCachedTransaction + + def __init__(self, **kwargs): + kw = kwargs.copy() + for key in ["cache_check", "expiry_time", "check_files"]: + kw[key] = False + super().__init__(**kw) + for storage in self.storage: + if not os.path.exists(storage): + os.makedirs(storage, exist_ok=True) + + def _check_file(self, path): + self._check_cache() + sha = self._mapper(path) + for storage in self.storage: + fn = os.path.join(storage, sha) + if os.path.exists(fn): + return fn + + def save_cache(self): + pass + + def load_cache(self): + pass + + def pipe_file(self, path, value=None, **kwargs): + if self._intrans: + with self.open(path, "wb") as f: + f.write(value) + else: + super().pipe_file(path, value) + + def ls(self, path, detail=True, **kwargs): + path = self._strip_protocol(path) + details = [] + try: + details = self.fs.ls( + path, detail=True, **kwargs + ).copy() # don't edit original! 
+ except FileNotFoundError as e: + ex = e + else: + ex = None + if self._intrans: + path1 = path.rstrip("/") + "/" + for f in self.transaction.files: + if f.path == path: + details.append( + {"name": path, "size": f.size or f.tell(), "type": "file"} + ) + elif f.path.startswith(path1): + if f.path.count("/") == path1.count("/"): + details.append( + {"name": f.path, "size": f.size or f.tell(), "type": "file"} + ) + else: + dname = "/".join(f.path.split("/")[: path1.count("/") + 1]) + details.append({"name": dname, "size": 0, "type": "directory"}) + if ex is not None and not details: + raise ex + if detail: + return details + return sorted(_["name"] for _ in details) + + def info(self, path, **kwargs): + path = self._strip_protocol(path) + if self._intrans: + f = [_ for _ in self.transaction.files if _.path == path] + if f: + size = os.path.getsize(f[0].fn) if f[0].closed else f[0].tell() + return {"name": path, "size": size, "type": "file"} + f = any(_.path.startswith(path + "/") for _ in self.transaction.files) + if f: + return {"name": path, "size": 0, "type": "directory"} + return self.fs.info(path, **kwargs) + + def pipe(self, path, value=None, **kwargs): + if isinstance(path, str): + self.pipe_file(self._strip_protocol(path), value, **kwargs) + elif isinstance(path, dict): + for k, v in path.items(): + self.pipe_file(self._strip_protocol(k), v, **kwargs) + else: + raise ValueError("path must be str or dict") + + def cat_ranges( + self, paths, starts, ends, max_gap=None, on_error="return", **kwargs + ): + lpaths = [self._check_file(p) for p in paths] + rpaths = [p for l, p in zip(lpaths, paths) if l is False] + lpaths = [l for l, p in zip(lpaths, paths) if l is False] + self.fs.get(rpaths, lpaths) + return super().cat_ranges( + paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs + ) + + def _open(self, path, mode="rb", **kwargs): + path = self._strip_protocol(path) + sha = self._mapper(path) + + if "r" not in mode: + fn = os.path.join(self.storage[-1], sha) + user_specified_kwargs = { + k: v + for k, v in kwargs.items() + if k not in ["autocommit", "block_size", "cache_options"] + } # those were added by open() + return LocalTempFile( + self, + path, + mode=mode, + autocommit=not self._intrans, + fn=fn, + **user_specified_kwargs, + ) + fn = self._check_file(path) + if fn: + return open(fn, mode) + + fn = os.path.join(self.storage[-1], sha) + logger.debug("Copying %s to local cache", path) + kwargs["mode"] = mode + + self._mkcache() + self._cache_size = None + if self.compression: + with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: + if isinstance(f, AbstractBufferedFile): + # want no type of caching if just downloading whole thing + f.cache = BaseCache(0, f.cache.fetcher, f.size) + comp = ( + infer_compression(path) + if self.compression == "infer" + else self.compression + ) + f = compr[comp](f, mode="rb") + data = True + while data: + block = getattr(f, "blocksize", 5 * 2**20) + data = f.read(block) + f2.write(data) + else: + self.fs.get_file(path, fn) + return self._open(path, mode) + + +class LocalTempFile: + """A temporary local file, which will be uploaded on commit""" + + def __init__(self, fs, path, fn, mode="wb", autocommit=True, seek=0, **kwargs): + self.fn = fn + self.fh = open(fn, mode) + self.mode = mode + if seek: + self.fh.seek(seek) + self.path = path + self.size = None + self.fs = fs + self.closed = False + self.autocommit = autocommit + self.kwargs = kwargs + + def __reduce__(self): + # always open in r+b to allow continuing writing at a 
location + return ( + LocalTempFile, + (self.fs, self.path, self.fn, "r+b", self.autocommit, self.tell()), + ) + + def __enter__(self): + return self.fh + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + # self.size = self.fh.tell() + if self.closed: + return + self.fh.close() + self.closed = True + if self.autocommit: + self.commit() + + def discard(self): + self.fh.close() + os.remove(self.fn) + + def commit(self): + self.fs.put(self.fn, self.path, **self.kwargs) + # we do not delete local copy - it's still in the cache + + @property + def name(self): + return self.fn + + def __repr__(self) -> str: + return f"LocalTempFile: {self.path}" + + def __getattr__(self, item): + return getattr(self.fh, item) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/dask.py b/MLPY/Lib/site-packages/fsspec/implementations/dask.py new file mode 100644 index 0000000000000000000000000000000000000000..3e1276463db6866665e6a0fe114efc247971b57e --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/dask.py @@ -0,0 +1,152 @@ +import dask +from distributed.client import Client, _get_global_client +from distributed.worker import Worker + +from fsspec import filesystem +from fsspec.spec import AbstractBufferedFile, AbstractFileSystem +from fsspec.utils import infer_storage_options + + +def _get_client(client): + if client is None: + return _get_global_client() + elif isinstance(client, Client): + return client + else: + # e.g., connection string + return Client(client) + + +def _in_worker(): + return bool(Worker._instances) + + +class DaskWorkerFileSystem(AbstractFileSystem): + """View files accessible to a worker as any other remote file-system + + When instances are run on the worker, uses the real filesystem. When + run on the client, they call the worker to provide information or data. + + **Warning** this implementation is experimental, and read-only for now. 
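    Illustrative sketch (assumes a running ``distributed`` cluster and a path
    visible to the workers; the path below is a placeholder):

        >>> import fsspec
        >>> fs = fsspec.filesystem("dask", target_protocol="file")
        >>> fs.ls("/shared/data")  # routed through a worker when called from the client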
+ """ + + def __init__( + self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs + ): + super().__init__(**kwargs) + if not (fs is None) ^ (target_protocol is None): + raise ValueError( + "Please provide one of filesystem instance (fs) or" + " target_protocol, not both" + ) + self.target_protocol = target_protocol + self.target_options = target_options + self.worker = None + self.client = client + self.fs = fs + self._determine_worker() + + @staticmethod + def _get_kwargs_from_urls(path): + so = infer_storage_options(path) + if "host" in so and "port" in so: + return {"client": f"{so['host']}:{so['port']}"} + else: + return {} + + def _determine_worker(self): + if _in_worker(): + self.worker = True + if self.fs is None: + self.fs = filesystem( + self.target_protocol, **(self.target_options or {}) + ) + else: + self.worker = False + self.client = _get_client(self.client) + self.rfs = dask.delayed(self) + + def mkdir(self, *args, **kwargs): + if self.worker: + self.fs.mkdir(*args, **kwargs) + else: + self.rfs.mkdir(*args, **kwargs).compute() + + def rm(self, *args, **kwargs): + if self.worker: + self.fs.rm(*args, **kwargs) + else: + self.rfs.rm(*args, **kwargs).compute() + + def copy(self, *args, **kwargs): + if self.worker: + self.fs.copy(*args, **kwargs) + else: + self.rfs.copy(*args, **kwargs).compute() + + def mv(self, *args, **kwargs): + if self.worker: + self.fs.mv(*args, **kwargs) + else: + self.rfs.mv(*args, **kwargs).compute() + + def ls(self, *args, **kwargs): + if self.worker: + return self.fs.ls(*args, **kwargs) + else: + return self.rfs.ls(*args, **kwargs).compute() + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + if self.worker: + return self.fs._open( + path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_options=cache_options, + **kwargs, + ) + else: + return DaskFile( + fs=self, + path=path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_options=cache_options, + **kwargs, + ) + + def fetch_range(self, path, mode, start, end): + if self.worker: + with self._open(path, mode) as f: + f.seek(start) + return f.read(end - start) + else: + return self.rfs.fetch_range(path, mode, start, end).compute() + + +class DaskFile(AbstractBufferedFile): + def __init__(self, mode="rb", **kwargs): + if mode != "rb": + raise ValueError('Remote dask files can only be opened in "rb" mode') + super().__init__(**kwargs) + + def _upload_chunk(self, final=False): + pass + + def _initiate_upload(self): + """Create remote file/upload""" + pass + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + return self.fs.fetch_range(self.path, self.mode, start, end) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/data.py b/MLPY/Lib/site-packages/fsspec/implementations/data.py new file mode 100644 index 0000000000000000000000000000000000000000..519032305bed633f2ba8a6148076433caf81710b --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/data.py @@ -0,0 +1,58 @@ +import base64 +import io +from typing import Optional +from urllib.parse import unquote + +from fsspec import AbstractFileSystem + + +class DataFileSystem(AbstractFileSystem): + """A handy decoder for data-URLs + + Example + ------- + >>> with fsspec.open("data:,Hello%2C%20World%21") as f: + ... print(f.read()) + b"Hello, World!" 
+ + See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs + """ + + protocol = "data" + + def __init__(self, **kwargs): + """No parameters for this filesystem""" + super().__init__(**kwargs) + + def cat_file(self, path, start=None, end=None, **kwargs): + pref, data = path.split(",", 1) + if pref.endswith("base64"): + return base64.b64decode(data)[start:end] + return unquote(data).encode()[start:end] + + def info(self, path, **kwargs): + pref, name = path.split(",", 1) + data = self.cat_file(path) + mime = pref.split(":", 1)[1].split(";", 1)[0] + return {"name": name, "size": len(data), "type": "file", "mimetype": mime} + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + if "r" not in mode: + raise ValueError("Read only filesystem") + return io.BytesIO(self.cat_file(path)) + + @staticmethod + def encode(data: bytes, mime: Optional[str] = None): + """Format the given data into data-URL syntax + + This version always base64 encodes, even when the data is ascii/url-safe. + """ + return f"data:{mime or ''};base64,{base64.b64encode(data).decode()}" diff --git a/MLPY/Lib/site-packages/fsspec/implementations/dbfs.py b/MLPY/Lib/site-packages/fsspec/implementations/dbfs.py new file mode 100644 index 0000000000000000000000000000000000000000..ce9f9eadb798577970ee95530743b4521813ca7c --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/dbfs.py @@ -0,0 +1,467 @@ +import base64 +import urllib + +import requests +import requests.exceptions +from requests.adapters import HTTPAdapter, Retry + +from fsspec import AbstractFileSystem +from fsspec.spec import AbstractBufferedFile + + +class DatabricksException(Exception): + """ + Helper class for exceptions raised in this module. + """ + + def __init__(self, error_code, message): + """Create a new DatabricksException""" + super().__init__(message) + + self.error_code = error_code + self.message = message + + +class DatabricksFileSystem(AbstractFileSystem): + """ + Get access to the Databricks filesystem implementation over HTTP. + Can be used inside and outside of a databricks cluster. + """ + + def __init__(self, instance, token, **kwargs): + """ + Create a new DatabricksFileSystem. + + Parameters + ---------- + instance: str + The instance URL of the databricks cluster. + For example for an Azure databricks cluster, this + has the form adb-..azuredatabricks.net. + token: str + Your personal token. Find out more + here: https://docs.databricks.com/dev-tools/api/latest/authentication.html + """ + self.instance = instance + self.token = token + self.session = requests.Session() + self.retries = Retry( + total=10, + backoff_factor=0.05, + status_forcelist=[408, 429, 500, 502, 503, 504], + ) + + self.session.mount("https://", HTTPAdapter(max_retries=self.retries)) + self.session.headers.update({"Authorization": f"Bearer {self.token}"}) + + super().__init__(**kwargs) + + def ls(self, path, detail=True, **kwargs): + """ + List the contents of the given path. + + Parameters + ---------- + path: str + Absolute path + detail: bool + Return not only the list of filenames, + but also additional information on file sizes + and types. 
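        Illustrative call (the instance URL and token below are placeholders,
        not real credentials):

            >>> fs = DatabricksFileSystem(
            ...     instance="adb-1234567890123456.7.azuredatabricks.net",
            ...     token="<personal-access-token>",
            ... )
            >>> fs.ls("/FileStore", detail=False)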
+ """ + out = self._ls_from_cache(path) + if not out: + try: + r = self._send_to_api( + method="get", endpoint="list", json={"path": path} + ) + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + raise FileNotFoundError(e.message) + + raise e + files = r["files"] + out = [ + { + "name": o["path"], + "type": "directory" if o["is_dir"] else "file", + "size": o["file_size"], + } + for o in files + ] + self.dircache[path] = out + + if detail: + return out + return [o["name"] for o in out] + + def makedirs(self, path, exist_ok=True): + """ + Create a given absolute path and all of its parents. + + Parameters + ---------- + path: str + Absolute path to create + exist_ok: bool + If false, checks if the folder + exists before creating it (and raises an + Exception if this is the case) + """ + if not exist_ok: + try: + # If the following succeeds, the path is already present + self._send_to_api( + method="get", endpoint="get-status", json={"path": path} + ) + raise FileExistsError(f"Path {path} already exists") + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + pass + + try: + self._send_to_api(method="post", endpoint="mkdirs", json={"path": path}) + except DatabricksException as e: + if e.error_code == "RESOURCE_ALREADY_EXISTS": + raise FileExistsError(e.message) + + raise e + self.invalidate_cache(self._parent(path)) + + def mkdir(self, path, create_parents=True, **kwargs): + """ + Create a given absolute path and all of its parents. + + Parameters + ---------- + path: str + Absolute path to create + create_parents: bool + Whether to create all parents or not. + "False" is not implemented so far. + """ + if not create_parents: + raise NotImplementedError + + self.mkdirs(path, **kwargs) + + def rm(self, path, recursive=False, **kwargs): + """ + Remove the file or folder at the given absolute path. + + Parameters + ---------- + path: str + Absolute path what to remove + recursive: bool + Recursively delete all files in a folder. + """ + try: + self._send_to_api( + method="post", + endpoint="delete", + json={"path": path, "recursive": recursive}, + ) + except DatabricksException as e: + # This is not really an exception, it just means + # not everything was deleted so far + if e.error_code == "PARTIAL_DELETE": + self.rm(path=path, recursive=recursive) + elif e.error_code == "IO_ERROR": + # Using the same exception as the os module would use here + raise OSError(e.message) + + raise e + self.invalidate_cache(self._parent(path)) + + def mv( + self, source_path, destination_path, recursive=False, maxdepth=None, **kwargs + ): + """ + Move a source to a destination path. + + A note from the original [databricks API manual] + (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move). + + When moving a large number of files the API call will time out after + approximately 60s, potentially resulting in partially moved data. + Therefore, for operations that move more than 10k files, we strongly + discourage using the DBFS REST API. + + Parameters + ---------- + source_path: str + From where to move (absolute path) + destination_path: str + To where to move (absolute path) + recursive: bool + Not implemented to far. + maxdepth: + Not implemented to far. 
+ """ + if recursive: + raise NotImplementedError + if maxdepth: + raise NotImplementedError + + try: + self._send_to_api( + method="post", + endpoint="move", + json={"source_path": source_path, "destination_path": destination_path}, + ) + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + raise FileNotFoundError(e.message) + elif e.error_code == "RESOURCE_ALREADY_EXISTS": + raise FileExistsError(e.message) + + raise e + self.invalidate_cache(self._parent(source_path)) + self.invalidate_cache(self._parent(destination_path)) + + def _open(self, path, mode="rb", block_size="default", **kwargs): + """ + Overwrite the base class method to make sure to create a DBFile. + All arguments are copied from the base method. + + Only the default blocksize is allowed. + """ + return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs) + + def _send_to_api(self, method, endpoint, json): + """ + Send the given json to the DBFS API + using a get or post request (specified by the argument `method`). + + Parameters + ---------- + method: str + Which http method to use for communication; "get" or "post". + endpoint: str + Where to send the request to (last part of the API URL) + json: dict + Dictionary of information to send + """ + if method == "post": + session_call = self.session.post + elif method == "get": + session_call = self.session.get + else: + raise ValueError(f"Do not understand method {method}") + + url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint) + + r = session_call(url, json=json) + + # The DBFS API will return a json, also in case of an exception. + # We want to preserve this information as good as possible. + try: + r.raise_for_status() + except requests.HTTPError as e: + # try to extract json error message + # if that fails, fall back to the original exception + try: + exception_json = e.response.json() + except Exception: + raise e + + raise DatabricksException(**exception_json) + + return r.json() + + def _create_handle(self, path, overwrite=True): + """ + Internal function to create a handle, which can be used to + write blocks of a file to DBFS. + A handle has a unique identifier which needs to be passed + whenever written during this transaction. + The handle is active for 10 minutes - after that a new + write transaction needs to be created. + Make sure to close the handle after you are finished. + + Parameters + ---------- + path: str + Absolute path for this file. + overwrite: bool + If a file already exist at this location, either overwrite + it or raise an exception. + """ + try: + r = self._send_to_api( + method="post", + endpoint="create", + json={"path": path, "overwrite": overwrite}, + ) + return r["handle"] + except DatabricksException as e: + if e.error_code == "RESOURCE_ALREADY_EXISTS": + raise FileExistsError(e.message) + + raise e + + def _close_handle(self, handle): + """ + Close a handle, which was opened by :func:`_create_handle`. + + Parameters + ---------- + handle: str + Which handle to close. + """ + try: + self._send_to_api(method="post", endpoint="close", json={"handle": handle}) + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + raise FileNotFoundError(e.message) + + raise e + + def _add_data(self, handle, data): + """ + Upload data to an already opened file handle + (opened by :func:`_create_handle`). + The maximal allowed data size is 1MB after + conversion to base64. + Remember to close the handle when you are finished. 
+ + Parameters + ---------- + handle: str + Which handle to upload data to. + data: bytes + Block of data to add to the handle. + """ + data = base64.b64encode(data).decode() + try: + self._send_to_api( + method="post", + endpoint="add-block", + json={"handle": handle, "data": data}, + ) + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + raise FileNotFoundError(e.message) + elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED": + raise ValueError(e.message) + + raise e + + def _get_data(self, path, start, end): + """ + Download data in bytes from a given absolute path in a block + from [start, start+length]. + The maximum number of allowed bytes to read is 1MB. + + Parameters + ---------- + path: str + Absolute path to download data from + start: int + Start position of the block + end: int + End position of the block + """ + try: + r = self._send_to_api( + method="get", + endpoint="read", + json={"path": path, "offset": start, "length": end - start}, + ) + return base64.b64decode(r["data"]) + except DatabricksException as e: + if e.error_code == "RESOURCE_DOES_NOT_EXIST": + raise FileNotFoundError(e.message) + elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]: + raise ValueError(e.message) + + raise e + + def invalidate_cache(self, path=None): + if path is None: + self.dircache.clear() + else: + self.dircache.pop(path, None) + super().invalidate_cache(path) + + +class DatabricksFile(AbstractBufferedFile): + """ + Helper class for files referenced in the DatabricksFileSystem. + """ + + DEFAULT_BLOCK_SIZE = 1 * 2**20 # only allowed block size + + def __init__( + self, + fs, + path, + mode="rb", + block_size="default", + autocommit=True, + cache_type="readahead", + cache_options=None, + **kwargs, + ): + """ + Create a new instance of the DatabricksFile. + + The blocksize needs to be the default one. 
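        In normal use this class is not constructed directly; it is returned by
        ``open`` on the filesystem. Illustrative sketch (the path is a placeholder):

            >>> with fs.open("/FileStore/out.bin", "wb") as f:
            ...     f.write(b"some bytes")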
+ """ + if block_size is None or block_size == "default": + block_size = self.DEFAULT_BLOCK_SIZE + + assert ( + block_size == self.DEFAULT_BLOCK_SIZE + ), f"Only the default block size is allowed, not {block_size}" + + super().__init__( + fs, + path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_type=cache_type, + cache_options=cache_options or {}, + **kwargs, + ) + + def _initiate_upload(self): + """Internal function to start a file upload""" + self.handle = self.fs._create_handle(self.path) + + def _upload_chunk(self, final=False): + """Internal function to add a chunk of data to a started upload""" + self.buffer.seek(0) + data = self.buffer.getvalue() + + data_chunks = [ + data[start:end] for start, end in self._to_sized_blocks(len(data)) + ] + + for data_chunk in data_chunks: + self.fs._add_data(handle=self.handle, data=data_chunk) + + if final: + self.fs._close_handle(handle=self.handle) + return True + + def _fetch_range(self, start, end): + """Internal function to download a block of data""" + return_buffer = b"" + length = end - start + for chunk_start, chunk_end in self._to_sized_blocks(length, start): + return_buffer += self.fs._get_data( + path=self.path, start=chunk_start, end=chunk_end + ) + + return return_buffer + + def _to_sized_blocks(self, length, start=0): + """Helper function to split a range from 0 to total_length into bloksizes""" + end = start + length + for data_chunk in range(start, end, self.blocksize): + data_start = data_chunk + data_end = min(end, data_chunk + self.blocksize) + yield data_start, data_end diff --git a/MLPY/Lib/site-packages/fsspec/implementations/dirfs.py b/MLPY/Lib/site-packages/fsspec/implementations/dirfs.py new file mode 100644 index 0000000000000000000000000000000000000000..04f7479adf6ce24edddfede1712d3fd48c3fef87 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/dirfs.py @@ -0,0 +1,366 @@ +from .. import filesystem +from ..asyn import AsyncFileSystem + + +class DirFileSystem(AsyncFileSystem): + """Directory prefix filesystem + + The DirFileSystem is a filesystem-wrapper. It assumes every path it is dealing with + is relative to the `path`. After performing the necessary paths operation it + delegates everything to the wrapped filesystem. + """ + + protocol = "dir" + + def __init__( + self, + path=None, + fs=None, + fo=None, + target_protocol=None, + target_options=None, + **storage_options, + ): + """ + Parameters + ---------- + path: str + Path to the directory. + fs: AbstractFileSystem + An instantiated filesystem to wrap. 
+ target_protocol, target_options: + if fs is none, construct it from these + fo: str + Alternate for path; do not provide both + """ + super().__init__(**storage_options) + if fs is None: + fs = filesystem(protocol=target_protocol, **(target_options or {})) + if (path is not None) ^ (fo is not None) is False: + raise ValueError("Provide path or fo, not both") + path = path or fo + + if self.asynchronous and not fs.async_impl: + raise ValueError("can't use asynchronous with non-async fs") + + if fs.async_impl and self.asynchronous != fs.asynchronous: + raise ValueError("both dirfs and fs should be in the same sync/async mode") + + self.path = fs._strip_protocol(path) + self.fs = fs + + def _join(self, path): + if isinstance(path, str): + if not self.path: + return path + if not path: + return self.path + return self.fs.sep.join((self.path, self._strip_protocol(path))) + if isinstance(path, dict): + return {self._join(_path): value for _path, value in path.items()} + return [self._join(_path) for _path in path] + + def _relpath(self, path): + if isinstance(path, str): + if not self.path: + return path + if path == self.path: + return "" + prefix = self.path + self.fs.sep + assert path.startswith(prefix) + return path[len(prefix) :] + return [self._relpath(_path) for _path in path] + + # Wrappers below + + @property + def sep(self): + return self.fs.sep + + async def set_session(self, *args, **kwargs): + return await self.fs.set_session(*args, **kwargs) + + async def _rm_file(self, path, **kwargs): + return await self.fs._rm_file(self._join(path), **kwargs) + + def rm_file(self, path, **kwargs): + return self.fs.rm_file(self._join(path), **kwargs) + + async def _rm(self, path, *args, **kwargs): + return await self.fs._rm(self._join(path), *args, **kwargs) + + def rm(self, path, *args, **kwargs): + return self.fs.rm(self._join(path), *args, **kwargs) + + async def _cp_file(self, path1, path2, **kwargs): + return await self.fs._cp_file(self._join(path1), self._join(path2), **kwargs) + + def cp_file(self, path1, path2, **kwargs): + return self.fs.cp_file(self._join(path1), self._join(path2), **kwargs) + + async def _copy( + self, + path1, + path2, + *args, + **kwargs, + ): + return await self.fs._copy( + self._join(path1), + self._join(path2), + *args, + **kwargs, + ) + + def copy(self, path1, path2, *args, **kwargs): + return self.fs.copy( + self._join(path1), + self._join(path2), + *args, + **kwargs, + ) + + async def _pipe(self, path, *args, **kwargs): + return await self.fs._pipe(self._join(path), *args, **kwargs) + + def pipe(self, path, *args, **kwargs): + return self.fs.pipe(self._join(path), *args, **kwargs) + + async def _pipe_file(self, path, *args, **kwargs): + return await self.fs._pipe_file(self._join(path), *args, **kwargs) + + def pipe_file(self, path, *args, **kwargs): + return self.fs.pipe_file(self._join(path), *args, **kwargs) + + async def _cat_file(self, path, *args, **kwargs): + return await self.fs._cat_file(self._join(path), *args, **kwargs) + + def cat_file(self, path, *args, **kwargs): + return self.fs.cat_file(self._join(path), *args, **kwargs) + + async def _cat(self, path, *args, **kwargs): + ret = await self.fs._cat( + self._join(path), + *args, + **kwargs, + ) + + if isinstance(ret, dict): + return {self._relpath(key): value for key, value in ret.items()} + + return ret + + def cat(self, path, *args, **kwargs): + ret = self.fs.cat( + self._join(path), + *args, + **kwargs, + ) + + if isinstance(ret, dict): + return {self._relpath(key): value for key, value in 
ret.items()} + + return ret + + async def _put_file(self, lpath, rpath, **kwargs): + return await self.fs._put_file(lpath, self._join(rpath), **kwargs) + + def put_file(self, lpath, rpath, **kwargs): + return self.fs.put_file(lpath, self._join(rpath), **kwargs) + + async def _put( + self, + lpath, + rpath, + *args, + **kwargs, + ): + return await self.fs._put( + lpath, + self._join(rpath), + *args, + **kwargs, + ) + + def put(self, lpath, rpath, *args, **kwargs): + return self.fs.put( + lpath, + self._join(rpath), + *args, + **kwargs, + ) + + async def _get_file(self, rpath, lpath, **kwargs): + return await self.fs._get_file(self._join(rpath), lpath, **kwargs) + + def get_file(self, rpath, lpath, **kwargs): + return self.fs.get_file(self._join(rpath), lpath, **kwargs) + + async def _get(self, rpath, *args, **kwargs): + return await self.fs._get(self._join(rpath), *args, **kwargs) + + def get(self, rpath, *args, **kwargs): + return self.fs.get(self._join(rpath), *args, **kwargs) + + async def _isfile(self, path): + return await self.fs._isfile(self._join(path)) + + def isfile(self, path): + return self.fs.isfile(self._join(path)) + + async def _isdir(self, path): + return await self.fs._isdir(self._join(path)) + + def isdir(self, path): + return self.fs.isdir(self._join(path)) + + async def _size(self, path): + return await self.fs._size(self._join(path)) + + def size(self, path): + return self.fs.size(self._join(path)) + + async def _exists(self, path): + return await self.fs._exists(self._join(path)) + + def exists(self, path): + return self.fs.exists(self._join(path)) + + async def _info(self, path, **kwargs): + return await self.fs._info(self._join(path), **kwargs) + + def info(self, path, **kwargs): + return self.fs.info(self._join(path), **kwargs) + + async def _ls(self, path, detail=True, **kwargs): + ret = (await self.fs._ls(self._join(path), detail=detail, **kwargs)).copy() + if detail: + out = [] + for entry in ret: + entry = entry.copy() + entry["name"] = self._relpath(entry["name"]) + out.append(entry) + return out + + return self._relpath(ret) + + def ls(self, path, detail=True, **kwargs): + ret = self.fs.ls(self._join(path), detail=detail, **kwargs).copy() + if detail: + out = [] + for entry in ret: + entry = entry.copy() + entry["name"] = self._relpath(entry["name"]) + out.append(entry) + return out + + return self._relpath(ret) + + async def _walk(self, path, *args, **kwargs): + async for root, dirs, files in self.fs._walk(self._join(path), *args, **kwargs): + yield self._relpath(root), dirs, files + + def walk(self, path, *args, **kwargs): + for root, dirs, files in self.fs.walk(self._join(path), *args, **kwargs): + yield self._relpath(root), dirs, files + + async def _glob(self, path, **kwargs): + detail = kwargs.get("detail", False) + ret = await self.fs._glob(self._join(path), **kwargs) + if detail: + return {self._relpath(path): info for path, info in ret.items()} + return self._relpath(ret) + + def glob(self, path, **kwargs): + detail = kwargs.get("detail", False) + ret = self.fs.glob(self._join(path), **kwargs) + if detail: + return {self._relpath(path): info for path, info in ret.items()} + return self._relpath(ret) + + async def _du(self, path, *args, **kwargs): + total = kwargs.get("total", True) + ret = await self.fs._du(self._join(path), *args, **kwargs) + if total: + return ret + + return {self._relpath(path): size for path, size in ret.items()} + + def du(self, path, *args, **kwargs): + total = kwargs.get("total", True) + ret = self.fs.du(self._join(path), 
*args, **kwargs) + if total: + return ret + + return {self._relpath(path): size for path, size in ret.items()} + + async def _find(self, path, *args, **kwargs): + detail = kwargs.get("detail", False) + ret = await self.fs._find(self._join(path), *args, **kwargs) + if detail: + return {self._relpath(path): info for path, info in ret.items()} + return self._relpath(ret) + + def find(self, path, *args, **kwargs): + detail = kwargs.get("detail", False) + ret = self.fs.find(self._join(path), *args, **kwargs) + if detail: + return {self._relpath(path): info for path, info in ret.items()} + return self._relpath(ret) + + async def _expand_path(self, path, *args, **kwargs): + return self._relpath( + await self.fs._expand_path(self._join(path), *args, **kwargs) + ) + + def expand_path(self, path, *args, **kwargs): + return self._relpath(self.fs.expand_path(self._join(path), *args, **kwargs)) + + async def _mkdir(self, path, *args, **kwargs): + return await self.fs._mkdir(self._join(path), *args, **kwargs) + + def mkdir(self, path, *args, **kwargs): + return self.fs.mkdir(self._join(path), *args, **kwargs) + + async def _makedirs(self, path, *args, **kwargs): + return await self.fs._makedirs(self._join(path), *args, **kwargs) + + def makedirs(self, path, *args, **kwargs): + return self.fs.makedirs(self._join(path), *args, **kwargs) + + def rmdir(self, path): + return self.fs.rmdir(self._join(path)) + + def mv(self, path1, path2, **kwargs): + return self.fs.mv( + self._join(path1), + self._join(path2), + **kwargs, + ) + + def touch(self, path, **kwargs): + return self.fs.touch(self._join(path), **kwargs) + + def created(self, path): + return self.fs.created(self._join(path)) + + def modified(self, path): + return self.fs.modified(self._join(path)) + + def sign(self, path, *args, **kwargs): + return self.fs.sign(self._join(path), *args, **kwargs) + + def __repr__(self): + return f"{self.__class__.__qualname__}(path='{self.path}', fs={self.fs})" + + def open( + self, + path, + *args, + **kwargs, + ): + return self.fs.open( + self._join(path), + *args, + **kwargs, + ) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/ftp.py b/MLPY/Lib/site-packages/fsspec/implementations/ftp.py new file mode 100644 index 0000000000000000000000000000000000000000..415f4844952f362188561a3e41425d364a115400 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/ftp.py @@ -0,0 +1,385 @@ +import os +import sys +import uuid +import warnings +from ftplib import FTP, Error, error_perm +from typing import Any + +from ..spec import AbstractBufferedFile, AbstractFileSystem +from ..utils import infer_storage_options, isfilelike + + +class FTPFileSystem(AbstractFileSystem): + """A filesystem over classic FTP""" + + root_marker = "/" + cachable = False + protocol = "ftp" + + def __init__( + self, + host, + port=21, + username=None, + password=None, + acct=None, + block_size=None, + tempdir=None, + timeout=30, + encoding="utf-8", + **kwargs, + ): + """ + You can use _get_kwargs_from_urls to get some kwargs from + a reasonable FTP url. + + Authentication will be anonymous if username/password are not + given. + + Parameters + ---------- + host: str + The remote server name/ip to connect to + port: int + Port to connect with + username: str or None + If authenticating, the user's identifier + password: str of None + User's password on the server, if using + acct: str or None + Some servers also need an "account" string for auth + block_size: int or None + If given, the read-ahead or write buffer size. 
+ tempdir: str + Directory on remote to put temporary files when in a transaction + timeout: int + Timeout of the ftp connection in seconds + encoding: str + Encoding to use for directories and filenames in FTP connection + """ + super().__init__(**kwargs) + self.host = host + self.port = port + self.tempdir = tempdir or "/tmp" + self.cred = username, password, acct + self.timeout = timeout + self.encoding = encoding + if block_size is not None: + self.blocksize = block_size + else: + self.blocksize = 2**16 + self._connect() + + def _connect(self): + if sys.version_info >= (3, 9): + self.ftp = FTP(timeout=self.timeout, encoding=self.encoding) + elif self.encoding: + warnings.warn("`encoding` not supported for python<3.9, ignoring") + self.ftp = FTP(timeout=self.timeout) + else: + self.ftp = FTP(timeout=self.timeout) + self.ftp.connect(self.host, self.port) + self.ftp.login(*self.cred) + + @classmethod + def _strip_protocol(cls, path): + return "/" + infer_storage_options(path)["path"].lstrip("/").rstrip("/") + + @staticmethod + def _get_kwargs_from_urls(urlpath): + out = infer_storage_options(urlpath) + out.pop("path", None) + out.pop("protocol", None) + return out + + def ls(self, path, detail=True, **kwargs): + path = self._strip_protocol(path) + out = [] + if path not in self.dircache: + try: + try: + out = [ + (fn, details) + for (fn, details) in self.ftp.mlsd(path) + if fn not in [".", ".."] + and details["type"] not in ["pdir", "cdir"] + ] + except error_perm: + out = _mlsd2(self.ftp, path) # Not platform independent + for fn, details in out: + if path == "/": + path = "" # just for forming the names, below + details["name"] = "/".join([path, fn.lstrip("/")]) + if details["type"] == "file": + details["size"] = int(details["size"]) + else: + details["size"] = 0 + if details["type"] == "dir": + details["type"] = "directory" + self.dircache[path] = out + except Error: + try: + info = self.info(path) + if info["type"] == "file": + out = [(path, info)] + except (Error, IndexError): + raise FileNotFoundError(path) + files = self.dircache.get(path, out) + if not detail: + return sorted([fn for fn, details in files]) + return [details for fn, details in files] + + def info(self, path, **kwargs): + # implement with direct method + path = self._strip_protocol(path) + if path == "/": + # special case, since this dir has no real entry + return {"name": "/", "size": 0, "type": "directory"} + files = self.ls(self._parent(path).lstrip("/"), True) + try: + out = [f for f in files if f["name"] == path][0] + except IndexError: + raise FileNotFoundError(path) + return out + + def get_file(self, rpath, lpath, **kwargs): + if self.isdir(rpath): + if not os.path.exists(lpath): + os.mkdir(lpath) + return + if isfilelike(lpath): + outfile = lpath + else: + outfile = open(lpath, "wb") + + def cb(x): + outfile.write(x) + + self.ftp.retrbinary( + f"RETR {rpath}", + blocksize=self.blocksize, + callback=cb, + ) + if not isfilelike(lpath): + outfile.close() + + def cat_file(self, path, start=None, end=None, **kwargs): + if end is not None: + return super().cat_file(path, start, end, **kwargs) + out = [] + + def cb(x): + out.append(x) + + try: + self.ftp.retrbinary( + f"RETR {path}", + blocksize=self.blocksize, + rest=start, + callback=cb, + ) + except (Error, error_perm) as orig_exc: + raise FileNotFoundError(path) from orig_exc + return b"".join(out) + + def _open( + self, + path, + mode="rb", + block_size=None, + cache_options=None, + autocommit=True, + **kwargs, + ): + path = self._strip_protocol(path) + 
block_size = block_size or self.blocksize + return FTPFile( + self, + path, + mode=mode, + block_size=block_size, + tempdir=self.tempdir, + autocommit=autocommit, + cache_options=cache_options, + ) + + def _rm(self, path): + path = self._strip_protocol(path) + self.ftp.delete(path) + self.invalidate_cache(self._parent(path)) + + def rm(self, path, recursive=False, maxdepth=None): + paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth) + for p in reversed(paths): + if self.isfile(p): + self.rm_file(p) + else: + self.rmdir(p) + + def mkdir(self, path: str, create_parents: bool = True, **kwargs: Any) -> None: + path = self._strip_protocol(path) + parent = self._parent(path) + if parent != self.root_marker and not self.exists(parent) and create_parents: + self.mkdir(parent, create_parents=create_parents) + + self.ftp.mkd(path) + self.invalidate_cache(self._parent(path)) + + def makedirs(self, path: str, exist_ok: bool = False) -> None: + path = self._strip_protocol(path) + if self.exists(path): + # NB: "/" does not "exist" as it has no directory entry + if not exist_ok: + raise FileExistsError(f"{path} exists without `exist_ok`") + # exists_ok=True -> no-op + else: + self.mkdir(path, create_parents=True) + + def rmdir(self, path): + path = self._strip_protocol(path) + self.ftp.rmd(path) + self.invalidate_cache(self._parent(path)) + + def mv(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1) + path2 = self._strip_protocol(path2) + self.ftp.rename(path1, path2) + self.invalidate_cache(self._parent(path1)) + self.invalidate_cache(self._parent(path2)) + + def __del__(self): + self.ftp.close() + + def invalidate_cache(self, path=None): + if path is None: + self.dircache.clear() + else: + self.dircache.pop(path, None) + super().invalidate_cache(path) + + +class TransferDone(Exception): + """Internal exception to break out of transfer""" + + pass + + +class FTPFile(AbstractBufferedFile): + """Interact with a remote FTP file with read/write buffering""" + + def __init__( + self, + fs, + path, + mode="rb", + block_size="default", + autocommit=True, + cache_type="readahead", + cache_options=None, + **kwargs, + ): + super().__init__( + fs, + path, + mode=mode, + block_size=block_size, + autocommit=autocommit, + cache_type=cache_type, + cache_options=cache_options, + **kwargs, + ) + if not autocommit: + self.target = self.path + self.path = "/".join([kwargs["tempdir"], str(uuid.uuid4())]) + + def commit(self): + self.fs.mv(self.path, self.target) + + def discard(self): + self.fs.rm(self.path) + + def _fetch_range(self, start, end): + """Get bytes between given byte limits + + Implemented by raising an exception in the fetch callback when the + number of bytes received reaches the requested amount. + + Will fail if the server does not respect the REST command on + retrieve requests. 
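        A standalone sketch of the same early-abort pattern with plain
        ``ftplib`` (host, path and byte count are placeholders):

            from ftplib import FTP

            class Done(Exception):
                pass

            chunks, want = [], 64 * 1024

            def grab(data):
                chunks.append(data)
                if sum(len(c) for c in chunks) >= want:
                    raise Done  # got enough bytes, stop the transfer early

            ftp = FTP("ftp.example.com")
            ftp.login()  # anonymous
            try:
                ftp.retrbinary("RETR /pub/somefile.bin", grab)
            except Done:
                ftp.abort()
            head = b"".join(chunks)[:want]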
+ """ + out = [] + total = [0] + + def callback(x): + total[0] += len(x) + if total[0] > end - start: + out.append(x[: (end - start) - total[0]]) + if end < self.size: + raise TransferDone + else: + out.append(x) + + if total[0] == end - start and end < self.size: + raise TransferDone + + try: + self.fs.ftp.retrbinary( + f"RETR {self.path}", + blocksize=self.blocksize, + rest=start, + callback=callback, + ) + except TransferDone: + try: + # stop transfer, we got enough bytes for this block + self.fs.ftp.abort() + self.fs.ftp.getmultiline() + except Error: + self.fs._connect() + + return b"".join(out) + + def _upload_chunk(self, final=False): + self.buffer.seek(0) + self.fs.ftp.storbinary( + f"STOR {self.path}", self.buffer, blocksize=self.blocksize, rest=self.offset + ) + return True + + +def _mlsd2(ftp, path="."): + """ + Fall back to using `dir` instead of `mlsd` if not supported. + + This parses a Linux style `ls -l` response to `dir`, but the response may + be platform dependent. + + Parameters + ---------- + ftp: ftplib.FTP + path: str + Expects to be given path, but defaults to ".". + """ + lines = [] + minfo = [] + ftp.dir(path, lines.append) + for line in lines: + split_line = line.split() + if len(split_line) < 9: + continue + this = ( + split_line[-1], + { + "modify": " ".join(split_line[5:8]), + "unix.owner": split_line[2], + "unix.group": split_line[3], + "unix.mode": split_line[0], + "size": split_line[4], + }, + ) + if "d" == this[1]["unix.mode"][0]: + this[1]["type"] = "dir" + else: + this[1]["type"] = "file" + minfo.append(this) + return minfo diff --git a/MLPY/Lib/site-packages/fsspec/implementations/git.py b/MLPY/Lib/site-packages/fsspec/implementations/git.py new file mode 100644 index 0000000000000000000000000000000000000000..7c34d93e08c20fc65421e5aa4bab53e8c683fee7 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/git.py @@ -0,0 +1,127 @@ +import os + +import pygit2 + +from fsspec.spec import AbstractFileSystem + +from .memory import MemoryFile + + +class GitFileSystem(AbstractFileSystem): + """Browse the files of a local git repo at any hash/tag/branch + + (experimental backend) + """ + + root_marker = "" + cachable = True + + def __init__(self, path=None, fo=None, ref=None, **kwargs): + """ + + Parameters + ---------- + path: str (optional) + Local location of the repo (uses current directory if not given). + May be deprecated in favour of ``fo``. When used with a higher + level function such as fsspec.open(), may be of the form + "git://[path-to-repo[:]][ref@]path/to/file" (but the actual + file path should not contain "@" or ":"). + fo: str (optional) + Same as ``path``, but passed as part of a chained URL. This one + takes precedence if both are given. + ref: str (optional) + Reference to work with, could be a hash, tag or branch name. Defaults + to current working tree. 
Note that ``ls`` and ``open`` also take hash, + so this becomes the default for those operations + kwargs + """ + super().__init__(**kwargs) + self.repo = pygit2.Repository(fo or path or os.getcwd()) + self.ref = ref or "master" + + @classmethod + def _strip_protocol(cls, path): + path = super()._strip_protocol(path).lstrip("/") + if ":" in path: + path = path.split(":", 1)[1] + if "@" in path: + path = path.split("@", 1)[1] + return path.lstrip("/") + + def _path_to_object(self, path, ref): + comm, ref = self.repo.resolve_refish(ref or self.ref) + parts = path.split("/") + tree = comm.tree + for part in parts: + if part and isinstance(tree, pygit2.Tree): + tree = tree[part] + return tree + + @staticmethod + def _get_kwargs_from_urls(path): + if path.startswith("git://"): + path = path[6:] + out = {} + if ":" in path: + out["path"], path = path.split(":", 1) + if "@" in path: + out["ref"], path = path.split("@", 1) + return out + + def ls(self, path, detail=True, ref=None, **kwargs): + path = self._strip_protocol(path) + tree = self._path_to_object(path, ref) + if isinstance(tree, pygit2.Tree): + out = [] + for obj in tree: + if isinstance(obj, pygit2.Tree): + out.append( + { + "type": "directory", + "name": "/".join([path, obj.name]).lstrip("/"), + "hex": obj.hex, + "mode": f"{obj.filemode:o}", + "size": 0, + } + ) + else: + out.append( + { + "type": "file", + "name": "/".join([path, obj.name]).lstrip("/"), + "hex": obj.hex, + "mode": f"{obj.filemode:o}", + "size": obj.size, + } + ) + else: + obj = tree + out = [ + { + "type": "file", + "name": obj.name, + "hex": obj.hex, + "mode": f"{obj.filemode:o}", + "size": obj.size, + } + ] + if detail: + return out + return [o["name"] for o in out] + + def ukey(self, path, ref=None): + return self.info(path, ref=ref)["hex"] + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + ref=None, + **kwargs, + ): + obj = self._path_to_object(path, ref or self.ref) + return MemoryFile(data=obj.data) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/github.py b/MLPY/Lib/site-packages/fsspec/implementations/github.py new file mode 100644 index 0000000000000000000000000000000000000000..3650b8ebaa4eae3caa75a5290305fefe0a80d30b --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/github.py @@ -0,0 +1,239 @@ +import requests + +import fsspec + +from ..spec import AbstractFileSystem +from ..utils import infer_storage_options +from .memory import MemoryFile + +# TODO: add GIST backend, would be very similar + + +class GithubFileSystem(AbstractFileSystem): + """Interface to files in github + + An instance of this class provides the files residing within a remote github + repository. You may specify a point in the repos history, by SHA, branch + or tag (default is current master). + + Given that code files tend to be small, and that github does not support + retrieving partial content, we always fetch whole files. + + When using fsspec.open, allows URIs of the form: + + - "github://path/file", in which case you must specify org, repo and + may specify sha in the extra args + - 'github://org:repo@/precip/catalog.yml', where the org and repo are + part of the URI + - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included + + ``sha`` can be the full or abbreviated hex of the commit you want to fetch + from, or a branch or tag name (so long as it doesn't contain special characters + like "/", "?", which would have to be HTTP-encoded). 
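Editor's note: a brief usage sketch (org, repo and branch names below are placeholders; unauthenticated requests are subject to GitHub's rate limits):

    import fsspec

    # via a chained URL, in the form described above
    with fsspec.open("github://someorg:somerepo@main/README.md", "rb") as f:
        head = f.read(200)

    # or by constructing the filesystem explicitly
    fs = fsspec.filesystem("github", org="someorg", repo="somerepo")
    print(fs.ls(""))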
+ + For authorised access, you must provide username and token, which can be made + at https://github.com/settings/tokens + """ + + url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}" + rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}" + protocol = "github" + timeout = (60, 60) # connect, read timeouts + + def __init__( + self, org, repo, sha=None, username=None, token=None, timeout=None, **kwargs + ): + super().__init__(**kwargs) + self.org = org + self.repo = repo + if (username is None) ^ (token is None): + raise ValueError("Auth required both username and token") + self.username = username + self.token = token + if timeout is not None: + self.timeout = timeout + if sha is None: + # look up default branch (not necessarily "master") + u = "https://api.github.com/repos/{org}/{repo}" + r = requests.get( + u.format(org=org, repo=repo), timeout=self.timeout, **self.kw + ) + r.raise_for_status() + sha = r.json()["default_branch"] + + self.root = sha + self.ls("") + + @property + def kw(self): + if self.username: + return {"auth": (self.username, self.token)} + return {} + + @classmethod + def repos(cls, org_or_user, is_org=True): + """List repo names for given org or user + + This may become the top level of the FS + + Parameters + ---------- + org_or_user: str + Name of the github org or user to query + is_org: bool (default True) + Whether the name is an organisation (True) or user (False) + + Returns + ------- + List of string + """ + r = requests.get( + f"https://api.github.com/{['users', 'orgs'][is_org]}/{org_or_user}/repos", + timeout=cls.timeout, + ) + r.raise_for_status() + return [repo["name"] for repo in r.json()] + + @property + def tags(self): + """Names of tags in the repo""" + r = requests.get( + f"https://api.github.com/repos/{self.org}/{self.repo}/tags", + timeout=self.timeout, + **self.kw, + ) + r.raise_for_status() + return [t["name"] for t in r.json()] + + @property + def branches(self): + """Names of branches in the repo""" + r = requests.get( + f"https://api.github.com/repos/{self.org}/{self.repo}/branches", + timeout=self.timeout, + **self.kw, + ) + r.raise_for_status() + return [t["name"] for t in r.json()] + + @property + def refs(self): + """Named references, tags and branches""" + return {"tags": self.tags, "branches": self.branches} + + def ls(self, path, detail=False, sha=None, _sha=None, **kwargs): + """List files at given path + + Parameters + ---------- + path: str + Location to list, relative to repo root + detail: bool + If True, returns list of dicts, one per file; if False, returns + list of full filenames only + sha: str (optional) + List at the given point in the repo history, branch or tag name or commit + SHA + _sha: str (optional) + List this specific tree object (used internally to descend into trees) + """ + path = self._strip_protocol(path) + if path == "": + _sha = sha or self.root + if _sha is None: + parts = path.rstrip("/").split("/") + so_far = "" + _sha = sha or self.root + for part in parts: + out = self.ls(so_far, True, sha=sha, _sha=_sha) + so_far += "/" + part if so_far else part + out = [o for o in out if o["name"] == so_far] + if not out: + raise FileNotFoundError(path) + out = out[0] + if out["type"] == "file": + if detail: + return [out] + else: + return path + _sha = out["sha"] + if path not in self.dircache or sha not in [self.root, None]: + r = requests.get( + self.url.format(org=self.org, repo=self.repo, sha=_sha), + timeout=self.timeout, + **self.kw, + ) + if r.status_code == 404: + raise 
FileNotFoundError(path) + r.raise_for_status() + types = {"blob": "file", "tree": "directory"} + out = [ + { + "name": path + "/" + f["path"] if path else f["path"], + "mode": f["mode"], + "type": types[f["type"]], + "size": f.get("size", 0), + "sha": f["sha"], + } + for f in r.json()["tree"] + if f["type"] in types + ] + if sha in [self.root, None]: + self.dircache[path] = out + else: + out = self.dircache[path] + if detail: + return out + else: + return sorted([f["name"] for f in out]) + + def invalidate_cache(self, path=None): + self.dircache.clear() + + @classmethod + def _strip_protocol(cls, path): + opts = infer_storage_options(path) + if "username" not in opts: + return super()._strip_protocol(path) + return opts["path"].lstrip("/") + + @staticmethod + def _get_kwargs_from_urls(path): + opts = infer_storage_options(path) + if "username" not in opts: + return {} + out = {"org": opts["username"], "repo": opts["password"]} + if opts["host"]: + out["sha"] = opts["host"] + return out + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + sha=None, + **kwargs, + ): + if mode != "rb": + raise NotImplementedError + url = self.rurl.format( + org=self.org, repo=self.repo, path=path, sha=sha or self.root + ) + r = requests.get(url, timeout=self.timeout, **self.kw) + if r.status_code == 404: + raise FileNotFoundError(path) + r.raise_for_status() + return MemoryFile(None, None, r.content) + + def cat(self, path, recursive=False, on_error="raise", **kwargs): + paths = self.expand_path(path, recursive=recursive) + urls = [ + self.rurl.format(org=self.org, repo=self.repo, path=u, sha=self.root) + for u, sh in paths + ] + fs = fsspec.filesystem("http") + data = fs.cat(urls, on_error="return") + return {u: v for ((k, v), u) in zip(data.items(), urls)} diff --git a/MLPY/Lib/site-packages/fsspec/implementations/http.py b/MLPY/Lib/site-packages/fsspec/implementations/http.py new file mode 100644 index 0000000000000000000000000000000000000000..c9ab177eb5451bde06243cd142d3ff87f7810f4b --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/http.py @@ -0,0 +1,872 @@ +import asyncio +import io +import logging +import re +import weakref +from copy import copy +from urllib.parse import urlparse + +import aiohttp +import yarl + +from fsspec.asyn import AbstractAsyncStreamedFile, AsyncFileSystem, sync, sync_wrapper +from fsspec.callbacks import DEFAULT_CALLBACK +from fsspec.exceptions import FSTimeoutError +from fsspec.spec import AbstractBufferedFile +from fsspec.utils import ( + DEFAULT_BLOCK_SIZE, + glob_translate, + isfilelike, + nullcontext, + tokenize, +) + +from ..caching import AllBytes + +# https://stackoverflow.com/a/15926317/3821154 +ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P[^"']+)""") +ex2 = re.compile(r"""(?Phttp[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""") +logger = logging.getLogger("fsspec.http") + + +async def get_client(**kwargs): + return aiohttp.ClientSession(**kwargs) + + +class HTTPFileSystem(AsyncFileSystem): + """ + Simple File-System for fetching data via HTTP(S) + + ``ls()`` is implemented by loading the parent page and doing a regex + match on the result. If simple_link=True, anything of the form + "http(s)://server.com/stuff?thing=other"; otherwise only links within + HTML href tags will be used. 
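Editor's note: a short usage sketch (the URL is a placeholder; whole-file reads need nothing more than plain GET support on the server):

    import fsspec

    with fsspec.open("https://example.com/data.csv", "rb") as f:
        first_kb = f.read(1024)

    fs = fsspec.filesystem("http")
    print(fs.info("https://example.com/data.csv"))   # "size" may be None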
+ """ + + sep = "/" + + def __init__( + self, + simple_links=True, + block_size=None, + same_scheme=True, + size_policy=None, + cache_type="bytes", + cache_options=None, + asynchronous=False, + loop=None, + client_kwargs=None, + get_client=get_client, + encoded=False, + **storage_options, + ): + """ + NB: if this is called async, you must await set_client + + Parameters + ---------- + block_size: int + Blocks to read bytes; if 0, will default to raw requests file-like + objects instead of HTTPFile instances + simple_links: bool + If True, will consider both HTML tags and anything that looks + like a URL; if False, will consider only the former. + same_scheme: True + When doing ls/glob, if this is True, only consider paths that have + http/https matching the input URLs. + size_policy: this argument is deprecated + client_kwargs: dict + Passed to aiohttp.ClientSession, see + https://docs.aiohttp.org/en/stable/client_reference.html + For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}`` + get_client: Callable[..., aiohttp.ClientSession] + A callable which takes keyword arguments and constructs + an aiohttp.ClientSession. It's state will be managed by + the HTTPFileSystem class. + storage_options: key-value + Any other parameters passed on to requests + cache_type, cache_options: defaults used in open + """ + super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options) + self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE + self.simple_links = simple_links + self.same_schema = same_scheme + self.cache_type = cache_type + self.cache_options = cache_options + self.client_kwargs = client_kwargs or {} + self.get_client = get_client + self.encoded = encoded + self.kwargs = storage_options + self._session = None + + # Clean caching-related parameters from `storage_options` + # before propagating them as `request_options` through `self.kwargs`. + # TODO: Maybe rename `self.kwargs` to `self.request_options` to make + # it clearer. + request_options = copy(storage_options) + self.use_listings_cache = request_options.pop("use_listings_cache", False) + request_options.pop("listings_expiry_time", None) + request_options.pop("max_paths", None) + request_options.pop("skip_instance_cache", None) + self.kwargs = request_options + + @property + def fsid(self): + return "http" + + def encode_url(self, url): + return yarl.URL(url, encoded=self.encoded) + + @staticmethod + def close_session(loop, session): + if loop is not None and loop.is_running(): + try: + sync(loop, session.close, timeout=0.1) + return + except (TimeoutError, FSTimeoutError, NotImplementedError): + pass + connector = getattr(session, "_connector", None) + if connector is not None: + # close after loop is dead + connector._close() + + async def set_session(self): + if self._session is None: + self._session = await self.get_client(loop=self.loop, **self.client_kwargs) + if not self.asynchronous: + weakref.finalize(self, self.close_session, self.loop, self._session) + return self._session + + @classmethod + def _strip_protocol(cls, path): + """For HTTP, we always want to keep the full URL""" + return path + + @classmethod + def _parent(cls, path): + # override, since _strip_protocol is different for URLs + par = super()._parent(path) + if len(par) > 7: # "http://..." 
+ return par + return "" + + async def _ls_real(self, url, detail=True, **kwargs): + # ignoring URL-encoded arguments + kw = self.kwargs.copy() + kw.update(kwargs) + logger.debug(url) + session = await self.set_session() + async with session.get(self.encode_url(url), **self.kwargs) as r: + self._raise_not_found_for_status(r, url) + try: + text = await r.text() + if self.simple_links: + links = ex2.findall(text) + [u[2] for u in ex.findall(text)] + else: + links = [u[2] for u in ex.findall(text)] + except UnicodeDecodeError: + links = [] # binary, not HTML + out = set() + parts = urlparse(url) + for l in links: + if isinstance(l, tuple): + l = l[1] + if l.startswith("/") and len(l) > 1: + # absolute URL on this server + l = f"{parts.scheme}://{parts.netloc}{l}" + if l.startswith("http"): + if self.same_schema and l.startswith(url.rstrip("/") + "/"): + out.add(l) + elif l.replace("https", "http").startswith( + url.replace("https", "http").rstrip("/") + "/" + ): + # allowed to cross http <-> https + out.add(l) + else: + if l not in ["..", "../"]: + # Ignore FTP-like "parent" + out.add("/".join([url.rstrip("/"), l.lstrip("/")])) + if not out and url.endswith("/"): + out = await self._ls_real(url.rstrip("/"), detail=False) + if detail: + return [ + { + "name": u, + "size": None, + "type": "directory" if u.endswith("/") else "file", + } + for u in out + ] + else: + return sorted(out) + + async def _ls(self, url, detail=True, **kwargs): + if self.use_listings_cache and url in self.dircache: + out = self.dircache[url] + else: + out = await self._ls_real(url, detail=detail, **kwargs) + self.dircache[url] = out + return out + + ls = sync_wrapper(_ls) + + def _raise_not_found_for_status(self, response, url): + """ + Raises FileNotFoundError for 404s, otherwise uses raise_for_status. + """ + if response.status == 404: + raise FileNotFoundError(url) + response.raise_for_status() + + async def _cat_file(self, url, start=None, end=None, **kwargs): + kw = self.kwargs.copy() + kw.update(kwargs) + logger.debug(url) + + if start is not None or end is not None: + if start == end: + return b"" + headers = kw.pop("headers", {}).copy() + + headers["Range"] = await self._process_limits(url, start, end) + kw["headers"] = headers + session = await self.set_session() + async with session.get(self.encode_url(url), **kw) as r: + out = await r.read() + self._raise_not_found_for_status(r, url) + return out + + async def _get_file( + self, rpath, lpath, chunk_size=5 * 2**20, callback=DEFAULT_CALLBACK, **kwargs + ): + kw = self.kwargs.copy() + kw.update(kwargs) + logger.debug(rpath) + session = await self.set_session() + async with session.get(self.encode_url(rpath), **kw) as r: + try: + size = int(r.headers["content-length"]) + except (ValueError, KeyError): + size = None + + callback.set_size(size) + self._raise_not_found_for_status(r, rpath) + if isfilelike(lpath): + outfile = lpath + else: + outfile = open(lpath, "wb") # noqa: ASYNC101 + + try: + chunk = True + while chunk: + chunk = await r.content.read(chunk_size) + outfile.write(chunk) + callback.relative_update(len(chunk)) + finally: + if not isfilelike(lpath): + outfile.close() + + async def _put_file( + self, + lpath, + rpath, + chunk_size=5 * 2**20, + callback=DEFAULT_CALLBACK, + method="post", + **kwargs, + ): + async def gen_chunks(): + # Support passing arbitrary file-like objects + # and use them instead of streams. 
+ if isinstance(lpath, io.IOBase): + context = nullcontext(lpath) + use_seek = False # might not support seeking + else: + context = open(lpath, "rb") # noqa: ASYNC101 + use_seek = True + + with context as f: + if use_seek: + callback.set_size(f.seek(0, 2)) + f.seek(0) + else: + callback.set_size(getattr(f, "size", None)) + + chunk = f.read(chunk_size) + while chunk: + yield chunk + callback.relative_update(len(chunk)) + chunk = f.read(chunk_size) + + kw = self.kwargs.copy() + kw.update(kwargs) + session = await self.set_session() + + method = method.lower() + if method not in ("post", "put"): + raise ValueError( + f"method has to be either 'post' or 'put', not: {method!r}" + ) + + meth = getattr(session, method) + async with meth(self.encode_url(rpath), data=gen_chunks(), **kw) as resp: + self._raise_not_found_for_status(resp, rpath) + + async def _exists(self, path, **kwargs): + kw = self.kwargs.copy() + kw.update(kwargs) + try: + logger.debug(path) + session = await self.set_session() + r = await session.get(self.encode_url(path), **kw) + async with r: + return r.status < 400 + except aiohttp.ClientError: + return False + + async def _isfile(self, path, **kwargs): + return await self._exists(path, **kwargs) + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=None, # XXX: This differs from the base class. + cache_type=None, + cache_options=None, + size=None, + **kwargs, + ): + """Make a file-like object + + Parameters + ---------- + path: str + Full URL with protocol + mode: string + must be "rb" + block_size: int or None + Bytes to download in one request; use instance value if None. If + zero, will return a streaming Requests file-like instance. + kwargs: key-value + Any other parameters, passed to requests calls + """ + if mode != "rb": + raise NotImplementedError + block_size = block_size if block_size is not None else self.block_size + kw = self.kwargs.copy() + kw["asynchronous"] = self.asynchronous + kw.update(kwargs) + size = size or self.info(path, **kwargs)["size"] + session = sync(self.loop, self.set_session) + if block_size and size: + return HTTPFile( + self, + path, + session=session, + block_size=block_size, + mode=mode, + size=size, + cache_type=cache_type or self.cache_type, + cache_options=cache_options or self.cache_options, + loop=self.loop, + **kw, + ) + else: + return HTTPStreamFile( + self, + path, + mode=mode, + loop=self.loop, + session=session, + **kw, + ) + + async def open_async(self, path, mode="rb", size=None, **kwargs): + session = await self.set_session() + if size is None: + try: + size = (await self._info(path, **kwargs))["size"] + except FileNotFoundError: + pass + return AsyncStreamFile( + self, + path, + loop=self.loop, + session=session, + size=size, + **kwargs, + ) + + def ukey(self, url): + """Unique identifier; assume HTTP files are static, unchanging""" + return tokenize(url, self.kwargs, self.protocol) + + async def _info(self, url, **kwargs): + """Get info of URL + + Tries to access location via HEAD, and then GET methods, but does + not fetch the data. + + It is possible that the server does not supply any size information, in + which case size will be given as None (and certain operations on the + corresponding file will not work). 
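Editor's note: a hedged sketch of what a missing size means in practice (the URL is a placeholder). When the server reports no usable size, only a streaming, forward-only file is available:

    import fsspec

    fs = fsspec.filesystem("http")
    info = fs.info("https://example.com/stream")      # info["size"] may be None
    with fs.open("https://example.com/stream", block_size=0) as f:
        chunk = f.read(4096)    # sequential reads work; random seeks do not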
+ """ + info = {} + session = await self.set_session() + + for policy in ["head", "get"]: + try: + info.update( + await _file_info( + self.encode_url(url), + size_policy=policy, + session=session, + **self.kwargs, + **kwargs, + ) + ) + if info.get("size") is not None: + break + except Exception as exc: + if policy == "get": + # If get failed, then raise a FileNotFoundError + raise FileNotFoundError(url) from exc + logger.debug("", exc_info=exc) + + return {"name": url, "size": None, **info, "type": "file"} + + async def _glob(self, path, maxdepth=None, **kwargs): + """ + Find files by glob-matching. + + This implementation is idntical to the one in AbstractFileSystem, + but "?" is not considered as a character for globbing, because it is + so common in URLs, often identifying the "query" part. + """ + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + import re + + ends_with_slash = path.endswith("/") # _strip_protocol strips trailing slash + path = self._strip_protocol(path) + append_slash_to_dirname = ends_with_slash or path.endswith(("/**", "/*")) + idx_star = path.find("*") if path.find("*") >= 0 else len(path) + idx_brace = path.find("[") if path.find("[") >= 0 else len(path) + + min_idx = min(idx_star, idx_brace) + + detail = kwargs.pop("detail", False) + + if not has_magic(path): + if await self._exists(path, **kwargs): + if not detail: + return [path] + else: + return {path: await self._info(path, **kwargs)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:min_idx]: + min_idx = path[:min_idx].rindex("/") + root = path[: min_idx + 1] + depth = path[min_idx + 1 :].count("/") + 1 + else: + root = "" + depth = path[min_idx + 1 :].count("/") + 1 + + if "**" in path: + if maxdepth is not None: + idx_double_stars = path.find("**") + depth_double_stars = path[idx_double_stars:].count("/") + 1 + depth = depth - depth_double_stars + maxdepth + else: + depth = None + + allpaths = await self._find( + root, maxdepth=depth, withdirs=True, detail=True, **kwargs + ) + + pattern = glob_translate(path + ("/" if ends_with_slash else "")) + pattern = re.compile(pattern) + + out = { + ( + p.rstrip("/") + if not append_slash_to_dirname + and info["type"] == "directory" + and p.endswith("/") + else p + ): info + for p, info in sorted(allpaths.items()) + if pattern.match(p.rstrip("/")) + } + + if detail: + return out + else: + return list(out) + + async def _isdir(self, path): + # override, since all URLs are (also) files + try: + return bool(await self._ls(path)) + except (FileNotFoundError, ValueError): + return False + + +class HTTPFile(AbstractBufferedFile): + """ + A file-like object pointing to a remove HTTP(S) resource + + Supports only reading, with read-ahead of a predermined block-size. + + In the case that the server does not supply the filesize, only reading of + the complete file in one go is supported. + + Parameters + ---------- + url: str + Full URL of the remote resource, including the protocol + session: aiohttp.ClientSession or None + All calls will be made within this session, to avoid restarting + connections where the server allows this + block_size: int or None + The amount of read-ahead to do, in bytes. Default is 5MB, or the value + configured for the FileSystem creating this file + size: None or int + If given, this is the size of the file in bytes, and we don't attempt + to call the server to find the value. + kwargs: all other key-values are passed to requests calls. 
+ """ + + def __init__( + self, + fs, + url, + session=None, + block_size=None, + mode="rb", + cache_type="bytes", + cache_options=None, + size=None, + loop=None, + asynchronous=False, + **kwargs, + ): + if mode != "rb": + raise NotImplementedError("File mode not supported") + self.asynchronous = asynchronous + self.loop = loop + self.url = url + self.session = session + self.details = {"name": url, "size": size, "type": "file"} + super().__init__( + fs=fs, + path=url, + mode=mode, + block_size=block_size, + cache_type=cache_type, + cache_options=cache_options, + **kwargs, + ) + + def read(self, length=-1): + """Read bytes from file + + Parameters + ---------- + length: int + Read up to this many bytes. If negative, read all content to end of + file. If the server has not supplied the filesize, attempting to + read only part of the data will raise a ValueError. + """ + if ( + (length < 0 and self.loc == 0) # explicit read all + # but not when the size is known and fits into a block anyways + and not (self.size is not None and self.size <= self.blocksize) + ): + self._fetch_all() + if self.size is None: + if length < 0: + self._fetch_all() + else: + length = min(self.size - self.loc, length) + return super().read(length) + + async def async_fetch_all(self): + """Read whole file in one shot, without caching + + This is only called when position is still at zero, + and read() is called without a byte-count. + """ + logger.debug(f"Fetch all for {self}") + if not isinstance(self.cache, AllBytes): + r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs) + async with r: + r.raise_for_status() + out = await r.read() + self.cache = AllBytes( + size=len(out), fetcher=None, blocksize=None, data=out + ) + self.size = len(out) + + _fetch_all = sync_wrapper(async_fetch_all) + + def _parse_content_range(self, headers): + """Parse the Content-Range header""" + s = headers.get("Content-Range", "") + m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s) + if not m: + return None, None, None + + if m[1] == "*": + start = end = None + else: + start, end = [int(x) for x in m[1].split("-")] + total = None if m[2] == "*" else int(m[2]) + return start, end, total + + async def async_fetch_range(self, start, end): + """Download a block of data + + The expectation is that the server returns only the requested bytes, + with HTTP code 206. If this is not the case, we first check the headers, + and then stream the output - if the data size is bigger than we + requested, an exception is raised. + """ + logger.debug(f"Fetch range for {self}: {start}-{end}") + kwargs = self.kwargs.copy() + headers = kwargs.pop("headers", {}).copy() + headers["Range"] = f"bytes={start}-{end - 1}" + logger.debug(f"{self.url} : {headers['Range']}") + r = await self.session.get( + self.fs.encode_url(self.url), headers=headers, **kwargs + ) + async with r: + if r.status == 416: + # range request outside file + return b"" + r.raise_for_status() + + # If the server has handled the range request, it should reply + # with status 206 (partial content). But we'll guess that a suitable + # Content-Range header or a Content-Length no more than the + # requested range also mean we have got the desired range. 
+ response_is_range = ( + r.status == 206 + or self._parse_content_range(r.headers)[0] == start + or int(r.headers.get("Content-Length", end + 1)) <= end - start + ) + + if response_is_range: + # partial content, as expected + out = await r.read() + elif start > 0: + raise ValueError( + "The HTTP server doesn't appear to support range requests. " + "Only reading this file from the beginning is supported. " + "Open with block_size=0 for a streaming file interface." + ) + else: + # Response is not a range, but we want the start of the file, + # so we can read the required amount anyway. + cl = 0 + out = [] + while True: + chunk = await r.content.read(2**20) + # data size unknown, let's read until we have enough + if chunk: + out.append(chunk) + cl += len(chunk) + if cl > end - start: + break + else: + break + out = b"".join(out)[: end - start] + return out + + _fetch_range = sync_wrapper(async_fetch_range) + + def __reduce__(self): + return ( + reopen, + ( + self.fs, + self.url, + self.mode, + self.blocksize, + self.cache.name if self.cache else "none", + self.size, + ), + ) + + +def reopen(fs, url, mode, blocksize, cache_type, size=None): + return fs.open( + url, mode=mode, block_size=blocksize, cache_type=cache_type, size=size + ) + + +magic_check = re.compile("([*[])") + + +def has_magic(s): + match = magic_check.search(s) + return match is not None + + +class HTTPStreamFile(AbstractBufferedFile): + def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs): + self.asynchronous = kwargs.pop("asynchronous", False) + self.url = url + self.loop = loop + self.session = session + if mode != "rb": + raise ValueError + self.details = {"name": url, "size": None} + super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs) + + async def cor(): + r = await self.session.get(self.fs.encode_url(url), **kwargs).__aenter__() + self.fs._raise_not_found_for_status(r, url) + return r + + self.r = sync(self.loop, cor) + self.loop = fs.loop + + def seek(self, loc, whence=0): + if loc == 0 and whence == 1: + return + if loc == self.loc and whence == 0: + return + raise ValueError("Cannot seek streaming HTTP file") + + async def _read(self, num=-1): + out = await self.r.content.read(num) + self.loc += len(out) + return out + + read = sync_wrapper(_read) + + async def _close(self): + self.r.close() + + def close(self): + asyncio.run_coroutine_threadsafe(self._close(), self.loop) + super().close() + + def __reduce__(self): + return reopen, (self.fs, self.url, self.mode, self.blocksize, self.cache.name) + + +class AsyncStreamFile(AbstractAsyncStreamedFile): + def __init__( + self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs + ): + self.url = url + self.session = session + self.r = None + if mode != "rb": + raise ValueError + self.details = {"name": url, "size": None} + self.kwargs = kwargs + super().__init__(fs=fs, path=url, mode=mode, cache_type="none") + self.size = size + + async def read(self, num=-1): + if self.r is None: + r = await self.session.get( + self.fs.encode_url(self.url), **self.kwargs + ).__aenter__() + self.fs._raise_not_found_for_status(r, self.url) + self.r = r + out = await self.r.content.read(num) + self.loc += len(out) + return out + + async def close(self): + if self.r is not None: + self.r.close() + self.r = None + await super().close() + + +async def get_range(session, url, start, end, file=None, **kwargs): + # explicit get a range when we know it must be safe + kwargs = kwargs.copy() + headers = kwargs.pop("headers", {}).copy() + 
headers["Range"] = f"bytes={start}-{end - 1}" + r = await session.get(url, headers=headers, **kwargs) + r.raise_for_status() + async with r: + out = await r.read() + if file: + with open(file, "r+b") as f: # noqa: ASYNC101 + f.seek(start) + f.write(out) + else: + return out + + +async def _file_info(url, session, size_policy="head", **kwargs): + """Call HEAD on the server to get details about the file (size/checksum etc.) + + Default operation is to explicitly allow redirects and use encoding + 'identity' (no compression) to get the true size of the target. + """ + logger.debug("Retrieve file size for %s", url) + kwargs = kwargs.copy() + ar = kwargs.pop("allow_redirects", True) + head = kwargs.get("headers", {}).copy() + head["Accept-Encoding"] = "identity" + kwargs["headers"] = head + + info = {} + if size_policy == "head": + r = await session.head(url, allow_redirects=ar, **kwargs) + elif size_policy == "get": + r = await session.get(url, allow_redirects=ar, **kwargs) + else: + raise TypeError(f'size_policy must be "head" or "get", got {size_policy}') + async with r: + r.raise_for_status() + + # TODO: + # recognise lack of 'Accept-Ranges', + # or 'Accept-Ranges': 'none' (not 'bytes') + # to mean streaming only, no random access => return None + if "Content-Length" in r.headers: + # Some servers may choose to ignore Accept-Encoding and return + # compressed content, in which case the returned size is unreliable. + if "Content-Encoding" not in r.headers or r.headers["Content-Encoding"] in [ + "identity", + "", + ]: + info["size"] = int(r.headers["Content-Length"]) + elif "Content-Range" in r.headers: + info["size"] = int(r.headers["Content-Range"].split("/")[1]) + + if "Content-Type" in r.headers: + info["mimetype"] = r.headers["Content-Type"].partition(";")[0] + + info["url"] = str(r.url) + + for checksum_field in ["ETag", "Content-MD5", "Digest"]: + if r.headers.get(checksum_field): + info[checksum_field] = r.headers[checksum_field] + + return info + + +async def _file_size(url, session=None, *args, **kwargs): + if session is None: + session = await get_client() + info = await _file_info(url, session=session, *args, **kwargs) + return info.get("size") + + +file_size = sync_wrapper(_file_size) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/jupyter.py b/MLPY/Lib/site-packages/fsspec/implementations/jupyter.py new file mode 100644 index 0000000000000000000000000000000000000000..2839f4c1feea56dddd54bdc00f0b884c8461d29e --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/jupyter.py @@ -0,0 +1,124 @@ +import base64 +import io +import re + +import requests + +import fsspec + + +class JupyterFileSystem(fsspec.AbstractFileSystem): + """View of the files as seen by a Jupyter server (notebook or lab)""" + + protocol = ("jupyter", "jlab") + + def __init__(self, url, tok=None, **kwargs): + """ + + Parameters + ---------- + url : str + Base URL of the server, like "http://127.0.0.1:8888". May include + token in the string, which is given by the process when starting up + tok : str + If the token is obtained separately, can be given here + kwargs + """ + if "?" 
in url: + if tok is None: + try: + tok = re.findall("token=([a-z0-9]+)", url)[0] + except IndexError as e: + raise ValueError("Could not determine token") from e + url = url.split("?", 1)[0] + self.url = url.rstrip("/") + "/api/contents" + self.session = requests.Session() + if tok: + self.session.headers["Authorization"] = f"token {tok}" + + super().__init__(**kwargs) + + def ls(self, path, detail=True, **kwargs): + path = self._strip_protocol(path) + r = self.session.get(f"{self.url}/{path}") + if r.status_code == 404: + return FileNotFoundError(path) + r.raise_for_status() + out = r.json() + + if out["type"] == "directory": + out = out["content"] + else: + out = [out] + for o in out: + o["name"] = o.pop("path") + o.pop("content") + if o["type"] == "notebook": + o["type"] = "file" + if detail: + return out + return [o["name"] for o in out] + + def cat_file(self, path, start=None, end=None, **kwargs): + path = self._strip_protocol(path) + r = self.session.get(f"{self.url}/{path}") + if r.status_code == 404: + return FileNotFoundError(path) + r.raise_for_status() + out = r.json() + if out["format"] == "text": + # data should be binary + b = out["content"].encode() + else: + b = base64.b64decode(out["content"]) + return b[start:end] + + def pipe_file(self, path, value, **_): + path = self._strip_protocol(path) + json = { + "name": path.rsplit("/", 1)[-1], + "path": path, + "size": len(value), + "content": base64.b64encode(value).decode(), + "format": "base64", + "type": "file", + } + self.session.put(f"{self.url}/{path}", json=json) + + def mkdir(self, path, create_parents=True, **kwargs): + path = self._strip_protocol(path) + if create_parents and "/" in path: + self.mkdir(path.rsplit("/", 1)[0], True) + json = { + "name": path.rsplit("/", 1)[-1], + "path": path, + "size": None, + "content": None, + "type": "directory", + } + self.session.put(f"{self.url}/{path}", json=json) + + def _rm(self, path): + path = self._strip_protocol(path) + self.session.delete(f"{self.url}/{path}") + + def _open(self, path, mode="rb", **kwargs): + path = self._strip_protocol(path) + if mode == "rb": + data = self.cat_file(path) + return io.BytesIO(data) + else: + return SimpleFileWriter(self, path, mode="wb") + + +class SimpleFileWriter(fsspec.spec.AbstractBufferedFile): + def _upload_chunk(self, final=False): + """Never uploads a chunk until file is done + + Not suitable for large files + """ + if final is False: + return False + self.buffer.seek(0) + data = self.buffer.read() + self.fs.pipe_file(self.path, data) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/libarchive.py b/MLPY/Lib/site-packages/fsspec/implementations/libarchive.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6f145352e1989e0477e259be02d8d7f4d729e2 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/libarchive.py @@ -0,0 +1,213 @@ +from contextlib import contextmanager +from ctypes import ( + CFUNCTYPE, + POINTER, + c_int, + c_longlong, + c_void_p, + cast, + create_string_buffer, +) + +import libarchive +import libarchive.ffi as ffi + +from fsspec import open_files +from fsspec.archive import AbstractArchiveFileSystem +from fsspec.implementations.memory import MemoryFile +from fsspec.utils import DEFAULT_BLOCK_SIZE + +# Libarchive requires seekable files or memory only for certain archive +# types. However, since we read the directory first to cache the contents +# and also allow random access to any file, the file-like object needs +# to be seekable no matter what. 
+ +# Seek call-backs (not provided in the libarchive python wrapper) +SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int) +read_set_seek_callback = ffi.ffi( + "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int +) +new_api = hasattr(ffi, "NO_OPEN_CB") + + +@contextmanager +def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size): + """Read an archive from a seekable file-like object. + + The `file` object must support the standard `readinto` and 'seek' methods. + """ + buf = create_string_buffer(block_size) + buf_p = cast(buf, c_void_p) + + def read_func(archive_p, context, ptrptr): + # readinto the buffer, returns number of bytes read + length = file.readinto(buf) + # write the address of the buffer into the pointer + ptrptr = cast(ptrptr, POINTER(c_void_p)) + ptrptr[0] = buf_p + # tell libarchive how much data was written into the buffer + return length + + def seek_func(archive_p, context, offset, whence): + file.seek(offset, whence) + # tell libarchvie the current position + return file.tell() + + read_cb = ffi.READ_CALLBACK(read_func) + seek_cb = SEEK_CALLBACK(seek_func) + + if new_api: + open_cb = ffi.NO_OPEN_CB + close_cb = ffi.NO_CLOSE_CB + else: + open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB) + close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB) + + with libarchive.read.new_archive_read(format_name, filter_name) as archive_p: + read_set_seek_callback(archive_p, seek_cb) + ffi.read_open(archive_p, None, open_cb, read_cb, close_cb) + yield libarchive.read.ArchiveRead(archive_p) + + +class LibArchiveFileSystem(AbstractArchiveFileSystem): + """Compressed archives as a file-system (read-only) + + Supports the following formats: + tar, pax , cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar + Microsoft CAB, 7-Zip, WARC + + See the libarchive documentation for further restrictions. + https://www.libarchive.org/ + + Keeps file object open while instance lives. It only works in seekable + file-like objects. In case the filesystem does not support this kind of + file object, it is recommended to cache locally. + + This class is pickleable, but not necessarily thread-safe (depends on the + platform). See libarchive documentation for details. + """ + + root_marker = "" + protocol = "libarchive" + cachable = False + + def __init__( + self, + fo="", + mode="r", + target_protocol=None, + target_options=None, + block_size=DEFAULT_BLOCK_SIZE, + **kwargs, + ): + """ + Parameters + ---------- + fo: str or file-like + Contains ZIP, and must exist. If a str, will fetch file using + :meth:`~fsspec.open_files`, which must return one file exactly. + mode: str + Currently, only 'r' accepted + target_protocol: str (optional) + If ``fo`` is a string, this value can be used to override the + FS protocol inferred from a URL + target_options: dict (optional) + Kwargs passed when instantiating the target FS, if ``fo`` is + a string. 
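Editor's note: for example (placeholder URL), an archive that itself lives on another filesystem can be addressed by combining ``fo`` with ``target_protocol``:

    import fsspec

    # the archive is fetched over HTTP before being scanned
    fs = fsspec.filesystem(
        "libarchive",
        fo="https://example.com/bundle.tar",
        target_protocol="https",
    )
    print(fs.ls(""))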
+ """ + super().__init__(self, **kwargs) + if mode != "r": + raise ValueError("Only read from archive files accepted") + if isinstance(fo, str): + files = open_files(fo, protocol=target_protocol, **(target_options or {})) + if len(files) != 1: + raise ValueError( + f'Path "{fo}" did not resolve to exactly one file: "{files}"' + ) + fo = files[0] + self.of = fo + self.fo = fo.__enter__() # the whole instance is a context + self.block_size = block_size + self.dir_cache = None + + @contextmanager + def _open_archive(self): + self.fo.seek(0) + with custom_reader(self.fo, block_size=self.block_size) as arc: + yield arc + + @classmethod + def _strip_protocol(cls, path): + # file paths are always relative to the archive root + return super()._strip_protocol(path).lstrip("/") + + def _get_dirs(self): + fields = { + "name": "pathname", + "size": "size", + "created": "ctime", + "mode": "mode", + "uid": "uid", + "gid": "gid", + "mtime": "mtime", + } + + if self.dir_cache is not None: + return + + self.dir_cache = {} + list_names = [] + with self._open_archive() as arc: + for entry in arc: + if not entry.isdir and not entry.isfile: + # Skip symbolic links, fifo entries, etc. + continue + self.dir_cache.update( + { + dirname: {"name": dirname, "size": 0, "type": "directory"} + for dirname in self._all_dirnames(set(entry.name)) + } + ) + f = {key: getattr(entry, fields[key]) for key in fields} + f["type"] = "directory" if entry.isdir else "file" + list_names.append(entry.name) + + self.dir_cache[f["name"]] = f + # libarchive does not seem to return an entry for the directories (at least + # not in all formats), so get the directories names from the files names + self.dir_cache.update( + { + dirname: {"name": dirname, "size": 0, "type": "directory"} + for dirname in self._all_dirnames(list_names) + } + ) + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + path = self._strip_protocol(path) + if mode != "rb": + raise NotImplementedError + + data = bytes() + with self._open_archive() as arc: + for entry in arc: + if entry.pathname != path: + continue + + if entry.size == 0: + # empty file, so there are no blocks + break + + for block in entry.get_blocks(entry.size): + data = block + break + else: + raise ValueError + return MemoryFile(fs=self, path=path, data=data) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/local.py b/MLPY/Lib/site-packages/fsspec/implementations/local.py new file mode 100644 index 0000000000000000000000000000000000000000..9881606f138f59ba88a4c2882ef7f7b5de06c122 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/local.py @@ -0,0 +1,467 @@ +import datetime +import io +import logging +import os +import os.path as osp +import shutil +import stat +import tempfile + +from fsspec import AbstractFileSystem +from fsspec.compression import compr +from fsspec.core import get_compression +from fsspec.utils import isfilelike, stringify_path + +logger = logging.getLogger("fsspec.local") + + +class LocalFileSystem(AbstractFileSystem): + """Interface to files on local storage + + Parameters + ---------- + auto_mkdir: bool + Whether, when opening a file, the directory containing it should + be created (if it doesn't already exist). This is assumed by pyarrow + code. 
+ """ + + root_marker = "/" + protocol = "file", "local" + local_file = True + + def __init__(self, auto_mkdir=False, **kwargs): + super().__init__(**kwargs) + self.auto_mkdir = auto_mkdir + + @property + def fsid(self): + return "local" + + def mkdir(self, path, create_parents=True, **kwargs): + path = self._strip_protocol(path) + if self.exists(path): + raise FileExistsError(path) + if create_parents: + self.makedirs(path, exist_ok=True) + else: + os.mkdir(path, **kwargs) + + def makedirs(self, path, exist_ok=False): + path = self._strip_protocol(path) + os.makedirs(path, exist_ok=exist_ok) + + def rmdir(self, path): + path = self._strip_protocol(path) + os.rmdir(path) + + def ls(self, path, detail=False, **kwargs): + path = self._strip_protocol(path) + info = self.info(path) + if info["type"] == "directory": + with os.scandir(path) as it: + infos = [self.info(f) for f in it] + else: + infos = [info] + + if not detail: + return [i["name"] for i in infos] + return infos + + def info(self, path, **kwargs): + if isinstance(path, os.DirEntry): + # scandir DirEntry + out = path.stat(follow_symlinks=False) + link = path.is_symlink() + if path.is_dir(follow_symlinks=False): + t = "directory" + elif path.is_file(follow_symlinks=False): + t = "file" + else: + t = "other" + path = self._strip_protocol(path.path) + else: + # str or path-like + path = self._strip_protocol(path) + out = os.stat(path, follow_symlinks=False) + link = stat.S_ISLNK(out.st_mode) + if link: + out = os.stat(path, follow_symlinks=True) + if stat.S_ISDIR(out.st_mode): + t = "directory" + elif stat.S_ISREG(out.st_mode): + t = "file" + else: + t = "other" + result = { + "name": path, + "size": out.st_size, + "type": t, + "created": out.st_ctime, + "islink": link, + } + for field in ["mode", "uid", "gid", "mtime", "ino", "nlink"]: + result[field] = getattr(out, f"st_{field}") + if result["islink"]: + result["destination"] = os.readlink(path) + try: + out2 = os.stat(path, follow_symlinks=True) + result["size"] = out2.st_size + except OSError: + result["size"] = 0 + return result + + def lexists(self, path, **kwargs): + return osp.lexists(path) + + def cp_file(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1) + path2 = self._strip_protocol(path2) + if self.auto_mkdir: + self.makedirs(self._parent(path2), exist_ok=True) + if self.isfile(path1): + shutil.copyfile(path1, path2) + elif self.isdir(path1): + self.mkdirs(path2, exist_ok=True) + else: + raise FileNotFoundError(path1) + + def isfile(self, path): + path = self._strip_protocol(path) + return os.path.isfile(path) + + def isdir(self, path): + path = self._strip_protocol(path) + return os.path.isdir(path) + + def get_file(self, path1, path2, callback=None, **kwargs): + if isfilelike(path2): + with open(path1, "rb") as f: + shutil.copyfileobj(f, path2) + else: + return self.cp_file(path1, path2, **kwargs) + + def put_file(self, path1, path2, callback=None, **kwargs): + return self.cp_file(path1, path2, **kwargs) + + def mv(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1) + path2 = self._strip_protocol(path2) + shutil.move(path1, path2) + + def link(self, src, dst, **kwargs): + src = self._strip_protocol(src) + dst = self._strip_protocol(dst) + os.link(src, dst, **kwargs) + + def symlink(self, src, dst, **kwargs): + src = self._strip_protocol(src) + dst = self._strip_protocol(dst) + os.symlink(src, dst, **kwargs) + + def islink(self, path) -> bool: + return os.path.islink(self._strip_protocol(path)) + + def rm_file(self, path): + 
os.remove(self._strip_protocol(path)) + + def rm(self, path, recursive=False, maxdepth=None): + if not isinstance(path, list): + path = [path] + + for p in path: + p = self._strip_protocol(p) + if self.isdir(p): + if not recursive: + raise ValueError("Cannot delete directory, set recursive=True") + if osp.abspath(p) == os.getcwd(): + raise ValueError("Cannot delete current working directory") + shutil.rmtree(p) + else: + os.remove(p) + + def unstrip_protocol(self, name): + name = self._strip_protocol(name) # normalise for local/win/... + return f"file://{name}" + + def _open(self, path, mode="rb", block_size=None, **kwargs): + path = self._strip_protocol(path) + if self.auto_mkdir and "w" in mode: + self.makedirs(self._parent(path), exist_ok=True) + return LocalFileOpener(path, mode, fs=self, **kwargs) + + def touch(self, path, truncate=True, **kwargs): + path = self._strip_protocol(path) + if self.auto_mkdir: + self.makedirs(self._parent(path), exist_ok=True) + if self.exists(path): + os.utime(path, None) + else: + open(path, "a").close() + if truncate: + os.truncate(path, 0) + + def created(self, path): + info = self.info(path=path) + return datetime.datetime.fromtimestamp( + info["created"], tz=datetime.timezone.utc + ) + + def modified(self, path): + info = self.info(path=path) + return datetime.datetime.fromtimestamp(info["mtime"], tz=datetime.timezone.utc) + + @classmethod + def _parent(cls, path): + path = cls._strip_protocol(path) + if os.sep == "/": + # posix native + return path.rsplit("/", 1)[0] or "/" + else: + # NT + path_ = path.rsplit("/", 1)[0] + if len(path_) <= 3: + if path_[1:2] == ":": + # nt root (something like c:/) + return path_[0] + ":/" + # More cases may be required here + return path_ + + @classmethod + def _strip_protocol(cls, path): + path = stringify_path(path) + if path.startswith("file://"): + path = path[7:] + elif path.startswith("file:"): + path = path[5:] + elif path.startswith("local://"): + path = path[8:] + elif path.startswith("local:"): + path = path[6:] + + path = make_path_posix(path) + if os.sep != "/": + # This code-path is a stripped down version of + # > drive, path = ntpath.splitdrive(path) + if path[1:2] == ":": + # Absolute drive-letter path, e.g. X:\Windows + # Relative path with drive, e.g. X:Windows + drive, path = path[:2], path[2:] + elif path[:2] == "//": + # UNC drives, e.g. \\server\share or \\?\UNC\server\share + # Device drives, e.g. \\.\device or \\?\device + if (index1 := path.find("/", 2)) == -1 or ( + index2 := path.find("/", index1 + 1) + ) == -1: + drive, path = path, "" + else: + drive, path = path[:index2], path[index2:] + else: + # Relative path, e.g. Windows + drive = "" + + path = path.rstrip("/") or cls.root_marker + return drive + path + + else: + return path.rstrip("/") or cls.root_marker + + def _isfilestore(self): + # Inheriting from DaskFileSystem makes this False (S3, etc. were) + # the original motivation. But we are a posix-like file system. 
+ # See https://github.com/dask/dask/issues/5526 + return True + + def chmod(self, path, mode): + path = stringify_path(path) + return os.chmod(path, mode) + + +def make_path_posix(path): + """Make path generic and absolute for current OS""" + if not isinstance(path, str): + if isinstance(path, (list, set, tuple)): + return type(path)(make_path_posix(p) for p in path) + else: + path = stringify_path(path) + if not isinstance(path, str): + raise TypeError(f"could not convert {path!r} to string") + if os.sep == "/": + # Native posix + if path.startswith("/"): + # most common fast case for posix + return path + elif path.startswith("~"): + return osp.expanduser(path) + elif path.startswith("./"): + path = path[2:] + elif path == ".": + path = "" + return f"{os.getcwd()}/{path}" + else: + # NT handling + if path[0:1] == "/" and path[2:3] == ":": + # path is like "/c:/local/path" + path = path[1:] + if path[1:2] == ":": + # windows full path like "C:\\local\\path" + if len(path) <= 3: + # nt root (something like c:/) + return path[0] + ":/" + path = path.replace("\\", "/") + return path + elif path[0:1] == "~": + return make_path_posix(osp.expanduser(path)) + elif path.startswith(("\\\\", "//")): + # windows UNC/DFS-style paths + return "//" + path[2:].replace("\\", "/") + elif path.startswith(("\\", "/")): + # windows relative path with root + path = path.replace("\\", "/") + return f"{osp.splitdrive(os.getcwd())[0]}{path}" + else: + path = path.replace("\\", "/") + if path.startswith("./"): + path = path[2:] + elif path == ".": + path = "" + return f"{make_path_posix(os.getcwd())}/{path}" + + +def trailing_sep(path): + """Return True if the path ends with a path separator. + + A forward slash is always considered a path separator, even on Operating + Systems that normally use a backslash. + """ + # TODO: if all incoming paths were posix-compliant then separator would + # always be a forward slash, simplifying this function. + # See https://github.com/fsspec/filesystem_spec/pull/1250 + return path.endswith(os.sep) or (os.altsep is not None and path.endswith(os.altsep)) + + +class LocalFileOpener(io.IOBase): + def __init__( + self, path, mode, autocommit=True, fs=None, compression=None, **kwargs + ): + logger.debug("open file: %s", path) + self.path = path + self.mode = mode + self.fs = fs + self.f = None + self.autocommit = autocommit + self.compression = get_compression(path, compression) + self.blocksize = io.DEFAULT_BUFFER_SIZE + self._open() + + def _open(self): + if self.f is None or self.f.closed: + if self.autocommit or "w" not in self.mode: + self.f = open(self.path, mode=self.mode) + if self.compression: + compress = compr[self.compression] + self.f = compress(self.f, mode=self.mode) + else: + # TODO: check if path is writable? 
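Editor's note: the temporary-file handling below is what backs fsspec's transactional writes; a rough usage sketch (path is a placeholder):

    import fsspec

    fs = fsspec.filesystem("file")
    with fs.transaction:                     # new files are opened with autocommit=False
        with fs.open("/tmp/out.txt", "wb") as f:
            f.write(b"data")                 # written to a temporary file
    # leaving the transaction moves the temp file into place (commit)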
+ i, name = tempfile.mkstemp() + os.close(i) # we want normal open and normal buffered file + self.temp = name + self.f = open(name, mode=self.mode) + if "w" not in self.mode: + self.size = self.f.seek(0, 2) + self.f.seek(0) + self.f.size = self.size + + def _fetch_range(self, start, end): + # probably only used by cached FS + if "r" not in self.mode: + raise ValueError + self._open() + self.f.seek(start) + return self.f.read(end - start) + + def __setstate__(self, state): + self.f = None + loc = state.pop("loc", None) + self.__dict__.update(state) + if "r" in state["mode"]: + self.f = None + self._open() + self.f.seek(loc) + + def __getstate__(self): + d = self.__dict__.copy() + d.pop("f") + if "r" in self.mode: + d["loc"] = self.f.tell() + else: + if not self.f.closed: + raise ValueError("Cannot serialise open write-mode local file") + return d + + def commit(self): + if self.autocommit: + raise RuntimeError("Can only commit if not already set to autocommit") + shutil.move(self.temp, self.path) + + def discard(self): + if self.autocommit: + raise RuntimeError("Cannot discard if set to autocommit") + os.remove(self.temp) + + def readable(self) -> bool: + return True + + def writable(self) -> bool: + return "r" not in self.mode + + def read(self, *args, **kwargs): + return self.f.read(*args, **kwargs) + + def write(self, *args, **kwargs): + return self.f.write(*args, **kwargs) + + def tell(self, *args, **kwargs): + return self.f.tell(*args, **kwargs) + + def seek(self, *args, **kwargs): + return self.f.seek(*args, **kwargs) + + def seekable(self, *args, **kwargs): + return self.f.seekable(*args, **kwargs) + + def readline(self, *args, **kwargs): + return self.f.readline(*args, **kwargs) + + def readlines(self, *args, **kwargs): + return self.f.readlines(*args, **kwargs) + + def close(self): + return self.f.close() + + def truncate(self, size=None) -> int: + return self.f.truncate(size) + + @property + def closed(self): + return self.f.closed + + def fileno(self): + return self.raw.fileno() + + def flush(self) -> None: + self.f.flush() + + def __iter__(self): + return self.f.__iter__() + + def __getattr__(self, item): + return getattr(self.f, item) + + def __enter__(self): + self._incontext = True + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._incontext = False + self.f.__exit__(exc_type, exc_value, traceback) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/memory.py b/MLPY/Lib/site-packages/fsspec/implementations/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..83e7e74d6ceceaf6e75268923094bfdf56b72fc7 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/memory.py @@ -0,0 +1,303 @@ +from __future__ import annotations + +import logging +from datetime import datetime, timezone +from errno import ENOTEMPTY +from io import BytesIO +from pathlib import PurePath, PureWindowsPath +from typing import Any, ClassVar + +from fsspec import AbstractFileSystem +from fsspec.implementations.local import LocalFileSystem +from fsspec.utils import stringify_path + +logger = logging.getLogger("fsspec.memoryfs") + + +class MemoryFileSystem(AbstractFileSystem): + """A filesystem based on a dict of BytesIO objects + + This is a global filesystem so instances of this class all point to the same + in memory filesystem. + """ + + store: ClassVar[dict[str, Any]] = {} # global, do not overwrite! + pseudo_dirs = [""] # global, do not overwrite! 
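Editor's note: a usage sketch showing the shared, global nature of the store described above (paths are placeholders):

    import fsspec

    fs = fsspec.filesystem("memory")
    with fs.open("/folder/a.txt", "wb") as f:
        f.write(b"hello")

    # a second instance sees the same contents, because the store is global
    assert fsspec.filesystem("memory").cat("/folder/a.txt") == b"hello"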
+ protocol = "memory" + root_marker = "/" + + @classmethod + def _strip_protocol(cls, path): + if isinstance(path, PurePath): + if isinstance(path, PureWindowsPath): + return LocalFileSystem._strip_protocol(path) + else: + path = stringify_path(path) + + if path.startswith("memory://"): + path = path[len("memory://") :] + if "::" in path or "://" in path: + return path.rstrip("/") + path = path.lstrip("/").rstrip("/") + return "/" + path if path else "" + + def ls(self, path, detail=True, **kwargs): + path = self._strip_protocol(path) + if path in self.store: + # there is a key with this exact name + if not detail: + return [path] + return [ + { + "name": path, + "size": self.store[path].size, + "type": "file", + "created": self.store[path].created.timestamp(), + } + ] + paths = set() + starter = path + "/" + out = [] + for p2 in tuple(self.store): + if p2.startswith(starter): + if "/" not in p2[len(starter) :]: + # exact child + out.append( + { + "name": p2, + "size": self.store[p2].size, + "type": "file", + "created": self.store[p2].created.timestamp(), + } + ) + elif len(p2) > len(starter): + # implied child directory + ppath = starter + p2[len(starter) :].split("/", 1)[0] + if ppath not in paths: + out = out or [] + out.append( + { + "name": ppath, + "size": 0, + "type": "directory", + } + ) + paths.add(ppath) + for p2 in self.pseudo_dirs: + if p2.startswith(starter): + if "/" not in p2[len(starter) :]: + # exact child pdir + if p2 not in paths: + out.append({"name": p2, "size": 0, "type": "directory"}) + paths.add(p2) + else: + # directory implied by deeper pdir + ppath = starter + p2[len(starter) :].split("/", 1)[0] + if ppath not in paths: + out.append({"name": ppath, "size": 0, "type": "directory"}) + paths.add(ppath) + if not out: + if path in self.pseudo_dirs: + # empty dir + return [] + raise FileNotFoundError(path) + if detail: + return out + return sorted([f["name"] for f in out]) + + def mkdir(self, path, create_parents=True, **kwargs): + path = self._strip_protocol(path) + if path in self.store or path in self.pseudo_dirs: + raise FileExistsError(path) + if self._parent(path).strip("/") and self.isfile(self._parent(path)): + raise NotADirectoryError(self._parent(path)) + if create_parents and self._parent(path).strip("/"): + try: + self.mkdir(self._parent(path), create_parents, **kwargs) + except FileExistsError: + pass + if path and path not in self.pseudo_dirs: + self.pseudo_dirs.append(path) + + def makedirs(self, path, exist_ok=False): + try: + self.mkdir(path, create_parents=True) + except FileExistsError: + if not exist_ok: + raise + + def pipe_file(self, path, value, **kwargs): + """Set the bytes of given file + + Avoids copies of the data if possible + """ + self.open(path, "wb", data=value) + + def rmdir(self, path): + path = self._strip_protocol(path) + if path == "": + # silently avoid deleting FS root + return + if path in self.pseudo_dirs: + if not self.ls(path): + self.pseudo_dirs.remove(path) + else: + raise OSError(ENOTEMPTY, "Directory not empty", path) + else: + raise FileNotFoundError(path) + + def info(self, path, **kwargs): + logger.debug("info: %s", path) + path = self._strip_protocol(path) + if path in self.pseudo_dirs or any( + p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs + ): + return { + "name": path, + "size": 0, + "type": "directory", + } + elif path in self.store: + filelike = self.store[path] + return { + "name": path, + "size": filelike.size, + "type": "file", + "created": getattr(filelike, "created", None), + } + else: 
+ raise FileNotFoundError(path) + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + path = self._strip_protocol(path) + if path in self.pseudo_dirs: + raise IsADirectoryError(path) + parent = path + while len(parent) > 1: + parent = self._parent(parent) + if self.isfile(parent): + raise FileExistsError(parent) + if mode in ["rb", "ab", "r+b"]: + if path in self.store: + f = self.store[path] + if mode == "ab": + # position at the end of file + f.seek(0, 2) + else: + # position at the beginning of file + f.seek(0) + return f + else: + raise FileNotFoundError(path) + elif mode == "wb": + m = MemoryFile(self, path, kwargs.get("data")) + if not self._intrans: + m.commit() + return m + else: + name = self.__class__.__name__ + raise ValueError(f"unsupported file mode for {name}: {mode!r}") + + def cp_file(self, path1, path2, **kwargs): + path1 = self._strip_protocol(path1) + path2 = self._strip_protocol(path2) + if self.isfile(path1): + self.store[path2] = MemoryFile( + self, path2, self.store[path1].getvalue() + ) # implicit copy + elif self.isdir(path1): + if path2 not in self.pseudo_dirs: + self.pseudo_dirs.append(path2) + else: + raise FileNotFoundError(path1) + + def cat_file(self, path, start=None, end=None, **kwargs): + logger.debug("cat: %s", path) + path = self._strip_protocol(path) + try: + return bytes(self.store[path].getbuffer()[start:end]) + except KeyError: + raise FileNotFoundError(path) + + def _rm(self, path): + path = self._strip_protocol(path) + try: + del self.store[path] + except KeyError as e: + raise FileNotFoundError(path) from e + + def modified(self, path): + path = self._strip_protocol(path) + try: + return self.store[path].modified + except KeyError: + raise FileNotFoundError(path) + + def created(self, path): + path = self._strip_protocol(path) + try: + return self.store[path].created + except KeyError: + raise FileNotFoundError(path) + + def rm(self, path, recursive=False, maxdepth=None): + if isinstance(path, str): + path = self._strip_protocol(path) + else: + path = [self._strip_protocol(p) for p in path] + paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth) + for p in reversed(paths): + # If the expanded path doesn't exist, it is only because the expanded + # path was a directory that does not exist in self.pseudo_dirs. This + # is possible if you directly create files without making the + # directories first. + if not self.exists(p): + continue + if self.isfile(p): + self.rm_file(p) + else: + self.rmdir(p) + + +class MemoryFile(BytesIO): + """A BytesIO which can't close and works as a context manager + + Can initialise with data. Each path should only be active once at any moment. 
+ + No need to provide fs, path if auto-committing (default) + """ + + def __init__(self, fs=None, path=None, data=None): + logger.debug("open file %s", path) + self.fs = fs + self.path = path + self.created = datetime.now(tz=timezone.utc) + self.modified = datetime.now(tz=timezone.utc) + if data: + super().__init__(data) + self.seek(0) + + @property + def size(self): + return self.getbuffer().nbytes + + def __enter__(self): + return self + + def close(self): + pass + + def discard(self): + pass + + def commit(self): + self.fs.store[self.path] = self + self.modified = datetime.now(tz=timezone.utc) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/reference.py b/MLPY/Lib/site-packages/fsspec/implementations/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..981e6989f7367d49c389a9eb9459439f9b2cf1bf --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/reference.py @@ -0,0 +1,1169 @@ +import base64 +import collections +import io +import itertools +import logging +import math +import os +from functools import lru_cache +from typing import TYPE_CHECKING + +import fsspec.core + +try: + import ujson as json +except ImportError: + if not TYPE_CHECKING: + import json + +from ..asyn import AsyncFileSystem +from ..callbacks import DEFAULT_CALLBACK +from ..core import filesystem, open, split_protocol +from ..utils import isfilelike, merge_offset_ranges, other_paths + +logger = logging.getLogger("fsspec.reference") + + +class ReferenceNotReachable(RuntimeError): + def __init__(self, reference, target, *args): + super().__init__(*args) + self.reference = reference + self.target = target + + def __str__(self): + return f'Reference "{self.reference}" failed to fetch target {self.target}' + + +def _first(d): + return list(d.values())[0] + + +def _prot_in_references(path, references): + ref = references.get(path) + if isinstance(ref, (list, tuple)): + return split_protocol(ref[0])[0] if ref[0] else ref[0] + + +def _protocol_groups(paths, references): + if isinstance(paths, str): + return {_prot_in_references(paths, references): [paths]} + out = {} + for path in paths: + protocol = _prot_in_references(path, references) + out.setdefault(protocol, []).append(path) + return out + + +class RefsValuesView(collections.abc.ValuesView): + def __iter__(self): + for val in self._mapping.zmetadata.values(): + yield json.dumps(val).encode() + yield from self._mapping._items.values() + for field in self._mapping.listdir(): + chunk_sizes = self._mapping._get_chunk_sizes(field) + if len(chunk_sizes) == 0: + yield self._mapping[field + "/0"] + continue + yield from self._mapping._generate_all_records(field) + + +class RefsItemsView(collections.abc.ItemsView): + def __iter__(self): + return zip(self._mapping.keys(), self._mapping.values()) + + +def ravel_multi_index(idx, sizes): + val = 0 + mult = 1 + for i, s in zip(idx[::-1], sizes[::-1]): + val += i * mult + mult *= s + return val + + +class LazyReferenceMapper(collections.abc.MutableMapping): + """This interface can be used to read/write references from Parquet stores. + It is not intended for other types of references. + It can be used with Kerchunk's MultiZarrToZarr method to combine + references into a parquet store. 
+ Examples of this use-case can be found here: + https://fsspec.github.io/kerchunk/advanced.html?highlight=parquet#parquet-storage""" + + # import is class level to prevent numpy dep requirement for fsspec + @property + def np(self): + import numpy as np + + return np + + @property + def pd(self): + import pandas as pd + + return pd + + def __init__( + self, root, fs=None, out_root=None, cache_size=128, categorical_threshold=10 + ): + """ + + This instance will be writable, storing changes in memory until full partitions + are accumulated or .flush() is called. + + To create an empty lazy store, use .create() + + Parameters + ---------- + root : str + Root of parquet store + fs : fsspec.AbstractFileSystem + fsspec filesystem object, default is local filesystem. + cache_size : int, default=128 + Maximum size of LRU cache, where cache_size*record_size denotes + the total number of references that can be loaded in memory at once. + categorical_threshold : int + Encode urls as pandas.Categorical to reduce memory footprint if the ratio + of the number of unique urls to total number of refs for each variable + is greater than or equal to this number. (default 10) + """ + self.root = root + self.chunk_sizes = {} + self.out_root = out_root or self.root + self.cat_thresh = categorical_threshold + self.cache_size = cache_size + self.dirs = None + self.url = self.root + "/{field}/refs.{record}.parq" + # TODO: derive fs from `root` + self.fs = fsspec.filesystem("file") if fs is None else fs + + def __getattr__(self, item): + if item in ("_items", "record_size", "zmetadata"): + self.setup() + # avoid possible recursion if setup fails somehow + return self.__dict__[item] + raise AttributeError(item) + + def setup(self): + self._items = {} + self._items[".zmetadata"] = self.fs.cat_file( + "/".join([self.root, ".zmetadata"]) + ) + met = json.loads(self._items[".zmetadata"]) + self.record_size = met["record_size"] + self.zmetadata = met["metadata"] + + # Define function to open and decompress refs + @lru_cache(maxsize=self.cache_size) + def open_refs(field, record): + """cached parquet file loader""" + path = self.url.format(field=field, record=record) + data = io.BytesIO(self.fs.cat_file(path)) + df = self.pd.read_parquet(data, engine="fastparquet") + refs = {c: df[c].values for c in df.columns} + return refs + + self.open_refs = open_refs + + @staticmethod + def create(root, storage_options=None, fs=None, record_size=10000, **kwargs): + """Make empty parquet reference set + + First deletes the contents of the given directory, if it exists. + + Parameters + ---------- + root: str + Directory to contain the output; will be created + storage_options: dict | None + For making the filesystem to use for writing is fs is None + fs: FileSystem | None + Filesystem for writing + record_size: int + Number of references per parquet file + kwargs: passed to __init__ + + Returns + ------- + LazyReferenceMapper instance + """ + met = {"metadata": {}, "record_size": record_size} + if fs is None: + fs, root = fsspec.core.url_to_fs(root, **(storage_options or {})) + if fs.exists(root): + fs.rm(root, recursive=True) + fs.makedirs(root, exist_ok=True) + fs.pipe("/".join([root, ".zmetadata"]), json.dumps(met).encode()) + return LazyReferenceMapper(root, fs, **kwargs) + + def listdir(self, basename=True): + """List top-level directories""" + # cache me? 
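        # Top-level field names are derived from the keys of the consolidated
        # zarr metadata and memoised in self.dirs on first use.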
+ if self.dirs is None: + dirs = [p.split("/", 1)[0] for p in self.zmetadata] + self.dirs = {p for p in dirs if p and not p.startswith(".")} + listing = self.dirs + if basename: + listing = [os.path.basename(path) for path in listing] + return listing + + def ls(self, path="", detail=True): + """Shortcut file listings""" + if not path: + dirnames = self.listdir() + others = set( + [".zmetadata"] + + [name for name in self.zmetadata if "/" not in name] + + [name for name in self._items if "/" not in name] + ) + if detail is False: + others.update(dirnames) + return sorted(others) + dirinfo = [ + {"name": name, "type": "directory", "size": 0} for name in dirnames + ] + fileinfo = [ + { + "name": name, + "type": "file", + "size": len( + json.dumps(self.zmetadata[name]) + if name in self.zmetadata + else self._items[name] + ), + } + for name in others + ] + return sorted(dirinfo + fileinfo, key=lambda s: s["name"]) + parts = path.split("/", 1) + if len(parts) > 1: + raise FileNotFoundError("Cannot list within directories right now") + field = parts[0] + others = set( + [name for name in self.zmetadata if name.startswith(f"{path}/")] + + [name for name in self._items if name.startswith(f"{path}/")] + ) + fileinfo = [ + { + "name": name, + "type": "file", + "size": len( + json.dumps(self.zmetadata[name]) + if name in self.zmetadata + else self._items[name] + ), + } + for name in others + ] + keys = self._keys_in_field(field) + + if detail is False: + return list(others) + list(keys) + recs = self._generate_all_records(field) + recinfo = [ + {"name": name, "type": "file", "size": rec[-1]} + for name, rec in zip(keys, recs) + if rec[0] # filters out path==None, deleted/missing + ] + return fileinfo + recinfo + + def _load_one_key(self, key): + """Get the reference for one key + + Returns bytes, one-element list or three-element list. 
+ """ + if key in self._items: + return self._items[key] + elif key in self.zmetadata: + return json.dumps(self.zmetadata[key]).encode() + elif "/" not in key or self._is_meta(key): + raise KeyError(key) + field, _ = key.rsplit("/", 1) + record, ri, chunk_size = self._key_to_record(key) + maybe = self._items.get((field, record), {}).get(ri, False) + if maybe is None: + # explicitly deleted + raise KeyError + elif maybe: + return maybe + elif chunk_size == 0: + return b"" + + # Chunk keys can be loaded from row group and cached in LRU cache + try: + refs = self.open_refs(field, record) + except (ValueError, TypeError, FileNotFoundError): + raise KeyError(key) + columns = ["path", "offset", "size", "raw"] + selection = [refs[c][ri] if c in refs else None for c in columns] + raw = selection[-1] + if raw is not None: + return raw + if selection[0] is None: + raise KeyError("This reference does not exist or has been deleted") + if selection[1:3] == [0, 0]: + # URL only + return selection[:1] + # URL, offset, size + return selection[:3] + + @lru_cache(4096) + def _key_to_record(self, key): + """Details needed to construct a reference for one key""" + field, chunk = key.rsplit("/", 1) + chunk_sizes = self._get_chunk_sizes(field) + if len(chunk_sizes) == 0: + return 0, 0, 0 + chunk_idx = [int(c) for c in chunk.split(".")] + chunk_number = ravel_multi_index(chunk_idx, chunk_sizes) + record = chunk_number // self.record_size + ri = chunk_number % self.record_size + return record, ri, len(chunk_sizes) + + def _get_chunk_sizes(self, field): + """The number of chunks along each axis for a given field""" + if field not in self.chunk_sizes: + zarray = self.zmetadata[f"{field}/.zarray"] + size_ratio = [ + math.ceil(s / c) for s, c in zip(zarray["shape"], zarray["chunks"]) + ] + self.chunk_sizes[field] = size_ratio or [1] + return self.chunk_sizes[field] + + def _generate_record(self, field, record): + """The references for a given parquet file of a given field""" + refs = self.open_refs(field, record) + it = iter(zip(*refs.values())) + if len(refs) == 3: + # All urls + return (list(t) for t in it) + elif len(refs) == 1: + # All raws + return refs["raw"] + else: + # Mix of urls and raws + return (list(t[:3]) if not t[3] else t[3] for t in it) + + def _generate_all_records(self, field): + """Load all the references within a field by iterating over the parquet files""" + nrec = 1 + for ch in self._get_chunk_sizes(field): + nrec *= ch + nrec = math.ceil(nrec / self.record_size) + for record in range(nrec): + yield from self._generate_record(field, record) + + def values(self): + return RefsValuesView(self) + + def items(self): + return RefsItemsView(self) + + def __hash__(self): + return id(self) + + def __getitem__(self, key): + return self._load_one_key(key) + + def __setitem__(self, key, value): + if "/" in key and not self._is_meta(key): + field, chunk = key.rsplit("/", 1) + record, i, _ = self._key_to_record(key) + subdict = self._items.setdefault((field, record), {}) + subdict[i] = value + if len(subdict) == self.record_size: + self.write(field, record) + else: + # metadata or top-level + self._items[key] = value + new_value = json.loads( + value.decode() if isinstance(value, bytes) else value + ) + self.zmetadata[key] = {**self.zmetadata.get(key, {}), **new_value} + + @staticmethod + def _is_meta(key): + return key.startswith(".z") or "/.z" in key + + def __delitem__(self, key): + if key in self._items: + del self._items[key] + elif key in self.zmetadata: + del self.zmetadata[key] + else: + if "/" in 
key and not self._is_meta(key): + field, _ = key.rsplit("/", 1) + record, i, _ = self._key_to_record(key) + subdict = self._items.setdefault((field, record), {}) + subdict[i] = None + if len(subdict) == self.record_size: + self.write(field, record) + else: + # metadata or top-level + self._items[key] = None + + def write(self, field, record, base_url=None, storage_options=None): + # extra requirements if writing + import kerchunk.df + import numpy as np + import pandas as pd + + partition = self._items[(field, record)] + original = False + if len(partition) < self.record_size: + try: + original = self.open_refs(field, record) + except IOError: + pass + + if original: + paths = original["path"] + offsets = original["offset"] + sizes = original["size"] + raws = original["raw"] + else: + paths = np.full(self.record_size, np.nan, dtype="O") + offsets = np.zeros(self.record_size, dtype="int64") + sizes = np.zeros(self.record_size, dtype="int64") + raws = np.full(self.record_size, np.nan, dtype="O") + for j, data in partition.items(): + if isinstance(data, list): + if ( + str(paths.dtype) == "category" + and data[0] not in paths.dtype.categories + ): + paths = paths.add_categories(data[0]) + paths[j] = data[0] + if len(data) > 1: + offsets[j] = data[1] + sizes[j] = data[2] + elif data is None: + # delete + paths[j] = None + offsets[j] = 0 + sizes[j] = 0 + raws[j] = None + else: + # this is the only call into kerchunk, could remove + raws[j] = kerchunk.df._proc_raw(data) + # TODO: only save needed columns + df = pd.DataFrame( + { + "path": paths, + "offset": offsets, + "size": sizes, + "raw": raws, + }, + copy=False, + ) + if df.path.count() / (df.path.nunique() or 1) > self.cat_thresh: + df["path"] = df["path"].astype("category") + object_encoding = {"raw": "bytes", "path": "utf8"} + has_nulls = ["path", "raw"] + + fn = f"{base_url or self.out_root}/{field}/refs.{record}.parq" + self.fs.mkdirs(f"{base_url or self.out_root}/{field}", exist_ok=True) + df.to_parquet( + fn, + engine="fastparquet", + storage_options=storage_options + or getattr(self.fs, "storage_options", None), + compression="zstd", + index=False, + stats=False, + object_encoding=object_encoding, + has_nulls=has_nulls, + # **kwargs, + ) + partition.clear() + self._items.pop((field, record)) + + def flush(self, base_url=None, storage_options=None): + """Output any modified or deleted keys + + Parameters + ---------- + base_url: str + Location of the output + """ + # write what we have so far and clear sub chunks + for thing in list(self._items): + if isinstance(thing, tuple): + field, record = thing + self.write( + field, + record, + base_url=base_url, + storage_options=storage_options, + ) + + # gather .zmetadata from self._items and write that too + for k in list(self._items): + if k != ".zmetadata" and ".z" in k: + self.zmetadata[k] = json.loads(self._items.pop(k)) + met = {"metadata": self.zmetadata, "record_size": self.record_size} + self._items[".zmetadata"] = json.dumps(met).encode() + self.fs.pipe( + "/".join([base_url or self.out_root, ".zmetadata"]), + self._items[".zmetadata"], + ) + + # TODO: only clear those that we wrote to? 
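        # Drop the LRU cache of loaded parquet partitions so that subsequent
        # reads pick up the records just (re)written above.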
+ self.open_refs.cache_clear() + + def __len__(self): + # Caveat: This counts expected references, not actual - but is fast + count = 0 + for field in self.listdir(): + if field.startswith("."): + count += 1 + else: + count += math.prod(self._get_chunk_sizes(field)) + count += len(self.zmetadata) # all metadata keys + # any other files not in reference partitions + count += sum(1 for _ in self._items if not isinstance(_, tuple)) + return count + + def __iter__(self): + # Caveat: returns only existing keys, so the number of these does not + # match len(self) + metas = set(self.zmetadata) + metas.update(self._items) + for bit in metas: + if isinstance(bit, str): + yield bit + for field in self.listdir(): + for k in self._keys_in_field(field): + if k in self: + yield k + + def __contains__(self, item): + try: + self._load_one_key(item) + return True + except KeyError: + return False + + def _keys_in_field(self, field): + """List key names in given field + + Produces strings like "field/x.y" appropriate from the chunking of the array + """ + chunk_sizes = self._get_chunk_sizes(field) + if len(chunk_sizes) == 0: + yield field + "/0" + return + inds = itertools.product(*(range(i) for i in chunk_sizes)) + for ind in inds: + yield field + "/" + ".".join([str(c) for c in ind]) + + +class ReferenceFileSystem(AsyncFileSystem): + """View byte ranges of some other file as a file system + Initial version: single file system target, which must support + async, and must allow start and end args in _cat_file. Later versions + may allow multiple arbitrary URLs for the targets. + This FileSystem is read-only. It is designed to be used with async + targets (for now). This FileSystem only allows whole-file access, no + ``open``. We do not get original file details from the target FS. + Configuration is by passing a dict of references at init, or a URL to + a JSON file containing the same; this dict + can also contain concrete data for some set of paths. + Reference dict format: + {path0: bytes_data, path1: (target_url, offset, size)} + https://github.com/fsspec/kerchunk/blob/main/README.md + """ + + protocol = "reference" + + def __init__( + self, + fo, + target=None, + ref_storage_args=None, + target_protocol=None, + target_options=None, + remote_protocol=None, + remote_options=None, + fs=None, + template_overrides=None, + simple_templates=True, + max_gap=64_000, + max_block=256_000_000, + cache_size=128, + **kwargs, + ): + """ + Parameters + ---------- + fo : dict or str + The set of references to use for this instance, with a structure as above. + If str referencing a JSON file, will use fsspec.open, in conjunction + with target_options and target_protocol to open and parse JSON at this + location. If a directory, then assume references are a set of parquet + files to be loaded lazily. + target : str + For any references having target_url as None, this is the default file + target to use + ref_storage_args : dict + If references is a str, use these kwargs for loading the JSON file. + Deprecated: use target_options instead. + target_protocol : str + Used for loading the reference file, if it is a path. If None, protocol + will be derived from the given path + target_options : dict + Extra FS options for loading the reference file ``fo``, if given as a path + remote_protocol : str + The protocol of the filesystem on which the references will be evaluated + (unless fs is provided). If not given, will be derived from the first + URL that has a protocol in the templates or in the references, in that + order. 
+ remote_options : dict + kwargs to go with remote_protocol + fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict)) + Directly provide a file system(s): + - a single filesystem instance + - a dict of protocol:filesystem, where each value is either a filesystem + instance, or a dict of kwargs that can be used to create in + instance for the given protocol + + If this is given, remote_options and remote_protocol are ignored. + template_overrides : dict + Swap out any templates in the references file with these - useful for + testing. + simple_templates: bool + Whether templates can be processed with simple replace (True) or if + jinja is needed (False, much slower). All reference sets produced by + ``kerchunk`` are simple in this sense, but the spec allows for complex. + max_gap, max_block: int + For merging multiple concurrent requests to the same remote file. + Neighboring byte ranges will only be merged when their + inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0 + to only merge when it requires no extra bytes. Pass a negative + number to disable merging, appropriate for local target files. + Neighboring byte ranges will only be merged when the size of + the aggregated range is <= ``max_block``. Default is 256MB. + cache_size : int + Maximum size of LRU cache, where cache_size*record_size denotes + the total number of references that can be loaded in memory at once. + Only used for lazily loaded references. + kwargs : passed to parent class + """ + super().__init__(**kwargs) + self.target = target + self.template_overrides = template_overrides + self.simple_templates = simple_templates + self.templates = {} + self.fss = {} + self._dircache = {} + self.max_gap = max_gap + self.max_block = max_block + if isinstance(fo, str): + dic = dict( + **(ref_storage_args or target_options or {}), protocol=target_protocol + ) + ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic) + if ref_fs.isfile(fo2): + # text JSON + with fsspec.open(fo, "rb", **dic) as f: + logger.info("Read reference from URL %s", fo) + text = json.load(f) + self._process_references(text, template_overrides) + else: + # Lazy parquet refs + logger.info("Open lazy reference dict from URL %s", fo) + self.references = LazyReferenceMapper( + fo2, + fs=ref_fs, + cache_size=cache_size, + ) + else: + # dictionaries + self._process_references(fo, template_overrides) + if isinstance(fs, dict): + self.fss = { + k: ( + fsspec.filesystem(k.split(":", 1)[0], **opts) + if isinstance(opts, dict) + else opts + ) + for k, opts in fs.items() + } + if None not in self.fss: + self.fss[None] = filesystem("file") + return + if fs is not None: + # single remote FS + remote_protocol = ( + fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol + ) + self.fss[remote_protocol] = fs + + if remote_protocol is None: + # get single protocol from any templates + for ref in self.templates.values(): + if callable(ref): + ref = ref() + protocol, _ = fsspec.core.split_protocol(ref) + if protocol and protocol not in self.fss: + fs = filesystem(protocol, **(remote_options or {})) + self.fss[protocol] = fs + if remote_protocol is None: + # get single protocol from references + # TODO: warning here, since this can be very expensive? 
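            # Fall back to scanning the reference values themselves: the first
            # list-style reference that carries a URL decides the default
            # remote protocol, and scanning stops there.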
+ for ref in self.references.values(): + if callable(ref): + ref = ref() + if isinstance(ref, list) and ref[0]: + protocol, _ = fsspec.core.split_protocol(ref[0]) + if protocol not in self.fss: + fs = filesystem(protocol, **(remote_options or {})) + self.fss[protocol] = fs + # only use first remote URL + break + + if remote_protocol and remote_protocol not in self.fss: + fs = filesystem(remote_protocol, **(remote_options or {})) + self.fss[remote_protocol] = fs + + self.fss[None] = fs or filesystem("file") # default one + + def _cat_common(self, path, start=None, end=None): + path = self._strip_protocol(path) + logger.debug(f"cat: {path}") + try: + part = self.references[path] + except KeyError: + raise FileNotFoundError(path) + if isinstance(part, str): + part = part.encode() + if isinstance(part, bytes): + logger.debug(f"Reference: {path}, type bytes") + if part.startswith(b"base64:"): + part = base64.b64decode(part[7:]) + return part, None, None + + if len(part) == 1: + logger.debug(f"Reference: {path}, whole file => {part}") + url = part[0] + start1, end1 = start, end + else: + url, start0, size = part + logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}") + end0 = start0 + size + + if start is not None: + if start >= 0: + start1 = start0 + start + else: + start1 = end0 + start + else: + start1 = start0 + if end is not None: + if end >= 0: + end1 = start0 + end + else: + end1 = end0 + end + else: + end1 = end0 + if url is None: + url = self.target + return url, start1, end1 + + async def _cat_file(self, path, start=None, end=None, **kwargs): + part_or_url, start0, end0 = self._cat_common(path, start=start, end=end) + if isinstance(part_or_url, bytes): + return part_or_url[start:end] + protocol, _ = split_protocol(part_or_url) + try: + await self.fss[protocol]._cat_file(part_or_url, start=start, end=end) + except Exception as e: + raise ReferenceNotReachable(path, part_or_url) from e + + def cat_file(self, path, start=None, end=None, **kwargs): + part_or_url, start0, end0 = self._cat_common(path, start=start, end=end) + if isinstance(part_or_url, bytes): + return part_or_url[start:end] + protocol, _ = split_protocol(part_or_url) + try: + return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0) + except Exception as e: + raise ReferenceNotReachable(path, part_or_url) from e + + def pipe_file(self, path, value, **_): + """Temporarily add binary data or reference as a file""" + self.references[path] = value + + async def _get_file(self, rpath, lpath, **kwargs): + if self.isdir(rpath): + return os.makedirs(lpath, exist_ok=True) + data = await self._cat_file(rpath) + with open(lpath, "wb") as f: + f.write(data) + + def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, **kwargs): + if self.isdir(rpath): + return os.makedirs(lpath, exist_ok=True) + data = self.cat_file(rpath, **kwargs) + callback.set_size(len(data)) + if isfilelike(lpath): + lpath.write(data) + else: + with open(lpath, "wb") as f: + f.write(data) + callback.absolute_update(len(data)) + + def get(self, rpath, lpath, recursive=False, **kwargs): + if recursive: + # trigger directory build + self.ls("") + rpath = self.expand_path(rpath, recursive=recursive) + fs = fsspec.filesystem("file", auto_mkdir=True) + targets = other_paths(rpath, lpath) + if recursive: + data = self.cat([r for r in rpath if not self.isdir(r)]) + else: + data = self.cat(rpath) + for remote, local in zip(rpath, targets): + if remote in data: + fs.pipe_file(local, data[remote]) + + def cat(self, path, recursive=False, 
on_error="raise", **kwargs): + if isinstance(path, str) and recursive: + raise NotImplementedError + if isinstance(path, list) and (recursive or any("*" in p for p in path)): + raise NotImplementedError + # TODO: if references is lazy, pre-fetch all paths in batch before access + proto_dict = _protocol_groups(path, self.references) + out = {} + for proto, paths in proto_dict.items(): + fs = self.fss[proto] + urls, starts, ends, valid_paths = [], [], [], [] + for p in paths: + # find references or label not-found. Early exit if any not + # found and on_error is "raise" + try: + u, s, e = self._cat_common(p) + except FileNotFoundError as err: + if on_error == "raise": + raise + if on_error != "omit": + out[p] = err + else: + urls.append(u) + starts.append(s) + ends.append(e) + valid_paths.append(p) + + # process references into form for merging + urls2 = [] + starts2 = [] + ends2 = [] + paths2 = [] + whole_files = set() + for u, s, e, p in zip(urls, starts, ends, valid_paths): + if isinstance(u, bytes): + # data + out[p] = u + elif s is None: + # whole file - limits are None, None, but no further + # entries take for this file + whole_files.add(u) + urls2.append(u) + starts2.append(s) + ends2.append(e) + paths2.append(p) + for u, s, e, p in zip(urls, starts, ends, valid_paths): + # second run to account for files that are to be loaded whole + if s is not None and u not in whole_files: + urls2.append(u) + starts2.append(s) + ends2.append(e) + paths2.append(p) + + # merge and fetch consolidated ranges + new_paths, new_starts, new_ends = merge_offset_ranges( + list(urls2), + list(starts2), + list(ends2), + sort=True, + max_gap=self.max_gap, + max_block=self.max_block, + ) + bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends) + + # unbundle from merged bytes - simple approach + for u, s, e, p in zip(urls, starts, ends, valid_paths): + if p in out: + continue # was bytes, already handled + for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out): + if np == u and (ns is None or ne is None): + if isinstance(b, Exception): + out[p] = b + else: + out[p] = b[s:e] + elif np == u and s >= ns and e <= ne: + if isinstance(b, Exception): + out[p] = b + else: + out[p] = b[s - ns : (e - ne) or None] + + for k, v in out.copy().items(): + # these were valid references, but fetch failed, so transform exc + if isinstance(v, Exception) and k in self.references: + ex = out[k] + new_ex = ReferenceNotReachable(k, self.references[k]) + new_ex.__cause__ = ex + if on_error == "raise": + raise new_ex + elif on_error != "omit": + out[k] = new_ex + + if len(out) == 1 and isinstance(path, str) and "*" not in path: + return _first(out) + return out + + def _process_references(self, references, template_overrides=None): + vers = references.get("version", None) + if vers is None: + self._process_references0(references) + elif vers == 1: + self._process_references1(references, template_overrides=template_overrides) + else: + raise ValueError(f"Unknown reference spec version: {vers}") + # TODO: we make dircache by iterating over all entries, but for Spec >= 1, + # can replace with programmatic. Is it even needed for mapper interface? 
+ + def _process_references0(self, references): + """Make reference dict for Spec Version 0""" + if isinstance(references, dict): + # do not do this for lazy/parquet backend, which will not make dicts, + # but must remain writable in the original object + references = { + key: json.dumps(val) if isinstance(val, dict) else val + for key, val in references.items() + } + self.references = references + + def _process_references1(self, references, template_overrides=None): + if not self.simple_templates or self.templates: + import jinja2 + self.references = {} + self._process_templates(references.get("templates", {})) + + @lru_cache(1000) + def _render_jinja(u): + return jinja2.Template(u).render(**self.templates) + + for k, v in references.get("refs", {}).items(): + if isinstance(v, str): + if v.startswith("base64:"): + self.references[k] = base64.b64decode(v[7:]) + self.references[k] = v + elif isinstance(v, dict): + self.references[k] = json.dumps(v) + elif self.templates: + u = v[0] + if "{{" in u: + if self.simple_templates: + u = ( + u.replace("{{", "{") + .replace("}}", "}") + .format(**self.templates) + ) + else: + u = _render_jinja(u) + self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]] + else: + self.references[k] = v + self.references.update(self._process_gen(references.get("gen", []))) + + def _process_templates(self, tmp): + self.templates = {} + if self.template_overrides is not None: + tmp.update(self.template_overrides) + for k, v in tmp.items(): + if "{{" in v: + import jinja2 + + self.templates[k] = lambda temp=v, **kwargs: jinja2.Template( + temp + ).render(**kwargs) + else: + self.templates[k] = v + + def _process_gen(self, gens): + out = {} + for gen in gens: + dimension = { + k: v + if isinstance(v, list) + else range(v.get("start", 0), v["stop"], v.get("step", 1)) + for k, v in gen["dimensions"].items() + } + products = ( + dict(zip(dimension.keys(), values)) + for values in itertools.product(*dimension.values()) + ) + for pr in products: + import jinja2 + + key = jinja2.Template(gen["key"]).render(**pr, **self.templates) + url = jinja2.Template(gen["url"]).render(**pr, **self.templates) + if ("offset" in gen) and ("length" in gen): + offset = int( + jinja2.Template(gen["offset"]).render(**pr, **self.templates) + ) + length = int( + jinja2.Template(gen["length"]).render(**pr, **self.templates) + ) + out[key] = [url, offset, length] + elif ("offset" in gen) ^ ("length" in gen): + raise ValueError( + "Both 'offset' and 'length' are required for a " + "reference generator entry if either is provided." 
+ ) + else: + out[key] = [url] + return out + + def _dircache_from_items(self): + self.dircache = {"": []} + it = self.references.items() + for path, part in it: + if isinstance(part, (bytes, str)): + size = len(part) + elif len(part) == 1: + size = None + else: + _, _, size = part + par = path.rsplit("/", 1)[0] if "/" in path else "" + par0 = par + subdirs = [par0] + while par0 and par0 not in self.dircache: + # collect parent directories + par0 = self._parent(par0) + subdirs.append(par0) + + subdirs.reverse() + for parent, child in zip(subdirs, subdirs[1:]): + # register newly discovered directories + assert child not in self.dircache + assert parent in self.dircache + self.dircache[parent].append( + {"name": child, "type": "directory", "size": 0} + ) + self.dircache[child] = [] + + self.dircache[par].append({"name": path, "type": "file", "size": size}) + + def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs): + data = self.cat_file(path) # load whole chunk into memory + return io.BytesIO(data) + + def ls(self, path, detail=True, **kwargs): + path = self._strip_protocol(path) + if isinstance(self.references, LazyReferenceMapper): + try: + return self.references.ls(path, detail) + except KeyError: + pass + raise FileNotFoundError(f"'{path}' is not a known key") + if not self.dircache: + self._dircache_from_items() + out = self._ls_from_cache(path) + if out is None: + raise FileNotFoundError(path) + if detail: + return out + return [o["name"] for o in out] + + def exists(self, path, **kwargs): # overwrite auto-sync version + return self.isdir(path) or self.isfile(path) + + def isdir(self, path): # overwrite auto-sync version + if self.dircache: + return path in self.dircache + elif isinstance(self.references, LazyReferenceMapper): + return path in self.references.listdir("") + else: + # this may be faster than building dircache for single calls, but + # by looping will be slow for many calls; could cache it? 
+ return any(_.startswith(f"{path}/") for _ in self.references) + + def isfile(self, path): # overwrite auto-sync version + return path in self.references + + async def _ls(self, path, detail=True, **kwargs): # calls fast sync code + return self.ls(path, detail, **kwargs) + + def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + if withdirs: + return super().find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs + ) + if path: + path = self._strip_protocol(path) + r = sorted(k for k in self.references if k.startswith(path)) + else: + r = sorted(self.references) + if detail: + if not self.dircache: + self._dircache_from_items() + return {k: self._ls_from_cache(k)[0] for k in r} + else: + return r + + def info(self, path, **kwargs): + out = self.references.get(path) + if out is not None: + if isinstance(out, (str, bytes)): + # decode base64 here + return {"name": path, "type": "file", "size": len(out)} + elif len(out) > 1: + return {"name": path, "type": "file", "size": out[2]} + else: + out0 = [{"name": path, "type": "file", "size": None}] + else: + out = self.ls(path, True) + out0 = [o for o in out if o["name"] == path] + if not out0: + return {"name": path, "type": "directory", "size": 0} + if out0[0]["size"] is None: + # if this is a whole remote file, update size using remote FS + prot, _ = split_protocol(self.references[path][0]) + out0[0]["size"] = self.fss[prot].size(self.references[path][0]) + return out0[0] + + async def _info(self, path, **kwargs): # calls fast sync code + return self.info(path) + + async def _rm_file(self, path, **kwargs): + self.references.pop( + path, None + ) # ignores FileNotFound, just as well for directories + self.dircache.clear() # this is a bit heavy handed + + async def _pipe_file(self, path, data): + # can be str or bytes + self.references[path] = data + self.dircache.clear() # this is a bit heavy handed + + async def _put_file(self, lpath, rpath, **kwargs): + # puts binary + with open(lpath, "rb") as f: + self.references[rpath] = f.read() + self.dircache.clear() # this is a bit heavy handed + + def save_json(self, url, **storage_options): + """Write modified references into new location""" + out = {} + for k, v in self.references.items(): + if isinstance(v, bytes): + try: + out[k] = v.decode("ascii") + except UnicodeDecodeError: + out[k] = (b"base64:" + base64.b64encode(v)).decode() + else: + out[k] = v + with fsspec.open(url, "wb", **storage_options) as f: + f.write(json.dumps({"version": 1, "refs": out}).encode()) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/sftp.py b/MLPY/Lib/site-packages/fsspec/implementations/sftp.py new file mode 100644 index 0000000000000000000000000000000000000000..77f7b370cd246f9a9bfd34141afc3edd728d13c3 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/sftp.py @@ -0,0 +1,180 @@ +import datetime +import logging +import os +import types +import uuid +from stat import S_ISDIR, S_ISLNK + +import paramiko + +from .. import AbstractFileSystem +from ..utils import infer_storage_options + +logger = logging.getLogger("fsspec.sftp") + + +class SFTPFileSystem(AbstractFileSystem): + """Files over SFTP/SSH + + Peer-to-peer filesystem over SSH using paramiko. + + Note: if using this with the ``open`` or ``open_files``, with full URLs, + there is no way to tell if a path is relative, so all paths are assumed + to be absolute. 
+ """ + + protocol = "sftp", "ssh" + + def __init__(self, host, **ssh_kwargs): + """ + + Parameters + ---------- + host: str + Hostname or IP as a string + temppath: str + Location on the server to put files, when within a transaction + ssh_kwargs: dict + Parameters passed on to connection. See details in + https://docs.paramiko.org/en/3.3/api/client.html#paramiko.client.SSHClient.connect + May include port, username, password... + """ + if self._cached: + return + super().__init__(**ssh_kwargs) + self.temppath = ssh_kwargs.pop("temppath", "/tmp") # remote temp directory + self.host = host + self.ssh_kwargs = ssh_kwargs + self._connect() + + def _connect(self): + logger.debug("Connecting to SFTP server %s", self.host) + self.client = paramiko.SSHClient() + self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.client.connect(self.host, **self.ssh_kwargs) + self.ftp = self.client.open_sftp() + + @classmethod + def _strip_protocol(cls, path): + return infer_storage_options(path)["path"] + + @staticmethod + def _get_kwargs_from_urls(urlpath): + out = infer_storage_options(urlpath) + out.pop("path", None) + out.pop("protocol", None) + return out + + def mkdir(self, path, create_parents=True, mode=511): + logger.debug("Creating folder %s", path) + if self.exists(path): + raise FileExistsError(f"File exists: {path}") + + if create_parents: + self.makedirs(path) + else: + self.ftp.mkdir(path, mode) + + def makedirs(self, path, exist_ok=False, mode=511): + if self.exists(path) and not exist_ok: + raise FileExistsError(f"File exists: {path}") + + parts = path.split("/") + new_path = "/" if path[:1] == "/" else "" + + for part in parts: + if part: + new_path = f"{new_path}/{part}" if new_path else part + if not self.exists(new_path): + self.ftp.mkdir(new_path, mode) + + def rmdir(self, path): + logger.debug("Removing folder %s", path) + self.ftp.rmdir(path) + + def info(self, path): + stat = self._decode_stat(self.ftp.stat(path)) + stat["name"] = path + return stat + + @staticmethod + def _decode_stat(stat, parent_path=None): + if S_ISDIR(stat.st_mode): + t = "directory" + elif S_ISLNK(stat.st_mode): + t = "link" + else: + t = "file" + out = { + "name": "", + "size": stat.st_size, + "type": t, + "uid": stat.st_uid, + "gid": stat.st_gid, + "time": datetime.datetime.fromtimestamp( + stat.st_atime, tz=datetime.timezone.utc + ), + "mtime": datetime.datetime.fromtimestamp( + stat.st_mtime, tz=datetime.timezone.utc + ), + } + if parent_path: + out["name"] = "/".join([parent_path.rstrip("/"), stat.filename]) + return out + + def ls(self, path, detail=False): + logger.debug("Listing folder %s", path) + stats = [self._decode_stat(stat, path) for stat in self.ftp.listdir_iter(path)] + if detail: + return stats + else: + paths = [stat["name"] for stat in stats] + return sorted(paths) + + def put(self, lpath, rpath, callback=None, **kwargs): + logger.debug("Put file %s into %s", lpath, rpath) + self.ftp.put(lpath, rpath) + + def get_file(self, rpath, lpath, **kwargs): + if self.isdir(rpath): + os.makedirs(lpath, exist_ok=True) + else: + self.ftp.get(self._strip_protocol(rpath), lpath) + + def _open(self, path, mode="rb", block_size=None, **kwargs): + """ + block_size: int or None + If 0, no buffering, if 1, line buffering, if >1, buffer that many + bytes, if None use default from paramiko. 
+ """ + logger.debug("Opening file %s", path) + if kwargs.get("autocommit", True) is False: + # writes to temporary file, move on commit + path2 = "/".join([self.temppath, str(uuid.uuid4())]) + f = self.ftp.open(path2, mode, bufsize=block_size if block_size else -1) + f.temppath = path2 + f.targetpath = path + f.fs = self + f.commit = types.MethodType(commit_a_file, f) + f.discard = types.MethodType(discard_a_file, f) + else: + f = self.ftp.open(path, mode, bufsize=block_size if block_size else -1) + return f + + def _rm(self, path): + if self.isdir(path): + self.ftp.rmdir(path) + else: + self.ftp.remove(path) + + def mv(self, old, new): + logger.debug("Renaming %s into %s", old, new) + self.ftp.posix_rename(old, new) + + +def commit_a_file(self): + self.fs.mv(self.temppath, self.targetpath) + + +def discard_a_file(self): + self.fs._rm(self.temppath) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/smb.py b/MLPY/Lib/site-packages/fsspec/implementations/smb.py new file mode 100644 index 0000000000000000000000000000000000000000..bcd13a638a4d3756f4a76c315b95804397eac48f --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/smb.py @@ -0,0 +1,343 @@ +""" +This module contains SMBFileSystem class responsible for handling access to +Windows Samba network shares by using package smbprotocol +""" + +import datetime +import uuid +from stat import S_ISDIR, S_ISLNK + +import smbclient + +from .. import AbstractFileSystem +from ..utils import infer_storage_options + +# ! pylint: disable=bad-continuation + + +class SMBFileSystem(AbstractFileSystem): + """Allow reading and writing to Windows and Samba network shares. + + When using `fsspec.open()` for getting a file-like object the URI + should be specified as this format: + ``smb://workgroup;user:password@server:port/share/folder/file.csv``. + + Example:: + + >>> import fsspec + >>> with fsspec.open( + ... 'smb://myuser:mypassword@myserver.com/' 'share/folder/file.csv' + ... ) as smbfile: + ... df = pd.read_csv(smbfile, sep='|', header=None) + + Note that you need to pass in a valid hostname or IP address for the host + component of the URL. Do not use the Windows/NetBIOS machine name for the + host component. + + The first component of the path in the URL points to the name of the shared + folder. Subsequent path components will point to the directory/folder/file. + + The URL components ``workgroup`` , ``user``, ``password`` and ``port`` may be + optional. + + .. note:: + + For working this source require `smbprotocol`_ to be installed, e.g.:: + + $ pip install smbprotocol + # or + # pip install smbprotocol[kerberos] + + .. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements + + Note: if using this with the ``open`` or ``open_files``, with full URLs, + there is no way to tell if a path is relative, so all paths are assumed + to be absolute. + """ + + protocol = "smb" + + # pylint: disable=too-many-arguments + def __init__( + self, + host, + port=None, + username=None, + password=None, + timeout=60, + encrypt=None, + share_access=None, + register_session_retries=5, + auto_mkdir=False, + **kwargs, + ): + """ + You can use _get_kwargs_from_urls to get some kwargs from + a reasonable SMB url. + + Authentication will be anonymous or integrated if username/password are not + given. + + Parameters + ---------- + host: str + The remote server name/ip to connect to + port: int or None + Port to connect with. Usually 445, sometimes 139. + username: str or None + Username to connect with. 
Required if Kerberos auth is not being used. + password: str or None + User's password on the server, if using username + timeout: int + Connection timeout in seconds + encrypt: bool + Whether to force encryption or not, once this has been set to True + the session cannot be changed back to False. + share_access: str or None + Specifies the default access applied to file open operations + performed with this file system object. + This affects whether other processes can concurrently open a handle + to the same file. + + - None (the default): exclusively locks the file until closed. + - 'r': Allow other handles to be opened with read access. + - 'w': Allow other handles to be opened with write access. + - 'd': Allow other handles to be opened with delete access. + auto_mkdir: bool + Whether, when opening a file, the directory containing it should + be created (if it doesn't already exist). This is assumed by pyarrow + and zarr-python code. + """ + super().__init__(**kwargs) + self.host = host + self.port = port + self.username = username + self.password = password + self.timeout = timeout + self.encrypt = encrypt + self.temppath = kwargs.pop("temppath", "") + self.share_access = share_access + self.register_session_retries = register_session_retries + self.auto_mkdir = auto_mkdir + self._connect() + + @property + def _port(self): + return 445 if self.port is None else self.port + + def _connect(self): + import time + + for _ in range(self.register_session_retries): + try: + smbclient.register_session( + self.host, + username=self.username, + password=self.password, + port=self._port, + encrypt=self.encrypt, + connection_timeout=self.timeout, + ) + break + except Exception: + time.sleep(0.1) + + @classmethod + def _strip_protocol(cls, path): + return infer_storage_options(path)["path"] + + @staticmethod + def _get_kwargs_from_urls(path): + # smb://workgroup;user:password@host:port/share/folder/file.csv + out = infer_storage_options(path) + out.pop("path", None) + out.pop("protocol", None) + return out + + def mkdir(self, path, create_parents=True, **kwargs): + wpath = _as_unc_path(self.host, path) + if create_parents: + smbclient.makedirs(wpath, exist_ok=False, port=self._port, **kwargs) + else: + smbclient.mkdir(wpath, port=self._port, **kwargs) + + def makedirs(self, path, exist_ok=False): + if _share_has_path(path): + wpath = _as_unc_path(self.host, path) + smbclient.makedirs(wpath, exist_ok=exist_ok, port=self._port) + + def rmdir(self, path): + if _share_has_path(path): + wpath = _as_unc_path(self.host, path) + smbclient.rmdir(wpath, port=self._port) + + def info(self, path, **kwargs): + wpath = _as_unc_path(self.host, path) + stats = smbclient.stat(wpath, port=self._port, **kwargs) + if S_ISDIR(stats.st_mode): + stype = "directory" + elif S_ISLNK(stats.st_mode): + stype = "link" + else: + stype = "file" + res = { + "name": path + "/" if stype == "directory" else path, + "size": stats.st_size, + "type": stype, + "uid": stats.st_uid, + "gid": stats.st_gid, + "time": stats.st_atime, + "mtime": stats.st_mtime, + } + return res + + def created(self, path): + """Return the created timestamp of a file as a datetime.datetime""" + wpath = _as_unc_path(self.host, path) + stats = smbclient.stat(wpath, port=self._port) + return datetime.datetime.fromtimestamp(stats.st_ctime, tz=datetime.timezone.utc) + + def modified(self, path): + """Return the modified timestamp of a file as a datetime.datetime""" + wpath = _as_unc_path(self.host, path) + stats = smbclient.stat(wpath, port=self._port) + return 
datetime.datetime.fromtimestamp(stats.st_mtime, tz=datetime.timezone.utc) + + def ls(self, path, detail=True, **kwargs): + unc = _as_unc_path(self.host, path) + listed = smbclient.listdir(unc, port=self._port, **kwargs) + dirs = ["/".join([path.rstrip("/"), p]) for p in listed] + if detail: + dirs = [self.info(d) for d in dirs] + return dirs + + # pylint: disable=too-many-arguments + def _open( + self, + path, + mode="rb", + block_size=-1, + autocommit=True, + cache_options=None, + **kwargs, + ): + """ + block_size: int or None + If 0, no buffering, 1, line buffering, >1, buffer that many bytes + + Notes + ----- + By specifying 'share_access' in 'kwargs' it is possible to override the + default shared access setting applied in the constructor of this object. + """ + if self.auto_mkdir and "w" in mode: + self.makedirs(self._parent(path), exist_ok=True) + bls = block_size if block_size is not None and block_size >= 0 else -1 + wpath = _as_unc_path(self.host, path) + share_access = kwargs.pop("share_access", self.share_access) + if "w" in mode and autocommit is False: + temp = _as_temp_path(self.host, path, self.temppath) + return SMBFileOpener( + wpath, temp, mode, port=self._port, block_size=bls, **kwargs + ) + return smbclient.open_file( + wpath, + mode, + buffering=bls, + share_access=share_access, + port=self._port, + **kwargs, + ) + + def copy(self, path1, path2, **kwargs): + """Copy within two locations in the same filesystem""" + wpath1 = _as_unc_path(self.host, path1) + wpath2 = _as_unc_path(self.host, path2) + if self.auto_mkdir: + self.makedirs(self._parent(path2), exist_ok=True) + smbclient.copyfile(wpath1, wpath2, port=self._port, **kwargs) + + def _rm(self, path): + if _share_has_path(path): + wpath = _as_unc_path(self.host, path) + stats = smbclient.stat(wpath, port=self._port) + if S_ISDIR(stats.st_mode): + smbclient.rmdir(wpath, port=self._port) + else: + smbclient.remove(wpath, port=self._port) + + def mv(self, path1, path2, recursive=None, maxdepth=None, **kwargs): + wpath1 = _as_unc_path(self.host, path1) + wpath2 = _as_unc_path(self.host, path2) + smbclient.rename(wpath1, wpath2, port=self._port, **kwargs) + + +def _as_unc_path(host, path): + rpath = path.replace("/", "\\") + unc = f"\\\\{host}{rpath}" + return unc + + +def _as_temp_path(host, path, temppath): + share = path.split("/")[1] + temp_file = f"/{share}{temppath}/{uuid.uuid4()}" + unc = _as_unc_path(host, temp_file) + return unc + + +def _share_has_path(path): + parts = path.count("/") + if path.endswith("/"): + return parts > 2 + return parts > 1 + + +class SMBFileOpener: + """writes to remote temporary file, move on commit""" + + def __init__(self, path, temp, mode, port=445, block_size=-1, **kwargs): + self.path = path + self.temp = temp + self.mode = mode + self.block_size = block_size + self.kwargs = kwargs + self.smbfile = None + self._incontext = False + self.port = port + self._open() + + def _open(self): + if self.smbfile is None or self.smbfile.closed: + self.smbfile = smbclient.open_file( + self.temp, + self.mode, + port=self.port, + buffering=self.block_size, + **self.kwargs, + ) + + def commit(self): + """Move temp file to definitive on success.""" + # TODO: use transaction support in SMB protocol + smbclient.replace(self.temp, self.path, port=self.port) + + def discard(self): + """Remove the temp file on failure.""" + smbclient.remove(self.temp, port=self.port) + + def __fspath__(self): + return self.path + + def __iter__(self): + return self.smbfile.__iter__() + + def __getattr__(self, item): + 
return getattr(self.smbfile, item) + + def __enter__(self): + self._incontext = True + return self.smbfile.__enter__() + + def __exit__(self, exc_type, exc_value, traceback): + self._incontext = False + self.smbfile.__exit__(exc_type, exc_value, traceback) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/tar.py b/MLPY/Lib/site-packages/fsspec/implementations/tar.py new file mode 100644 index 0000000000000000000000000000000000000000..412e5ba4d2cdea7db090dc96412e697909a38d78 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/tar.py @@ -0,0 +1,124 @@ +import logging +import tarfile + +import fsspec +from fsspec.archive import AbstractArchiveFileSystem +from fsspec.compression import compr +from fsspec.utils import infer_compression + +typemap = {b"0": "file", b"5": "directory"} + +logger = logging.getLogger("tar") + + +class TarFileSystem(AbstractArchiveFileSystem): + """Compressed Tar archives as a file-system (read-only) + + Supports the following formats: + tar.gz, tar.bz2, tar.xz + """ + + root_marker = "" + protocol = "tar" + cachable = False + + def __init__( + self, + fo="", + index_store=None, + target_options=None, + target_protocol=None, + compression=None, + **kwargs, + ): + super().__init__(**kwargs) + target_options = target_options or {} + + if isinstance(fo, str): + self.of = fsspec.open(fo, protocol=target_protocol, **target_options) + fo = self.of.open() # keep the reference + + # Try to infer compression. + if compression is None: + name = None + + # Try different ways to get hold of the filename. `fo` might either + # be a `fsspec.LocalFileOpener`, an `io.BufferedReader` or an + # `fsspec.AbstractFileSystem` instance. + try: + # Amended io.BufferedReader or similar. + # This uses a "protocol extension" where original filenames are + # propagated to archive-like filesystems in order to let them + # infer the right compression appropriately. + if hasattr(fo, "original"): + name = fo.original + + # fsspec.LocalFileOpener + elif hasattr(fo, "path"): + name = fo.path + + # io.BufferedReader + elif hasattr(fo, "name"): + name = fo.name + + # fsspec.AbstractFileSystem + elif hasattr(fo, "info"): + name = fo.info()["name"] + + except Exception as ex: + logger.warning( + f"Unable to determine file name, not inferring compression: {ex}" + ) + + if name is not None: + compression = infer_compression(name) + logger.info(f"Inferred compression {compression} from file name {name}") + + if compression is not None: + # TODO: tarfile already implements compression with modes like "'r:gz'", + # but then would seek to offset in the file work? 
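+            # Wrap the raw byte stream in a file-like decompressor from
+            # ``fsspec.compression.compr``; this keeps member extraction
+            # working on remote streams, where seeking inside a tarfile
+            # opened with a compressed mode such as "r:gz" may not be
+            # reliable.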
+ fo = compr[compression](fo) + + self._fo_ref = fo + self.fo = fo # the whole instance is a context + self.tar = tarfile.TarFile(fileobj=self.fo) + self.dir_cache = None + + self.index_store = index_store + self.index = None + self._index() + + def _index(self): + # TODO: load and set saved index, if exists + out = {} + for ti in self.tar: + info = ti.get_info() + info["type"] = typemap.get(info["type"], "file") + name = ti.get_info()["name"].rstrip("/") + out[name] = (info, ti.offset_data) + + self.index = out + # TODO: save index to self.index_store here, if set + + def _get_dirs(self): + if self.dir_cache is not None: + return + + # This enables ls to get directories as children as well as files + self.dir_cache = { + dirname: {"name": dirname, "size": 0, "type": "directory"} + for dirname in self._all_dirnames(self.tar.getnames()) + } + for member in self.tar.getmembers(): + info = member.get_info() + info["name"] = info["name"].rstrip("/") + info["type"] = typemap.get(info["type"], "file") + self.dir_cache[info["name"]] = info + + def _open(self, path, mode="rb", **kwargs): + if mode != "rb": + raise ValueError("Read-only filesystem implementation") + details, offset = self.index[path] + if details["type"] != "file": + raise ValueError("Can only handle regular files") + return self.tar.extractfile(path) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/webhdfs.py b/MLPY/Lib/site-packages/fsspec/implementations/webhdfs.py new file mode 100644 index 0000000000000000000000000000000000000000..4bac5d51aa52ccfa3319d86c8c8cd384497881a6 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/webhdfs.py @@ -0,0 +1,484 @@ +# https://hadoop.apache.org/docs/r1.0.4/webhdfs.html + +import logging +import os +import secrets +import shutil +import tempfile +import uuid +from contextlib import suppress +from urllib.parse import quote + +import requests + +from ..spec import AbstractBufferedFile, AbstractFileSystem +from ..utils import infer_storage_options, tokenize + +logger = logging.getLogger("webhdfs") + + +class WebHDFS(AbstractFileSystem): + """ + Interface to HDFS over HTTP using the WebHDFS API. Supports also HttpFS gateways. + + Four auth mechanisms are supported: + + insecure: no auth is done, and the user is assumed to be whoever they + say they are (parameter ``user``), or a predefined value such as + "dr.who" if not given + spnego: when kerberos authentication is enabled, auth is negotiated by + requests_kerberos https://github.com/requests/requests-kerberos . + This establishes a session based on existing kinit login and/or + specified principal/password; parameters are passed with ``kerb_kwargs`` + token: uses an existing Hadoop delegation token from another secured + service. Indeed, this client can also generate such tokens when + not insecure. Note that tokens expire, but can be renewed (by a + previously specified user) and may allow for proxying. + basic-auth: used when both parameter ``user`` and parameter ``password`` + are provided. 
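+
+    A minimal, hypothetical example (host name, port and user are
+    placeholders for your own deployment)::
+
+        fs = WebHDFS("namenode.example.com", port=9870, user="alice")
+        with fs.open("/user/alice/data.csv", "rb") as f:
+            header = f.read(100)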
+ + """ + + tempdir = str(tempfile.gettempdir()) + protocol = "webhdfs", "webHDFS" + + def __init__( + self, + host, + port=50070, + kerberos=False, + token=None, + user=None, + password=None, + proxy_to=None, + kerb_kwargs=None, + data_proxy=None, + use_https=False, + session_cert=None, + session_verify=True, + **kwargs, + ): + """ + Parameters + ---------- + host: str + Name-node address + port: int + Port for webHDFS + kerberos: bool + Whether to authenticate with kerberos for this connection + token: str or None + If given, use this token on every call to authenticate. A user + and user-proxy may be encoded in the token and should not be also + given + user: str or None + If given, assert the user name to connect with + password: str or None + If given, assert the password to use for basic auth. If password + is provided, user must be provided also + proxy_to: str or None + If given, the user has the authority to proxy, and this value is + the user in who's name actions are taken + kerb_kwargs: dict + Any extra arguments for HTTPKerberosAuth, see + ``_ + data_proxy: dict, callable or None + If given, map data-node addresses. This can be necessary if the + HDFS cluster is behind a proxy, running on Docker or otherwise has + a mismatch between the host-names given by the name-node and the + address by which to refer to them from the client. If a dict, + maps host names ``host->data_proxy[host]``; if a callable, full + URLs are passed, and function must conform to + ``url->data_proxy(url)``. + use_https: bool + Whether to connect to the Name-node using HTTPS instead of HTTP + session_cert: str or Tuple[str, str] or None + Path to a certificate file, or tuple of (cert, key) files to use + for the requests.Session + session_verify: str, bool or None + Path to a certificate file to use for verifying the requests.Session. + kwargs + """ + if self._cached: + return + super().__init__(**kwargs) + self.url = f"{'https' if use_https else 'http'}://{host}:{port}/webhdfs/v1" # noqa + self.kerb = kerberos + self.kerb_kwargs = kerb_kwargs or {} + self.pars = {} + self.proxy = data_proxy or {} + if token is not None: + if user is not None or proxy_to is not None: + raise ValueError( + "If passing a delegation token, must not set " + "user or proxy_to, as these are encoded in the" + " token" + ) + self.pars["delegation"] = token + self.user = user + self.password = password + + if password is not None: + if user is None: + raise ValueError( + "If passing a password, the user must also be" + "set in order to set up the basic-auth" + ) + else: + if user is not None: + self.pars["user.name"] = user + + if proxy_to is not None: + self.pars["doas"] = proxy_to + if kerberos and user is not None: + raise ValueError( + "If using Kerberos auth, do not specify the " + "user, this is handled by kinit." 
+ ) + + self.session_cert = session_cert + self.session_verify = session_verify + + self._connect() + + self._fsid = f"webhdfs_{tokenize(host, port)}" + + @property + def fsid(self): + return self._fsid + + def _connect(self): + self.session = requests.Session() + + if self.session_cert: + self.session.cert = self.session_cert + + self.session.verify = self.session_verify + + if self.kerb: + from requests_kerberos import HTTPKerberosAuth + + self.session.auth = HTTPKerberosAuth(**self.kerb_kwargs) + + if self.user is not None and self.password is not None: + from requests.auth import HTTPBasicAuth + + self.session.auth = HTTPBasicAuth(self.user, self.password) + + def _call(self, op, method="get", path=None, data=None, redirect=True, **kwargs): + url = self._apply_proxy(self.url + quote(path or "", safe="/=")) + args = kwargs.copy() + args.update(self.pars) + args["op"] = op.upper() + logger.debug("sending %s with %s", url, method) + out = self.session.request( + method=method.upper(), + url=url, + params=args, + data=data, + allow_redirects=redirect, + ) + if out.status_code in [400, 401, 403, 404, 500]: + try: + err = out.json() + msg = err["RemoteException"]["message"] + exp = err["RemoteException"]["exception"] + except (ValueError, KeyError): + pass + else: + if exp in ["IllegalArgumentException", "UnsupportedOperationException"]: + raise ValueError(msg) + elif exp in ["SecurityException", "AccessControlException"]: + raise PermissionError(msg) + elif exp in ["FileNotFoundException"]: + raise FileNotFoundError(msg) + else: + raise RuntimeError(msg) + out.raise_for_status() + return out + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + replication=None, + permissions=None, + **kwargs, + ): + """ + + Parameters + ---------- + path: str + File location + mode: str + 'rb', 'wb', etc. 
+ block_size: int + Client buffer size for read-ahead or write buffer + autocommit: bool + If False, writes to temporary file that only gets put in final + location upon commit + replication: int + Number of copies of file on the cluster, write mode only + permissions: str or int + posix permissions, write mode only + kwargs + + Returns + ------- + WebHDFile instance + """ + block_size = block_size or self.blocksize + return WebHDFile( + self, + path, + mode=mode, + block_size=block_size, + tempdir=self.tempdir, + autocommit=autocommit, + replication=replication, + permissions=permissions, + ) + + @staticmethod + def _process_info(info): + info["type"] = info["type"].lower() + info["size"] = info["length"] + return info + + @classmethod + def _strip_protocol(cls, path): + return infer_storage_options(path)["path"] + + @staticmethod + def _get_kwargs_from_urls(urlpath): + out = infer_storage_options(urlpath) + out.pop("path", None) + out.pop("protocol", None) + if "username" in out: + out["user"] = out.pop("username") + return out + + def info(self, path): + out = self._call("GETFILESTATUS", path=path) + info = out.json()["FileStatus"] + info["name"] = path + return self._process_info(info) + + def ls(self, path, detail=False): + out = self._call("LISTSTATUS", path=path) + infos = out.json()["FileStatuses"]["FileStatus"] + for info in infos: + self._process_info(info) + info["name"] = path.rstrip("/") + "/" + info["pathSuffix"] + if detail: + return sorted(infos, key=lambda i: i["name"]) + else: + return sorted(info["name"] for info in infos) + + def content_summary(self, path): + """Total numbers of files, directories and bytes under path""" + out = self._call("GETCONTENTSUMMARY", path=path) + return out.json()["ContentSummary"] + + def ukey(self, path): + """Checksum info of file, giving method and result""" + out = self._call("GETFILECHECKSUM", path=path, redirect=False) + if "Location" in out.headers: + location = self._apply_proxy(out.headers["Location"]) + out2 = self.session.get(location) + out2.raise_for_status() + return out2.json()["FileChecksum"] + else: + out.raise_for_status() + return out.json()["FileChecksum"] + + def home_directory(self): + """Get user's home directory""" + out = self._call("GETHOMEDIRECTORY") + return out.json()["Path"] + + def get_delegation_token(self, renewer=None): + """Retrieve token which can give the same authority to other uses + + Parameters + ---------- + renewer: str or None + User who may use this token; if None, will be current user + """ + if renewer: + out = self._call("GETDELEGATIONTOKEN", renewer=renewer) + else: + out = self._call("GETDELEGATIONTOKEN") + t = out.json()["Token"] + if t is None: + raise ValueError("No token available for this user/security context") + return t["urlString"] + + def renew_delegation_token(self, token): + """Make token live longer. 
Returns new expiry time""" + out = self._call("RENEWDELEGATIONTOKEN", method="put", token=token) + return out.json()["long"] + + def cancel_delegation_token(self, token): + """Stop the token from being useful""" + self._call("CANCELDELEGATIONTOKEN", method="put", token=token) + + def chmod(self, path, mod): + """Set the permission at path + + Parameters + ---------- + path: str + location to set (file or directory) + mod: str or int + posix epresentation or permission, give as oct string, e.g, '777' + or 0o777 + """ + self._call("SETPERMISSION", method="put", path=path, permission=mod) + + def chown(self, path, owner=None, group=None): + """Change owning user and/or group""" + kwargs = {} + if owner is not None: + kwargs["owner"] = owner + if group is not None: + kwargs["group"] = group + self._call("SETOWNER", method="put", path=path, **kwargs) + + def set_replication(self, path, replication): + """ + Set file replication factor + + Parameters + ---------- + path: str + File location (not for directories) + replication: int + Number of copies of file on the cluster. Should be smaller than + number of data nodes; normally 3 on most systems. + """ + self._call("SETREPLICATION", path=path, method="put", replication=replication) + + def mkdir(self, path, **kwargs): + self._call("MKDIRS", method="put", path=path) + + def makedirs(self, path, exist_ok=False): + if exist_ok is False and self.exists(path): + raise FileExistsError(path) + self.mkdir(path) + + def mv(self, path1, path2, **kwargs): + self._call("RENAME", method="put", path=path1, destination=path2) + + def rm(self, path, recursive=False, **kwargs): + self._call( + "DELETE", + method="delete", + path=path, + recursive="true" if recursive else "false", + ) + + def rm_file(self, path, **kwargs): + self.rm(path) + + def cp_file(self, lpath, rpath, **kwargs): + with self.open(lpath) as lstream: + tmp_fname = "/".join([self._parent(rpath), f".tmp.{secrets.token_hex(16)}"]) + # Perform an atomic copy (stream to a temporary file and + # move it to the actual destination). + try: + with self.open(tmp_fname, "wb") as rstream: + shutil.copyfileobj(lstream, rstream) + self.mv(tmp_fname, rpath) + except BaseException: # noqa + with suppress(FileNotFoundError): + self.rm(tmp_fname) + raise + + def _apply_proxy(self, location): + if self.proxy and callable(self.proxy): + location = self.proxy(location) + elif self.proxy: + # as a dict + for k, v in self.proxy.items(): + location = location.replace(k, v, 1) + return location + + +class WebHDFile(AbstractBufferedFile): + """A file living in HDFS over webHDFS""" + + def __init__(self, fs, path, **kwargs): + super().__init__(fs, path, **kwargs) + kwargs = kwargs.copy() + if kwargs.get("permissions", None) is None: + kwargs.pop("permissions", None) + if kwargs.get("replication", None) is None: + kwargs.pop("replication", None) + self.permissions = kwargs.pop("permissions", 511) + tempdir = kwargs.pop("tempdir") + if kwargs.pop("autocommit", False) is False: + self.target = self.path + self.path = os.path.join(tempdir, str(uuid.uuid4())) + + def _upload_chunk(self, final=False): + """Write one part of a multi-block file upload + + Parameters + ========== + final: bool + This is the last block, so should complete file, if + self.autocommit is True. 
+ """ + out = self.fs.session.post( + self.location, + data=self.buffer.getvalue(), + headers={"content-type": "application/octet-stream"}, + ) + out.raise_for_status() + return True + + def _initiate_upload(self): + """Create remote file/upload""" + kwargs = self.kwargs.copy() + if "a" in self.mode: + op, method = "APPEND", "POST" + else: + op, method = "CREATE", "PUT" + kwargs["overwrite"] = "true" + out = self.fs._call(op, method, self.path, redirect=False, **kwargs) + location = self.fs._apply_proxy(out.headers["Location"]) + if "w" in self.mode: + # create empty file to append to + out2 = self.fs.session.put( + location, headers={"content-type": "application/octet-stream"} + ) + out2.raise_for_status() + # after creating empty file, change location to append to + out2 = self.fs._call("APPEND", "POST", self.path, redirect=False, **kwargs) + self.location = self.fs._apply_proxy(out2.headers["Location"]) + + def _fetch_range(self, start, end): + start = max(start, 0) + end = min(self.size, end) + if start >= end or start >= self.size: + return b"" + out = self.fs._call( + "OPEN", path=self.path, offset=start, length=end - start, redirect=False + ) + out.raise_for_status() + if "Location" in out.headers: + location = out.headers["Location"] + out2 = self.fs.session.get(self.fs._apply_proxy(location)) + return out2.content + else: + return out.content + + def commit(self): + self.fs.mv(self.path, self.target) + + def discard(self): + self.fs.rm(self.path) diff --git a/MLPY/Lib/site-packages/fsspec/implementations/zip.py b/MLPY/Lib/site-packages/fsspec/implementations/zip.py new file mode 100644 index 0000000000000000000000000000000000000000..9d9c046bfde313b6868399c4d200bc779c1ab19f --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/implementations/zip.py @@ -0,0 +1,134 @@ +import zipfile + +import fsspec +from fsspec.archive import AbstractArchiveFileSystem + + +class ZipFileSystem(AbstractArchiveFileSystem): + """Read/Write contents of ZIP archive as a file-system + + Keeps file object open while instance lives. + + This class is pickleable, but not necessarily thread-safe + """ + + root_marker = "" + protocol = "zip" + cachable = False + + def __init__( + self, + fo="", + mode="r", + target_protocol=None, + target_options=None, + compression=zipfile.ZIP_STORED, + allowZip64=True, + compresslevel=None, + **kwargs, + ): + """ + Parameters + ---------- + fo: str or file-like + Contains ZIP, and must exist. If a str, will fetch file using + :meth:`~fsspec.open_files`, which must return one file exactly. + mode: str + Accept: "r", "w", "a" + target_protocol: str (optional) + If ``fo`` is a string, this value can be used to override the + FS protocol inferred from a URL + target_options: dict (optional) + Kwargs passed when instantiating the target FS, if ``fo`` is + a string. 
+ compression, allowZip64, compresslevel: passed to ZipFile + Only relevant when creating a ZIP + """ + super().__init__(self, **kwargs) + if mode not in set("rwa"): + raise ValueError(f"mode '{mode}' no understood") + self.mode = mode + if isinstance(fo, str): + if mode == "a": + m = "r+b" + else: + m = mode + "b" + fo = fsspec.open( + fo, mode=m, protocol=target_protocol, **(target_options or {}) + ) + self.force_zip_64 = allowZip64 + self.of = fo + self.fo = fo.__enter__() # the whole instance is a context + self.zip = zipfile.ZipFile( + self.fo, + mode=mode, + compression=compression, + allowZip64=allowZip64, + compresslevel=compresslevel, + ) + self.dir_cache = None + + @classmethod + def _strip_protocol(cls, path): + # zip file paths are always relative to the archive root + return super()._strip_protocol(path).lstrip("/") + + def __del__(self): + if hasattr(self, "zip"): + self.close() + del self.zip + + def close(self): + """Commits any write changes to the file. Done on ``del`` too.""" + self.zip.close() + + def _get_dirs(self): + if self.dir_cache is None or self.mode in set("wa"): + # when writing, dir_cache is always in the ZipFile's attributes, + # not read from the file. + files = self.zip.infolist() + self.dir_cache = { + dirname.rstrip("/"): { + "name": dirname.rstrip("/"), + "size": 0, + "type": "directory", + } + for dirname in self._all_dirnames(self.zip.namelist()) + } + for z in files: + f = {s: getattr(z, s, None) for s in zipfile.ZipInfo.__slots__} + f.update( + { + "name": z.filename.rstrip("/"), + "size": z.file_size, + "type": ("directory" if z.is_dir() else "file"), + } + ) + self.dir_cache[f["name"]] = f + + def pipe_file(self, path, value, **kwargs): + # override upstream, because we know the exact file size in this case + self.zip.writestr(path, value, **kwargs) + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + path = self._strip_protocol(path) + if "r" in mode and self.mode in set("wa"): + if self.exists(path): + raise OSError("ZipFS can only be open for reading or writing, not both") + raise FileNotFoundError(path) + if "r" in self.mode and "w" in mode: + raise OSError("ZipFS can only be open for reading or writing, not both") + out = self.zip.open(path, mode.strip("b"), force_zip64=self.force_zip_64) + if "r" in mode: + info = self.info(path) + out.size = info["size"] + out.name = info["name"] + return out diff --git a/MLPY/Lib/site-packages/fsspec/json.py b/MLPY/Lib/site-packages/fsspec/json.py new file mode 100644 index 0000000000000000000000000000000000000000..69cead04509a1ebc9175ec5ad449c8a866b60444 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/json.py @@ -0,0 +1,121 @@ +import json +from contextlib import suppress +from pathlib import PurePath +from typing import ( + Any, + Callable, + ClassVar, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, +) + +from .registry import _import_class, get_filesystem_class +from .spec import AbstractFileSystem + + +class FilesystemJSONEncoder(json.JSONEncoder): + include_password: ClassVar[bool] = True + + def default(self, o: Any) -> Any: + if isinstance(o, AbstractFileSystem): + return o.to_dict(include_password=self.include_password) + if isinstance(o, PurePath): + cls = type(o) + return {"cls": f"{cls.__module__}.{cls.__name__}", "str": str(o)} + + return super().default(o) + + def make_serializable(self, obj: Any) -> Any: + """ + Recursively converts an object so that it can be JSON serialized via + :func:`json.dumps` and 
:func:`json.dump`, without actually calling + said functions. + """ + if isinstance(obj, (str, int, float, bool)): + return obj + if isinstance(obj, Mapping): + return {k: self.make_serializable(v) for k, v in obj.items()} + if isinstance(obj, Sequence): + return [self.make_serializable(v) for v in obj] + + return self.default(obj) + + +class FilesystemJSONDecoder(json.JSONDecoder): + def __init__( + self, + *, + object_hook: Optional[Callable[[Dict[str, Any]], Any]] = None, + parse_float: Optional[Callable[[str], Any]] = None, + parse_int: Optional[Callable[[str], Any]] = None, + parse_constant: Optional[Callable[[str], Any]] = None, + strict: bool = True, + object_pairs_hook: Optional[Callable[[List[Tuple[str, Any]]], Any]] = None, + ) -> None: + self.original_object_hook = object_hook + + super().__init__( + object_hook=self.custom_object_hook, + parse_float=parse_float, + parse_int=parse_int, + parse_constant=parse_constant, + strict=strict, + object_pairs_hook=object_pairs_hook, + ) + + @classmethod + def try_resolve_path_cls(cls, dct: Dict[str, Any]): + with suppress(Exception): + fqp = dct["cls"] + + path_cls = _import_class(fqp) + + if issubclass(path_cls, PurePath): + return path_cls + + return None + + @classmethod + def try_resolve_fs_cls(cls, dct: Dict[str, Any]): + with suppress(Exception): + if "cls" in dct: + try: + fs_cls = _import_class(dct["cls"]) + if issubclass(fs_cls, AbstractFileSystem): + return fs_cls + except Exception: + if "protocol" in dct: # Fallback if cls cannot be imported + return get_filesystem_class(dct["protocol"]) + + raise + + return None + + def custom_object_hook(self, dct: Dict[str, Any]): + if "cls" in dct: + if (obj_cls := self.try_resolve_fs_cls(dct)) is not None: + return AbstractFileSystem.from_dict(dct) + if (obj_cls := self.try_resolve_path_cls(dct)) is not None: + return obj_cls(dct["str"]) + + if self.original_object_hook is not None: + return self.original_object_hook(dct) + + return dct + + def unmake_serializable(self, obj: Any) -> Any: + """ + Inverse function of :meth:`FilesystemJSONEncoder.make_serializable`. + """ + if isinstance(obj, dict): + obj = self.custom_object_hook(obj) + if isinstance(obj, dict): + return {k: self.unmake_serializable(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [self.unmake_serializable(v) for v in obj] + + return obj diff --git a/MLPY/Lib/site-packages/fsspec/mapping.py b/MLPY/Lib/site-packages/fsspec/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..93ebd1df3a127ab1ad7d0d218cdb4fe0217f44bd --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/mapping.py @@ -0,0 +1,251 @@ +import array +import logging +import posixpath +import warnings +from collections.abc import MutableMapping +from functools import cached_property + +from fsspec.core import url_to_fs + +logger = logging.getLogger("fsspec.mapping") + + +class FSMap(MutableMapping): + """Wrap a FileSystem instance as a mutable wrapping. + + The keys of the mapping become files under the given root, and the + values (which must be bytes) the contents of those files. + + Parameters + ---------- + root: string + prefix for all the files + fs: FileSystem instance + check: bool (=True) + performs a touch at the location, to check for write access. 
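+    create: bool (=False)
+        if True, the root directory is created on instantiation if it
+        does not already exist
+    missing_exceptions: tuple of Exception subclasses or None
+        exception types treated as a missing key (surfaced as ``KeyError``);
+        defaults to (FileNotFoundError, IsADirectoryError, NotADirectoryError)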
+ + Examples + -------- + >>> fs = FileSystem(**parameters) # doctest: +SKIP + >>> d = FSMap('my-data/path/', fs) # doctest: +SKIP + or, more likely + >>> d = fs.get_mapper('my-data/path/') + + >>> d['loc1'] = b'Hello World' # doctest: +SKIP + >>> list(d.keys()) # doctest: +SKIP + ['loc1'] + >>> d['loc1'] # doctest: +SKIP + b'Hello World' + """ + + def __init__(self, root, fs, check=False, create=False, missing_exceptions=None): + self.fs = fs + self.root = fs._strip_protocol(root) + self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1] + if missing_exceptions is None: + missing_exceptions = ( + FileNotFoundError, + IsADirectoryError, + NotADirectoryError, + ) + self.missing_exceptions = missing_exceptions + self.check = check + self.create = create + if create: + if not self.fs.exists(root): + self.fs.mkdir(root) + if check: + if not self.fs.exists(root): + raise ValueError( + f"Path {root} does not exist. Create " + f" with the ``create=True`` keyword" + ) + self.fs.touch(root + "/a") + self.fs.rm(root + "/a") + + @cached_property + def dirfs(self): + """dirfs instance that can be used with the same keys as the mapper""" + from .implementations.dirfs import DirFileSystem + + return DirFileSystem(path=self._root_key_to_str, fs=self.fs) + + def clear(self): + """Remove all keys below root - empties out mapping""" + logger.info("Clear mapping at %s", self.root) + try: + self.fs.rm(self.root, True) + self.fs.mkdir(self.root) + except: # noqa: E722 + pass + + def getitems(self, keys, on_error="raise"): + """Fetch multiple items from the store + + If the backend is async-able, this might proceed concurrently + + Parameters + ---------- + keys: list(str) + They keys to be fetched + on_error : "raise", "omit", "return" + If raise, an underlying exception will be raised (converted to KeyError + if the type is in self.missing_exceptions); if omit, keys with exception + will simply not be included in the output; if "return", all keys are + included in the output, but the value will be bytes or an exception + instance. 
+ + Returns + ------- + dict(key, bytes|exception) + """ + keys2 = [self._key_to_str(k) for k in keys] + oe = on_error if on_error == "raise" else "return" + try: + out = self.fs.cat(keys2, on_error=oe) + if isinstance(out, bytes): + out = {keys2[0]: out} + except self.missing_exceptions as e: + raise KeyError from e + out = { + k: (KeyError() if isinstance(v, self.missing_exceptions) else v) + for k, v in out.items() + } + return { + key: out[k2] + for key, k2 in zip(keys, keys2) + if on_error == "return" or not isinstance(out[k2], BaseException) + } + + def setitems(self, values_dict): + """Set the values of multiple items in the store + + Parameters + ---------- + values_dict: dict(str, bytes) + """ + values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()} + self.fs.pipe(values) + + def delitems(self, keys): + """Remove multiple keys from the store""" + self.fs.rm([self._key_to_str(k) for k in keys]) + + def _key_to_str(self, key): + """Generate full path for the key""" + if not isinstance(key, str): + # raise TypeError("key must be of type `str`, got `{type(key).__name__}`" + warnings.warn( + "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError", + DeprecationWarning, + ) + if isinstance(key, list): + key = tuple(key) + key = str(key) + return f"{self._root_key_to_str}{key}".rstrip("/") + + def _str_to_key(self, s): + """Strip path of to leave key name""" + return s[len(self.root) :].lstrip("/") + + def __getitem__(self, key, default=None): + """Retrieve data""" + k = self._key_to_str(key) + try: + result = self.fs.cat(k) + except self.missing_exceptions: + if default is not None: + return default + raise KeyError(key) + return result + + def pop(self, key, default=None): + """Pop data""" + result = self.__getitem__(key, default) + try: + del self[key] + except KeyError: + pass + return result + + def __setitem__(self, key, value): + """Store value in key""" + key = self._key_to_str(key) + self.fs.mkdirs(self.fs._parent(key), exist_ok=True) + self.fs.pipe_file(key, maybe_convert(value)) + + def __iter__(self): + return (self._str_to_key(x) for x in self.fs.find(self.root)) + + def __len__(self): + return len(self.fs.find(self.root)) + + def __delitem__(self, key): + """Remove key""" + try: + self.fs.rm(self._key_to_str(key)) + except: # noqa: E722 + raise KeyError + + def __contains__(self, key): + """Does key exist in mapping?""" + path = self._key_to_str(key) + return self.fs.isfile(path) + + def __reduce__(self): + return FSMap, (self.root, self.fs, False, False, self.missing_exceptions) + + +def maybe_convert(value): + if isinstance(value, array.array) or hasattr(value, "__array__"): + # bytes-like things + if hasattr(value, "dtype") and value.dtype.kind in "Mm": + # The buffer interface doesn't support datetime64/timdelta64 numpy + # arrays + value = value.view("int64") + value = bytes(memoryview(value)) + return value + + +def get_mapper( + url="", + check=False, + create=False, + missing_exceptions=None, + alternate_root=None, + **kwargs, +): + """Create key-value interface for given URL and options + + The URL will be of the form "protocol://location" and point to the root + of the mapper required. All keys will be file-names below this location, + and their values the contents of each key. + + Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``. 
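+
+    For example, ``get_mapper("memory://mydata")`` (an in-memory filesystem;
+    the path is arbitrary) returns a dict-like store whose keys are the names
+    of files written under ``/mydata``.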
+ + Parameters + ---------- + url: str + Root URL of mapping + check: bool + Whether to attempt to read from the location before instantiation, to + check that the mapping does exist + create: bool + Whether to make the directory corresponding to the root before + instantiating + missing_exceptions: None or tuple + If given, these exception types will be regarded as missing keys and + return KeyError when trying to read data. By default, you get + (FileNotFoundError, IsADirectoryError, NotADirectoryError) + alternate_root: None or str + In cases of complex URLs, the parser may fail to pick the correct part + for the mapper root, so this arg can override + + Returns + ------- + ``FSMap`` instance, the dict-like key-value store. + """ + # Removing protocol here - could defer to each open() on the backend + fs, urlpath = url_to_fs(url, **kwargs) + root = alternate_root if alternate_root is not None else urlpath + return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions) diff --git a/MLPY/Lib/site-packages/fsspec/parquet.py b/MLPY/Lib/site-packages/fsspec/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0fb951c427babcd04b0c974a7951b4d331e25b --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/parquet.py @@ -0,0 +1,541 @@ +import io +import json +import warnings + +from .core import url_to_fs +from .utils import merge_offset_ranges + +# Parquet-Specific Utilities for fsspec +# +# Most of the functions defined in this module are NOT +# intended for public consumption. The only exception +# to this is `open_parquet_file`, which should be used +# place of `fs.open()` to open parquet-formatted files +# on remote file systems. + + +def open_parquet_file( + path, + mode="rb", + fs=None, + metadata=None, + columns=None, + row_groups=None, + storage_options=None, + strict=False, + engine="auto", + max_gap=64_000, + max_block=256_000_000, + footer_sample_size=1_000_000, + **kwargs, +): + """ + Return a file-like object for a single Parquet file. + + The specified parquet `engine` will be used to parse the + footer metadata, and determine the required byte ranges + from the file. The target path will then be opened with + the "parts" (`KnownPartsOfAFile`) caching strategy. + + Note that this method is intended for usage with remote + file systems, and is unlikely to improve parquet-read + performance on local file systems. + + Parameters + ---------- + path: str + Target file path. + mode: str, optional + Mode option to be passed through to `fs.open`. Default is "rb". + metadata: Any, optional + Parquet metadata object. Object type must be supported + by the backend parquet engine. For now, only the "fastparquet" + engine supports an explicit `ParquetFile` metadata object. + If a metadata object is supplied, the remote footer metadata + will not need to be transferred into local memory. + fs: AbstractFileSystem, optional + Filesystem object to use for opening the file. If nothing is + specified, an `AbstractFileSystem` object will be inferred. + engine : str, default "auto" + Parquet engine to use for metadata parsing. Allowed options + include "fastparquet", "pyarrow", and "auto". The specified + engine must be installed in the current environment. If + "auto" is specified, and both engines are installed, + "fastparquet" will take precedence over "pyarrow". + columns: list, optional + List of all column names that may be read from the file. + row_groups : list, optional + List of all row-groups that may be read from the file. 
This + may be a list of row-group indices (integers), or it may be + a list of `RowGroup` metadata objects (if the "fastparquet" + engine is used). + storage_options : dict, optional + Used to generate an `AbstractFileSystem` object if `fs` was + not specified. + strict : bool, optional + Whether the resulting `KnownPartsOfAFile` cache should + fetch reads that go beyond a known byte-range boundary. + If `False` (the default), any read that ends outside a + known part will be zero padded. Note that using + `strict=True` may be useful for debugging. + max_gap : int, optional + Neighboring byte ranges will only be merged when their + inter-range gap is <= `max_gap`. Default is 64KB. + max_block : int, optional + Neighboring byte ranges will only be merged when the size of + the aggregated range is <= `max_block`. Default is 256MB. + footer_sample_size : int, optional + Number of bytes to read from the end of the path to look + for the footer metadata. If the sampled bytes do not contain + the footer, a second read request will be required, and + performance will suffer. Default is 1MB. + **kwargs : + Optional key-word arguments to pass to `fs.open` + """ + + # Make sure we have an `AbstractFileSystem` object + # to work with + if fs is None: + fs = url_to_fs(path, **(storage_options or {}))[0] + + # For now, `columns == []` not supported. Just use + # default `open` command with `path` input + if columns is not None and len(columns) == 0: + return fs.open(path, mode=mode) + + # Set the engine + engine = _set_engine(engine) + + # Fetch the known byte ranges needed to read + # `columns` and/or `row_groups` + data = _get_parquet_byte_ranges( + [path], + fs, + metadata=metadata, + columns=columns, + row_groups=row_groups, + engine=engine, + max_gap=max_gap, + max_block=max_block, + footer_sample_size=footer_sample_size, + ) + + # Extract file name from `data` + fn = next(iter(data)) if data else path + + # Call self.open with "parts" caching + options = kwargs.pop("cache_options", {}).copy() + return fs.open( + fn, + mode=mode, + cache_type="parts", + cache_options={ + **options, + "data": data.get(fn, {}), + "strict": strict, + }, + **kwargs, + ) + + +def _get_parquet_byte_ranges( + paths, + fs, + metadata=None, + columns=None, + row_groups=None, + max_gap=64_000, + max_block=256_000_000, + footer_sample_size=1_000_000, + engine="auto", +): + """Get a dictionary of the known byte ranges needed + to read a specific column/row-group selection from a + Parquet dataset. Each value in the output dictionary + is intended for use as the `data` argument for the + `KnownPartsOfAFile` caching strategy of a single path. + """ + + # Set engine if necessary + if isinstance(engine, str): + engine = _set_engine(engine) + + # Pass to specialized function if metadata is defined + if metadata is not None: + # Use the provided parquet metadata object + # to avoid transferring/parsing footer metadata + return _get_parquet_byte_ranges_from_metadata( + metadata, + fs, + engine, + columns=columns, + row_groups=row_groups, + max_gap=max_gap, + max_block=max_block, + ) + + # Get file sizes asynchronously + file_sizes = fs.sizes(paths) + + # Populate global paths, starts, & ends + result = {} + data_paths = [] + data_starts = [] + data_ends = [] + add_header_magic = True + if columns is None and row_groups is None: + # We are NOT selecting specific columns or row-groups. 
+ # + # We can avoid sampling the footers, and just transfer + # all file data with cat_ranges + for i, path in enumerate(paths): + result[path] = {} + for b in range(0, file_sizes[i], max_block): + data_paths.append(path) + data_starts.append(b) + data_ends.append(min(b + max_block, file_sizes[i])) + add_header_magic = False # "Magic" should already be included + else: + # We ARE selecting specific columns or row-groups. + # + # Gather file footers. + # We just take the last `footer_sample_size` bytes of each + # file (or the entire file if it is smaller than that) + footer_starts = [] + footer_ends = [] + for i, path in enumerate(paths): + footer_ends.append(file_sizes[i]) + sample_size = max(0, file_sizes[i] - footer_sample_size) + footer_starts.append(sample_size) + footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends) + + # Check our footer samples and re-sample if necessary. + missing_footer_starts = footer_starts.copy() + large_footer = 0 + for i, path in enumerate(paths): + footer_size = int.from_bytes(footer_samples[i][-8:-4], "little") + real_footer_start = file_sizes[i] - (footer_size + 8) + if real_footer_start < footer_starts[i]: + missing_footer_starts[i] = real_footer_start + large_footer = max(large_footer, (footer_size + 8)) + if large_footer: + warnings.warn( + f"Not enough data was used to sample the parquet footer. " + f"Try setting footer_sample_size >= {large_footer}." + ) + for i, block in enumerate( + fs.cat_ranges( + paths, + missing_footer_starts, + footer_starts, + ) + ): + footer_samples[i] = block + footer_samples[i] + footer_starts[i] = missing_footer_starts[i] + + # Calculate required byte ranges for each path + for i, path in enumerate(paths): + # Deal with small-file case. + # Just include all remaining bytes of the file + # in a single range. + if file_sizes[i] < max_block: + if footer_starts[i] > 0: + # Only need to transfer the data if the + # footer sample isn't already the whole file + data_paths.append(path) + data_starts.append(0) + data_ends.append(footer_starts[i]) + continue + + # Use "engine" to collect data byte ranges + path_data_starts, path_data_ends = engine._parquet_byte_ranges( + columns, + row_groups=row_groups, + footer=footer_samples[i], + footer_start=footer_starts[i], + ) + + data_paths += [path] * len(path_data_starts) + data_starts += path_data_starts + data_ends += path_data_ends + + # Merge adjacent offset ranges + data_paths, data_starts, data_ends = merge_offset_ranges( + data_paths, + data_starts, + data_ends, + max_gap=max_gap, + max_block=max_block, + sort=False, # Should already be sorted + ) + + # Start by populating `result` with footer samples + for i, path in enumerate(paths): + result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]} + + # Transfer the data byte-ranges into local memory + _transfer_ranges(fs, result, data_paths, data_starts, data_ends) + + # Add b"PAR1" to header if necessary + if add_header_magic: + _add_header_magic(result) + + return result + + +def _get_parquet_byte_ranges_from_metadata( + metadata, + fs, + engine, + columns=None, + row_groups=None, + max_gap=64_000, + max_block=256_000_000, +): + """Simplified version of `_get_parquet_byte_ranges` for + the case that an engine-specific `metadata` object is + provided, and the remote footer metadata does not need to + be transferred before calculating the required byte ranges. 
+ """ + + # Use "engine" to collect data byte ranges + data_paths, data_starts, data_ends = engine._parquet_byte_ranges( + columns, + row_groups=row_groups, + metadata=metadata, + ) + + # Merge adjacent offset ranges + data_paths, data_starts, data_ends = merge_offset_ranges( + data_paths, + data_starts, + data_ends, + max_gap=max_gap, + max_block=max_block, + sort=False, # Should be sorted + ) + + # Transfer the data byte-ranges into local memory + result = {fn: {} for fn in list(set(data_paths))} + _transfer_ranges(fs, result, data_paths, data_starts, data_ends) + + # Add b"PAR1" to header + _add_header_magic(result) + + return result + + +def _transfer_ranges(fs, blocks, paths, starts, ends): + # Use cat_ranges to gather the data byte_ranges + ranges = (paths, starts, ends) + for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)): + blocks[path][(start, stop)] = data + + +def _add_header_magic(data): + # Add b"PAR1" to file headers + for path in list(data.keys()): + add_magic = True + for k in data[path].keys(): + if k[0] == 0 and k[1] >= 4: + add_magic = False + break + if add_magic: + data[path][(0, 4)] = b"PAR1" + + +def _set_engine(engine_str): + # Define a list of parquet engines to try + if engine_str == "auto": + try_engines = ("fastparquet", "pyarrow") + elif not isinstance(engine_str, str): + raise ValueError( + "Failed to set parquet engine! " + "Please pass 'fastparquet', 'pyarrow', or 'auto'" + ) + elif engine_str not in ("fastparquet", "pyarrow"): + raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`") + else: + try_engines = [engine_str] + + # Try importing the engines in `try_engines`, + # and choose the first one that succeeds + for engine in try_engines: + try: + if engine == "fastparquet": + return FastparquetEngine() + elif engine == "pyarrow": + return PyarrowEngine() + except ImportError: + pass + + # Raise an error if a supported parquet engine + # was not found + raise ImportError( + f"The following parquet engines are not installed " + f"in your python environment: {try_engines}." + f"Please install 'fastparquert' or 'pyarrow' to " + f"utilize the `fsspec.parquet` module." + ) + + +class FastparquetEngine: + # The purpose of the FastparquetEngine class is + # to check if fastparquet can be imported (on initialization) + # and to define a `_parquet_byte_ranges` method. In the + # future, this class may also be used to define other + # methods/logic that are specific to fastparquet. 
+ + def __init__(self): + import fastparquet as fp + + self.fp = fp + + def _row_group_filename(self, row_group, pf): + return pf.row_group_filename(row_group) + + def _parquet_byte_ranges( + self, + columns, + row_groups=None, + metadata=None, + footer=None, + footer_start=None, + ): + # Initialize offset ranges and define ParqetFile metadata + pf = metadata + data_paths, data_starts, data_ends = [], [], [] + if pf is None: + pf = self.fp.ParquetFile(io.BytesIO(footer)) + + # Convert columns to a set and add any index columns + # specified in the pandas metadata (just in case) + column_set = None if columns is None else set(columns) + if column_set is not None and hasattr(pf, "pandas_metadata"): + md_index = [ + ind + for ind in pf.pandas_metadata.get("index_columns", []) + # Ignore RangeIndex information + if not isinstance(ind, dict) + ] + column_set |= set(md_index) + + # Check if row_groups is a list of integers + # or a list of row-group metadata + if row_groups and not isinstance(row_groups[0], int): + # Input row_groups contains row-group metadata + row_group_indices = None + else: + # Input row_groups contains row-group indices + row_group_indices = row_groups + row_groups = pf.row_groups + + # Loop through column chunks to add required byte ranges + for r, row_group in enumerate(row_groups): + # Skip this row-group if we are targeting + # specific row-groups + if row_group_indices is None or r in row_group_indices: + # Find the target parquet-file path for `row_group` + fn = self._row_group_filename(row_group, pf) + + for column in row_group.columns: + name = column.meta_data.path_in_schema[0] + # Skip this column if we are targeting a + # specific columns + if column_set is None or name in column_set: + file_offset0 = column.meta_data.dictionary_page_offset + if file_offset0 is None: + file_offset0 = column.meta_data.data_page_offset + num_bytes = column.meta_data.total_compressed_size + if footer_start is None or file_offset0 < footer_start: + data_paths.append(fn) + data_starts.append(file_offset0) + data_ends.append( + min( + file_offset0 + num_bytes, + footer_start or (file_offset0 + num_bytes), + ) + ) + + if metadata: + # The metadata in this call may map to multiple + # file paths. Need to include `data_paths` + return data_paths, data_starts, data_ends + return data_starts, data_ends + + +class PyarrowEngine: + # The purpose of the PyarrowEngine class is + # to check if pyarrow can be imported (on initialization) + # and to define a `_parquet_byte_ranges` method. In the + # future, this class may also be used to define other + # methods/logic that are specific to pyarrow. 
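+    #
+    # Unlike FastparquetEngine, this engine does not accept a pre-parsed
+    # ``metadata`` object; byte ranges are always computed from the sampled
+    # footer bytes.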
+ + def __init__(self): + import pyarrow.parquet as pq + + self.pq = pq + + def _row_group_filename(self, row_group, metadata): + raise NotImplementedError + + def _parquet_byte_ranges( + self, + columns, + row_groups=None, + metadata=None, + footer=None, + footer_start=None, + ): + if metadata is not None: + raise ValueError("metadata input not supported for PyarrowEngine") + + data_starts, data_ends = [], [] + md = self.pq.ParquetFile(io.BytesIO(footer)).metadata + + # Convert columns to a set and add any index columns + # specified in the pandas metadata (just in case) + column_set = None if columns is None else set(columns) + if column_set is not None: + schema = md.schema.to_arrow_schema() + has_pandas_metadata = ( + schema.metadata is not None and b"pandas" in schema.metadata + ) + if has_pandas_metadata: + md_index = [ + ind + for ind in json.loads( + schema.metadata[b"pandas"].decode("utf8") + ).get("index_columns", []) + # Ignore RangeIndex information + if not isinstance(ind, dict) + ] + column_set |= set(md_index) + + # Loop through column chunks to add required byte ranges + for r in range(md.num_row_groups): + # Skip this row-group if we are targeting + # specific row-groups + if row_groups is None or r in row_groups: + row_group = md.row_group(r) + for c in range(row_group.num_columns): + column = row_group.column(c) + name = column.path_in_schema + # Skip this column if we are targeting a + # specific columns + split_name = name.split(".")[0] + if ( + column_set is None + or name in column_set + or split_name in column_set + ): + file_offset0 = column.dictionary_page_offset + if file_offset0 is None: + file_offset0 = column.data_page_offset + num_bytes = column.total_compressed_size + if file_offset0 < footer_start: + data_starts.append(file_offset0) + data_ends.append( + min(file_offset0 + num_bytes, footer_start) + ) + return data_starts, data_ends diff --git a/MLPY/Lib/site-packages/fsspec/registry.py b/MLPY/Lib/site-packages/fsspec/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..c261b9b088a72cd94412b6810d4f353b47dea274 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/registry.py @@ -0,0 +1,311 @@ +from __future__ import annotations + +import importlib +import types +import warnings + +__all__ = ["registry", "get_filesystem_class", "default"] + +# internal, mutable +_registry: dict[str, type] = {} + +# external, immutable +registry = types.MappingProxyType(_registry) +default = "file" + + +def register_implementation(name, cls, clobber=False, errtxt=None): + """Add implementation class to the registry + + Parameters + ---------- + name: str + Protocol name to associate with the class + cls: class or str + if a class: fsspec-compliant implementation class (normally inherits from + ``fsspec.AbstractFileSystem``, gets added straight to the registry. If a + str, the full path to an implementation class like package.module.class, + which gets added to known_implementations, + so the import is deferred until the filesystem is actually used. + clobber: bool (optional) + Whether to overwrite a protocol with the same name; if False, will raise + instead. + errtxt: str (optional) + If given, then a failure to import the given class will result in this + text being given. 
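+
+    For example, a deferred registration could look like
+    ``register_implementation("myproto", "mypkg.MyFileSystem",
+    errtxt="Install mypkg to use myproto")``, where the protocol name,
+    module path and message are placeholders.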
+ """ + if isinstance(cls, str): + if name in known_implementations and clobber is False: + if cls != known_implementations[name]["class"]: + raise ValueError( + f"Name ({name}) already in the known_implementations and clobber " + f"is False" + ) + else: + known_implementations[name] = { + "class": cls, + "err": errtxt or f"{cls} import failed for protocol {name}", + } + + else: + if name in registry and clobber is False: + if _registry[name] is not cls: + raise ValueError( + f"Name ({name}) already in the registry and clobber is False" + ) + else: + _registry[name] = cls + + +# protocols mapped to the class which implements them. This dict can be +# updated with register_implementation +known_implementations = { + "abfs": { + "class": "adlfs.AzureBlobFileSystem", + "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage", + }, + "adl": { + "class": "adlfs.AzureDatalakeFileSystem", + "err": "Install adlfs to access Azure Datalake Gen1", + }, + "arrow_hdfs": { + "class": "fsspec.implementations.arrow.HadoopFileSystem", + "err": "pyarrow and local java libraries required for HDFS", + }, + "asynclocal": { + "class": "morefs.asyn_local.AsyncLocalFileSystem", + "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem", + }, + "az": { + "class": "adlfs.AzureBlobFileSystem", + "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage", + }, + "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"}, + "box": { + "class": "boxfs.BoxFileSystem", + "err": "Please install boxfs to access BoxFileSystem", + }, + "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"}, + "dask": { + "class": "fsspec.implementations.dask.DaskWorkerFileSystem", + "err": "Install dask distributed to access worker file system", + }, + "data": {"class": "fsspec.implementations.data.DataFileSystem"}, + "dbfs": { + "class": "fsspec.implementations.dbfs.DatabricksFileSystem", + "err": "Install the requests package to use the DatabricksFileSystem", + }, + "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"}, + "dropbox": { + "class": "dropboxdrivefs.DropboxDriveFileSystem", + "err": ( + 'DropboxFileSystem requires "dropboxdrivefs","requests" and "' + '"dropbox" to be installed' + ), + }, + "dvc": { + "class": "dvc.api.DVCFileSystem", + "err": "Install dvc to access DVCFileSystem", + }, + "file": {"class": "fsspec.implementations.local.LocalFileSystem"}, + "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"}, + "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"}, + "gcs": { + "class": "gcsfs.GCSFileSystem", + "err": "Please install gcsfs to access Google Storage", + }, + "gdrive": { + "class": "gdrivefs.GoogleDriveFileSystem", + "err": "Please install gdrivefs for access to Google Drive", + }, + "generic": {"class": "fsspec.generic.GenericFileSystem"}, + "git": { + "class": "fsspec.implementations.git.GitFileSystem", + "err": "Install pygit2 to browse local git repos", + }, + "github": { + "class": "fsspec.implementations.github.GithubFileSystem", + "err": "Install the requests package to use the github FS", + }, + "gs": { + "class": "gcsfs.GCSFileSystem", + "err": "Please install gcsfs to access Google Storage", + }, + "hdfs": { + "class": "fsspec.implementations.arrow.HadoopFileSystem", + "err": "pyarrow and local java libraries required for HDFS", + }, + "hf": { + "class": "huggingface_hub.HfFileSystem", + "err": "Install huggingface_hub to access HfFileSystem", + }, + "http": { + "class": 
"fsspec.implementations.http.HTTPFileSystem", + "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', + }, + "https": { + "class": "fsspec.implementations.http.HTTPFileSystem", + "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', + }, + "jlab": { + "class": "fsspec.implementations.jupyter.JupyterFileSystem", + "err": "Jupyter FS requires requests to be installed", + }, + "jupyter": { + "class": "fsspec.implementations.jupyter.JupyterFileSystem", + "err": "Jupyter FS requires requests to be installed", + }, + "lakefs": { + "class": "lakefs_spec.LakeFSFileSystem", + "err": "Please install lakefs-spec to access LakeFSFileSystem", + }, + "libarchive": { + "class": "fsspec.implementations.libarchive.LibArchiveFileSystem", + "err": "LibArchive requires to be installed", + }, + "local": {"class": "fsspec.implementations.local.LocalFileSystem"}, + "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"}, + "oci": { + "class": "ocifs.OCIFileSystem", + "err": "Install ocifs to access OCI Object Storage", + }, + "ocilake": { + "class": "ocifs.OCIFileSystem", + "err": "Install ocifs to access OCI Data Lake", + }, + "oss": { + "class": "ossfs.OSSFileSystem", + "err": "Install ossfs to access Alibaba Object Storage System", + }, + "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"}, + "root": { + "class": "fsspec_xrootd.XRootDFileSystem", + "err": ( + "Install fsspec-xrootd to access xrootd storage system. " + "Note: 'root' is the protocol name for xrootd storage systems, " + "not referring to root directories" + ), + }, + "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, + "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, + "sftp": { + "class": "fsspec.implementations.sftp.SFTPFileSystem", + "err": 'SFTPFileSystem requires "paramiko" to be installed', + }, + "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"}, + "smb": { + "class": "fsspec.implementations.smb.SMBFileSystem", + "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed', + }, + "ssh": { + "class": "fsspec.implementations.sftp.SFTPFileSystem", + "err": 'SFTPFileSystem requires "paramiko" to be installed', + }, + "tar": {"class": "fsspec.implementations.tar.TarFileSystem"}, + "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"}, + "webdav": { + "class": "webdav4.fsspec.WebdavFileSystem", + "err": "Install webdav4 to access WebDAV", + }, + "webhdfs": { + "class": "fsspec.implementations.webhdfs.WebHDFS", + "err": 'webHDFS access requires "requests" to be installed', + }, + "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"}, +} + +assert list(known_implementations) == sorted( + known_implementations +), "Not in alphabetical order" + + +def get_filesystem_class(protocol): + """Fetch named protocol implementation from the registry + + The dict ``known_implementations`` maps protocol names to the locations + of classes implementing the corresponding file-system. When used for the + first time, appropriate imports will happen and the class will be placed in + the registry. All subsequent calls will fetch directly from the registry. + + Some protocol implementations require additional dependencies, and so the + import may fail. In this case, the string in the "err" field of the + ``known_implementations`` will be given as the error message. 
+ """ + if not protocol: + protocol = default + + if protocol not in registry: + if protocol not in known_implementations: + raise ValueError(f"Protocol not known: {protocol}") + bit = known_implementations[protocol] + try: + register_implementation(protocol, _import_class(bit["class"])) + except ImportError as e: + raise ImportError(bit["err"]) from e + cls = registry[protocol] + if getattr(cls, "protocol", None) in ("abstract", None): + cls.protocol = protocol + + return cls + + +s3_msg = """Your installed version of s3fs is very old and known to cause +severe performance issues, see also https://github.com/dask/dask/issues/10276 + +To fix, you should specify a lower version bound on s3fs, or +update the current installation. +""" + + +def _import_class(fqp: str): + """Take a fully-qualified path and return the imported class or identifier. + + ``fqp`` is of the form "package.module.klass" or + "package.module:subobject.klass". + + Warnings + -------- + This can import arbitrary modules. Make sure you haven't installed any modules + that may execute malicious code at import time. + """ + if ":" in fqp: + mod, name = fqp.rsplit(":", 1) + else: + mod, name = fqp.rsplit(".", 1) + + is_s3 = mod == "s3fs" + mod = importlib.import_module(mod) + if is_s3 and mod.__version__.split(".") < ["0", "5"]: + warnings.warn(s3_msg) + for part in name.split("."): + mod = getattr(mod, part) + + if not isinstance(mod, type): + raise TypeError(f"{fqp} is not a class") + + return mod + + +def filesystem(protocol, **storage_options): + """Instantiate filesystems for given protocol and arguments + + ``storage_options`` are specific to the protocol being chosen, and are + passed directly to the class. + """ + if protocol == "arrow_hdfs": + warnings.warn( + "The 'arrow_hdfs' protocol has been deprecated and will be " + "removed in the future. Specify it as 'hdfs'.", + DeprecationWarning, + ) + + cls = get_filesystem_class(protocol) + return cls(**storage_options) + + +def available_protocols(): + """Return a list of the implemented protocols. + + Note that any given protocol may require extra packages to be importable. + """ + return list(known_implementations) diff --git a/MLPY/Lib/site-packages/fsspec/spec.py b/MLPY/Lib/site-packages/fsspec/spec.py new file mode 100644 index 0000000000000000000000000000000000000000..1463a4499988e45ddfb1c3f561f90a4e57229713 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/spec.py @@ -0,0 +1,2068 @@ +from __future__ import annotations + +import io +import json +import logging +import os +import threading +import warnings +import weakref +from errno import ESPIPE +from glob import has_magic +from hashlib import sha256 +from typing import Any, ClassVar, Dict, Tuple + +from .callbacks import DEFAULT_CALLBACK +from .config import apply_config, conf +from .dircache import DirCache +from .transaction import Transaction +from .utils import ( + _unstrip_protocol, + glob_translate, + isfilelike, + other_paths, + read_block, + stringify_path, + tokenize, +) + +logger = logging.getLogger("fsspec") + + +def make_instance(cls, args, kwargs): + return cls(*args, **kwargs) + + +class _Cached(type): + """ + Metaclass for caching file system instances. + + Notes + ----- + Instances are cached according to + + * The values of the class attributes listed in `_extra_tokenize_attributes` + * The arguments passed to ``__init__``. + + This creates an additional reference to the filesystem, which prevents the + filesystem from being garbage collected when all *user* references go away. 
+ A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also* + be made for a filesystem instance to be garbage collected. + """ + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + # Note: we intentionally create a reference here, to avoid garbage + # collecting instances when all other references are gone. To really + # delete a FileSystem, the cache must be cleared. + if conf.get("weakref_instance_cache"): # pragma: no cover + # debug option for analysing fork/spawn conditions + cls._cache = weakref.WeakValueDictionary() + else: + cls._cache = {} + cls._pid = os.getpid() + + def __call__(cls, *args, **kwargs): + kwargs = apply_config(cls, kwargs) + extra_tokens = tuple( + getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes + ) + token = tokenize( + cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs + ) + skip = kwargs.pop("skip_instance_cache", False) + if os.getpid() != cls._pid: + cls._cache.clear() + cls._pid = os.getpid() + if not skip and cls.cachable and token in cls._cache: + cls._latest = token + return cls._cache[token] + else: + obj = super().__call__(*args, **kwargs) + # Setting _fs_token here causes some static linters to complain. + obj._fs_token_ = token + obj.storage_args = args + obj.storage_options = kwargs + if obj.async_impl and obj.mirror_sync_methods: + from .asyn import mirror_sync_methods + + mirror_sync_methods(obj) + + if cls.cachable and not skip: + cls._latest = token + cls._cache[token] = obj + return obj + + +class AbstractFileSystem(metaclass=_Cached): + """ + An abstract super-class for pythonic file-systems + + Implementations are expected to be compatible with or, better, subclass + from here. + """ + + cachable = True # this class can be cached, instances reused + _cached = False + blocksize = 2**22 + sep = "/" + protocol: ClassVar[str | tuple[str, ...]] = "abstract" + _latest = None + async_impl = False + mirror_sync_methods = False + root_marker = "" # For some FSs, may require leading '/' or other character + transaction_type = Transaction + + #: Extra *class attributes* that should be considered when hashing. + _extra_tokenize_attributes = () + + # Set by _Cached metaclass + storage_args: Tuple[Any, ...] + storage_options: Dict[str, Any] + + def __init__(self, *args, **storage_options): + """Create and configure file-system instance + + Instances may be cachable, so if similar enough arguments are seen + a new instance is not required. The token attribute exists to allow + implementations to cache instances if they wish. + + A reasonable default should be provided if there are no arguments. + + Subclasses should call this method. + + Parameters + ---------- + use_listings_cache, listings_expiry_time, max_paths: + passed to ``DirCache``, if the implementation supports + directory listing caching. Pass use_listings_cache=False + to disable such caching. + skip_instance_cache: bool + If this is a cachable implementation, pass True here to force + creating a new instance even if a matching instance exists, and prevent + storing this instance. 
+ asynchronous: bool + loop: asyncio-compatible IOLoop or None + """ + if self._cached: + # reusing instance, don't change + return + self._cached = True + self._intrans = False + self._transaction = None + self._invalidated_caches_in_transaction = [] + self.dircache = DirCache(**storage_options) + + if storage_options.pop("add_docs", None): + warnings.warn("add_docs is no longer supported.", FutureWarning) + + if storage_options.pop("add_aliases", None): + warnings.warn("add_aliases has been removed.", FutureWarning) + # This is set in _Cached + self._fs_token_ = None + + @property + def fsid(self): + """Persistent filesystem id that can be used to compare filesystems + across sessions. + """ + raise NotImplementedError + + @property + def _fs_token(self): + return self._fs_token_ + + def __dask_tokenize__(self): + return self._fs_token + + def __hash__(self): + return int(self._fs_token, 16) + + def __eq__(self, other): + return isinstance(other, type(self)) and self._fs_token == other._fs_token + + def __reduce__(self): + return make_instance, (type(self), self.storage_args, self.storage_options) + + @classmethod + def _strip_protocol(cls, path): + """Turn path from fully-qualified to file-system-specific + + May require FS-specific handling, e.g., for relative paths or links. + """ + if isinstance(path, list): + return [cls._strip_protocol(p) for p in path] + path = stringify_path(path) + protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol + for protocol in protos: + if path.startswith(protocol + "://"): + path = path[len(protocol) + 3 :] + elif path.startswith(protocol + "::"): + path = path[len(protocol) + 2 :] + path = path.rstrip("/") + # use of root_marker to make minimum required path, e.g., "/" + return path or cls.root_marker + + def unstrip_protocol(self, name: str) -> str: + """Format FS-specific path to generic, including protocol""" + protos = (self.protocol,) if isinstance(self.protocol, str) else self.protocol + for protocol in protos: + if name.startswith(f"{protocol}://"): + return name + return f"{protos[0]}://{name}" + + @staticmethod + def _get_kwargs_from_urls(path): + """If kwargs can be encoded in the paths, extract them here + + This should happen before instantiation of the class; incoming paths + then should be amended to strip the options in methods. + + Examples may look like an sftp path "sftp://user@host:/my/path", where + the user and host should become kwargs and later get stripped. + """ + # by default, nothing happens + return {} + + @classmethod + def current(cls): + """Return the most recently instantiated FileSystem + + If no instance has been created, then create one with defaults + """ + if cls._latest in cls._cache: + return cls._cache[cls._latest] + return cls() + + @property + def transaction(self): + """A context within which files are committed together upon exit + + Requires the file class to implement `.commit()` and `.discard()` + for the normal and exception cases. + """ + if self._transaction is None: + self._transaction = self.transaction_type(self) + return self._transaction + + def start_transaction(self): + """Begin write transaction for deferring files, non-context version""" + self._intrans = True + self._transaction = self.transaction_type(self) + return self.transaction + + def end_transaction(self): + """Finish write transaction, non-context version""" + self.transaction.complete() + self._transaction = None + # The invalid cache must be cleared after the transaction is completed. 
+ for path in self._invalidated_caches_in_transaction: + self.invalidate_cache(path) + self._invalidated_caches_in_transaction.clear() + + def invalidate_cache(self, path=None): + """ + Discard any cached directory information + + Parameters + ---------- + path: string or None + If None, clear all listings cached else listings at or under given + path. + """ + # Not necessary to implement invalidation mechanism, may have no cache. + # But if have, you should call this method of parent class from your + # subclass to ensure expiring caches after transacations correctly. + # See the implementation of FTPFileSystem in ftp.py + if self._intrans: + self._invalidated_caches_in_transaction.append(path) + + def mkdir(self, path, create_parents=True, **kwargs): + """ + Create directory entry at path + + For systems that don't have true directories, may create an for + this instance only and not touch the real filesystem + + Parameters + ---------- + path: str + location + create_parents: bool + if True, this is equivalent to ``makedirs`` + kwargs: + may be permissions, etc. + """ + pass # not necessary to implement, may not have directories + + def makedirs(self, path, exist_ok=False): + """Recursively make directories + + Creates directory at path and any intervening required directories. + Raises exception if, for instance, the path already exists but is a + file. + + Parameters + ---------- + path: str + leaf directory name + exist_ok: bool (False) + If False, will error if the target already exists + """ + pass # not necessary to implement, may not have directories + + def rmdir(self, path): + """Remove a directory, if empty""" + pass # not necessary to implement, may not have directories + + def ls(self, path, detail=True, **kwargs): + """List objects at path. + + This should include subdirectories and files at that location. The + difference between a file and a directory must be clear when details + are requested. + + The specific keys, or perhaps a FileInfo class, or similar, is TBD, + but must be consistent across implementations. + Must include: + + - full path to the entry (without protocol) + - size of the entry, in bytes. If the value cannot be determined, will + be ``None``. + - type of entry, "file", "directory" or other + + Additional information + may be present, appropriate to the file-system, e.g., generation, + checksum, etc. + + May use refresh=True|False to allow use of self._ls_from_cache to + check for a saved listing and avoid calling the backend. This would be + common where listing may be expensive. + + Parameters + ---------- + path: str + detail: bool + if True, gives a list of dictionaries, where each is the same as + the result of ``info(path)``. If False, gives a list of paths + (str). + kwargs: may have additional backend-specific options, such as version + information + + Returns + ------- + List of strings if detail is False, or list of directory information + dicts if detail is True. + """ + raise NotImplementedError + + def _ls_from_cache(self, path): + """Check cache for listing + + Returns listing, if found (may be empty list for a directly that exists + but contains nothing), None if not in cache. 
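To make the ``ls`` contract above concrete (illustration only, using the in-memory implementation): ``detail=True`` yields dicts carrying at least ``name``, ``size`` and ``type``, while ``detail=False`` yields plain paths.

```python
import fsspec

fs = fsspec.filesystem("memory")
fs.makedirs("/data/sub", exist_ok=True)
fs.pipe_file("/data/a.bin", b"12345")

print(fs.ls("/data", detail=False))        # e.g. ['/data/a.bin', '/data/sub']
for entry in fs.ls("/data", detail=True):
    # every entry has at least these keys; extra keys are backend-specific
    print(entry["name"], entry["size"], entry["type"])
```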
+ """ + parent = self._parent(path) + try: + return self.dircache[path.rstrip("/")] + except KeyError: + pass + try: + files = [ + f + for f in self.dircache[parent] + if f["name"] == path + or (f["name"] == path.rstrip("/") and f["type"] == "directory") + ] + if len(files) == 0: + # parent dir was listed but did not contain this file + raise FileNotFoundError(path) + return files + except KeyError: + pass + + def walk(self, path, maxdepth=None, topdown=True, on_error="omit", **kwargs): + """Return all files belows path + + List all files, recursing into subdirectories; output is iterator-style, + like ``os.walk()``. For a simple list of files, ``find()`` is available. + + When topdown is True, the caller can modify the dirnames list in-place (perhaps + using del or slice assignment), and walk() will + only recurse into the subdirectories whose names remain in dirnames; + this can be used to prune the search, impose a specific order of visiting, + or even to inform walk() about directories the caller creates or renames before + it resumes walk() again. + Modifying dirnames when topdown is False has no effect. (see os.walk) + + Note that the "files" outputted will include anything that is not + a directory, such as links. + + Parameters + ---------- + path: str + Root to recurse into + maxdepth: int + Maximum recursion depth. None means limitless, but not recommended + on link-based file-systems. + topdown: bool (True) + Whether to walk the directory tree from the top downwards or from + the bottom upwards. + on_error: "omit", "raise", a collable + if omit (default), path with exception will simply be empty; + If raise, an underlying exception will be raised; + if callable, it will be called with a single OSError instance as argument + kwargs: passed to ``ls`` + """ + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + path = self._strip_protocol(path) + full_dirs = {} + dirs = {} + files = {} + + detail = kwargs.pop("detail", False) + try: + listing = self.ls(path, detail=True, **kwargs) + except (FileNotFoundError, OSError) as e: + if on_error == "raise": + raise + elif callable(on_error): + on_error(e) + if detail: + return path, {}, {} + return path, [], [] + + for info in listing: + # each info name must be at least [path]/part , but here + # we check also for names like [path]/part/ + pathname = info["name"].rstrip("/") + name = pathname.rsplit("/", 1)[-1] + if info["type"] == "directory" and pathname != path: + # do not include "self" path + full_dirs[name] = pathname + dirs[name] = info + elif pathname == path: + # file-like with same name as give path + files[""] = info + else: + files[name] = info + + if not detail: + dirs = list(dirs) + files = list(files) + + if topdown: + # Yield before recursion if walking top down + yield path, dirs, files + + if maxdepth is not None: + maxdepth -= 1 + if maxdepth < 1: + if not topdown: + yield path, dirs, files + return + + for d in dirs: + yield from self.walk( + full_dirs[d], + maxdepth=maxdepth, + detail=detail, + topdown=topdown, + **kwargs, + ) + + if not topdown: + # Yield after recursion if walking bottom up + yield path, dirs, files + + def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + """List all files below path. + + Like posix ``find`` command without conditions + + Parameters + ---------- + path : str + maxdepth: int or None + If not None, the maximum number of levels to descend + withdirs: bool + Whether to include directory paths in the output. 
This is True + when used by glob, but users usually only want files. + kwargs are passed to ``ls``. + """ + # TODO: allow equivalent of -name parameter + path = self._strip_protocol(path) + out = {} + + # Add the root directory if withdirs is requested + # This is needed for posix glob compliance + if withdirs and path != "" and self.isdir(path): + out[path] = self.info(path) + + for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs): + if withdirs: + files.update(dirs) + out.update({info["name"]: info for name, info in files.items()}) + if not out and self.isfile(path): + # walk works on directories, but find should also return [path] + # when path happens to be a file + out[path] = {} + names = sorted(out) + if not detail: + return names + else: + return {name: out[name] for name in names} + + def du(self, path, total=True, maxdepth=None, withdirs=False, **kwargs): + """Space used by files and optionally directories within a path + + Directory size does not include the size of its contents. + + Parameters + ---------- + path: str + total: bool + Whether to sum all the file sizes + maxdepth: int or None + Maximum number of directory levels to descend, None for unlimited. + withdirs: bool + Whether to include directory paths in the output. + kwargs: passed to ``find`` + + Returns + ------- + Dict of {path: size} if total=False, or int otherwise, where numbers + refer to bytes used. + """ + sizes = {} + if withdirs and self.isdir(path): + # Include top-level directory in output + info = self.info(path) + sizes[info["name"]] = info["size"] + for f in self.find(path, maxdepth=maxdepth, withdirs=withdirs, **kwargs): + info = self.info(f) + sizes[info["name"]] = info["size"] + if total: + return sum(sizes.values()) + else: + return sizes + + def glob(self, path, maxdepth=None, **kwargs): + """ + Find files by glob-matching. + + If the path ends with '/', only folders are returned. + + We support ``"**"``, + ``"?"`` and ``"[..]"``. We do not support ^ for pattern negation. + + The `maxdepth` option is applied on the first `**` found in the path. + + kwargs are passed to ``ls``. 
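A short sketch of how the traversal helpers documented above relate to each other, using the local filesystem; the ``/tmp/demo`` layout is purely illustrative:

```python
import fsspec

fs = fsspec.filesystem("file")
fs.makedirs("/tmp/demo/sub", exist_ok=True)      # illustrative scratch layout
fs.pipe_file("/tmp/demo/sub/table.csv", b"x,y\n1,2\n")

# walk() is iterator-style, like os.walk()
for root, dirs, files in fs.walk("/tmp/demo", maxdepth=2):
    print(root, dirs, files)

all_files = fs.find("/tmp/demo")                 # flat list of files
total_bytes = fs.du("/tmp/demo", total=True)     # sum of file sizes in bytes
csvs = fs.glob("/tmp/demo/**/*.csv")             # maxdepth applies at the first **
```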
+ """ + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + import re + + seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,) + ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash + path = self._strip_protocol(path) + append_slash_to_dirname = ends_with_sep or path.endswith( + tuple(sep + "**" for sep in seps) + ) + idx_star = path.find("*") if path.find("*") >= 0 else len(path) + idx_qmark = path.find("?") if path.find("?") >= 0 else len(path) + idx_brace = path.find("[") if path.find("[") >= 0 else len(path) + + min_idx = min(idx_star, idx_qmark, idx_brace) + + detail = kwargs.pop("detail", False) + + if not has_magic(path): + if self.exists(path, **kwargs): + if not detail: + return [path] + else: + return {path: self.info(path, **kwargs)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:min_idx]: + min_idx = path[:min_idx].rindex("/") + root = path[: min_idx + 1] + depth = path[min_idx + 1 :].count("/") + 1 + else: + root = "" + depth = path[min_idx + 1 :].count("/") + 1 + + if "**" in path: + if maxdepth is not None: + idx_double_stars = path.find("**") + depth_double_stars = path[idx_double_stars:].count("/") + 1 + depth = depth - depth_double_stars + maxdepth + else: + depth = None + + allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs) + + pattern = glob_translate(path + ("/" if ends_with_sep else "")) + pattern = re.compile(pattern) + + out = { + p: info + for p, info in sorted(allpaths.items()) + if pattern.match( + ( + p + "/" + if append_slash_to_dirname and info["type"] == "directory" + else p + ) + ) + } + + if detail: + return out + else: + return list(out) + + def exists(self, path, **kwargs): + """Is there a file at the given path""" + try: + self.info(path, **kwargs) + return True + except: # noqa: E722 + # any exception allowed bar FileNotFoundError? + return False + + def lexists(self, path, **kwargs): + """If there is a file at the given path (including + broken links)""" + return self.exists(path) + + def info(self, path, **kwargs): + """Give details of entry at path + + Returns a single dictionary, with exactly the same information as ``ls`` + would with ``detail=True``. + + The default implementation should calls ls and could be overridden by a + shortcut. kwargs are passed on to ```ls()``. + + Some file systems might not be able to measure the file's size, in + which case, the returned dict will include ``'size': None``. + + Returns + ------- + dict with keys: name (full path in the FS), size (in bytes), type (file, + directory, or something else) and other FS-specific keys. + """ + path = self._strip_protocol(path) + out = self.ls(self._parent(path), detail=True, **kwargs) + out = [o for o in out if o["name"].rstrip("/") == path] + if out: + return out[0] + out = self.ls(path, detail=True, **kwargs) + path = path.rstrip("/") + out1 = [o for o in out if o["name"].rstrip("/") == path] + if len(out1) == 1: + if "size" not in out1[0]: + out1[0]["size"] = None + return out1[0] + elif len(out1) > 1 or out: + return {"name": path, "size": 0, "type": "directory"} + else: + raise FileNotFoundError(path) + + def checksum(self, path): + """Unique value for current version of file + + If the checksum is the same from one moment to another, the contents + are guaranteed to be the same. If the checksum changes, the contents + *might* have changed. 
+ + This should normally be overridden; default will probably capture + creation/modification timestamp (which would be good) or maybe + access timestamp (which would be bad) + """ + return int(tokenize(self.info(path)), 16) + + def size(self, path): + """Size in bytes of file""" + return self.info(path).get("size", None) + + def sizes(self, paths): + """Size in bytes of each file in a list of paths""" + return [self.size(p) for p in paths] + + def isdir(self, path): + """Is this entry directory-like?""" + try: + return self.info(path)["type"] == "directory" + except OSError: + return False + + def isfile(self, path): + """Is this entry file-like?""" + try: + return self.info(path)["type"] == "file" + except: # noqa: E722 + return False + + def read_text(self, path, encoding=None, errors=None, newline=None, **kwargs): + """Get the contents of the file as a string. + + Parameters + ---------- + path: str + URL of file on this filesystems + encoding, errors, newline: same as `open`. + """ + with self.open( + path, + mode="r", + encoding=encoding, + errors=errors, + newline=newline, + **kwargs, + ) as f: + return f.read() + + def write_text( + self, path, value, encoding=None, errors=None, newline=None, **kwargs + ): + """Write the text to the given file. + + An existing file will be overwritten. + + Parameters + ---------- + path: str + URL of file on this filesystems + value: str + Text to write. + encoding, errors, newline: same as `open`. + """ + with self.open( + path, + mode="w", + encoding=encoding, + errors=errors, + newline=newline, + **kwargs, + ) as f: + return f.write(value) + + def cat_file(self, path, start=None, end=None, **kwargs): + """Get the content of a file + + Parameters + ---------- + path: URL of file on this filesystems + start, end: int + Bytes limits of the read. If negative, backwards from end, + like usual python slices. Either can be None for start or + end of file, respectively + kwargs: passed to ``open()``. + """ + # explicitly set buffering off? + with self.open(path, "rb", **kwargs) as f: + if start is not None: + if start >= 0: + f.seek(start) + else: + f.seek(max(0, f.size + start)) + if end is not None: + if end < 0: + end = f.size + end + return f.read(end - f.tell()) + return f.read() + + def pipe_file(self, path, value, **kwargs): + """Set the bytes of given file""" + with self.open(path, "wb", **kwargs) as f: + f.write(value) + + def pipe(self, path, value=None, **kwargs): + """Put value into path + + (counterpart to ``cat``) + + Parameters + ---------- + path: string or dict(str, bytes) + If a string, a single remote location to put ``value`` bytes; if a dict, + a mapping of {path: bytesvalue}. + value: bytes, optional + If using a single path, these are the bytes to put there. Ignored if + ``path`` is a dict + """ + if isinstance(path, str): + self.pipe_file(self._strip_protocol(path), value, **kwargs) + elif isinstance(path, dict): + for k, v in path.items(): + self.pipe_file(self._strip_protocol(k), v, **kwargs) + else: + raise ValueError("path must be str or dict") + + def cat_ranges( + self, paths, starts, ends, max_gap=None, on_error="return", **kwargs + ): + """Get the contents of byte ranges from one or more files + + Parameters + ---------- + paths: list + A list of of filepaths on this filesystems + starts, ends: int or list + Bytes limits of the read. If using a single int, the same value will be + used to read all the specified files. 
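A sketch of the byte- and text-level helpers above (``pipe_file``/``cat_file``, ``write_text``/``read_text``, and the multi-path ``pipe``/``cat``), again against the in-memory backend for illustration:

```python
import fsspec

fs = fsspec.filesystem("memory")

fs.pipe_file("/demo.bin", b"0123456789")                   # write a whole file from bytes
assert fs.cat_file("/demo.bin") == b"0123456789"
assert fs.cat_file("/demo.bin", start=2, end=5) == b"234"  # byte range
assert fs.cat_file("/demo.bin", start=-3) == b"789"        # negative = from the end

fs.write_text("/demo.txt", "hello\n")                      # text-mode counterparts
assert fs.read_text("/demo.txt") == "hello\n"

fs.pipe({"/a": b"A", "/b": b"B"})                          # multiple paths at once
print(fs.cat(["/a", "/b"]))                                # dict of {path: bytes}
```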
+ """ + if max_gap is not None: + raise NotImplementedError + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, list): + starts = [starts] * len(paths) + if not isinstance(ends, list): + ends = [ends] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + out = [] + for p, s, e in zip(paths, starts, ends): + try: + out.append(self.cat_file(p, s, e)) + except Exception as e: + if on_error == "return": + out.append(e) + else: + raise + return out + + def cat(self, path, recursive=False, on_error="raise", **kwargs): + """Fetch (potentially multiple) paths' contents + + Parameters + ---------- + recursive: bool + If True, assume the path(s) are directories, and get all the + contained files + on_error : "raise", "omit", "return" + If raise, an underlying exception will be raised (converted to KeyError + if the type is in self.missing_exceptions); if omit, keys with exception + will simply not be included in the output; if "return", all keys are + included in the output, but the value will be bytes or an exception + instance. + kwargs: passed to cat_file + + Returns + ------- + dict of {path: contents} if there are multiple paths + or the path has been otherwise expanded + """ + paths = self.expand_path(path, recursive=recursive) + if ( + len(paths) > 1 + or isinstance(path, list) + or paths[0] != self._strip_protocol(path) + ): + out = {} + for path in paths: + try: + out[path] = self.cat_file(path, **kwargs) + except Exception as e: + if on_error == "raise": + raise + if on_error == "return": + out[path] = e + return out + else: + return self.cat_file(paths[0], **kwargs) + + def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, outfile=None, **kwargs): + """Copy single remote file to local""" + from .implementations.local import LocalFileSystem + + if isfilelike(lpath): + outfile = lpath + elif self.isdir(rpath): + os.makedirs(lpath, exist_ok=True) + return None + + fs = LocalFileSystem(auto_mkdir=True) + fs.makedirs(fs._parent(lpath), exist_ok=True) + + with self.open(rpath, "rb", **kwargs) as f1: + if outfile is None: + outfile = open(lpath, "wb") + + try: + callback.set_size(getattr(f1, "size", None)) + data = True + while data: + data = f1.read(self.blocksize) + segment_len = outfile.write(data) + if segment_len is None: + segment_len = len(data) + callback.relative_update(segment_len) + finally: + if not isfilelike(lpath): + outfile.close() + + def get( + self, + rpath, + lpath, + recursive=False, + callback=DEFAULT_CALLBACK, + maxdepth=None, + **kwargs, + ): + """Copy file(s) to local. + + Copies a specific file or tree of files (if recursive=True). If lpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. Can submit a list of paths, which may be glob-patterns + and will be expanded. + + Calls get_file for each source. 
+ """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + from .implementations.local import ( + LocalFileSystem, + make_path_posix, + trailing_sep, + ) + + source_is_str = isinstance(rpath, str) + rpaths = self.expand_path(rpath, recursive=recursive, maxdepth=maxdepth) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + rpaths = [p for p in rpaths if not (trailing_sep(p) or self.isdir(p))] + if not rpaths: + return + + if isinstance(lpath, str): + lpath = make_path_posix(lpath) + + source_is_file = len(rpaths) == 1 + dest_is_dir = isinstance(lpath, str) and ( + trailing_sep(lpath) or LocalFileSystem().isdir(lpath) + ) + + exists = source_is_str and ( + (has_magic(rpath) and source_is_file) + or (not has_magic(rpath) and dest_is_dir and not trailing_sep(rpath)) + ) + lpaths = other_paths( + rpaths, + lpath, + exists=exists, + flatten=not source_is_str, + ) + + callback.set_size(len(lpaths)) + for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): + with callback.branched(rpath, lpath) as child: + self.get_file(rpath, lpath, callback=child, **kwargs) + + def put_file(self, lpath, rpath, callback=DEFAULT_CALLBACK, **kwargs): + """Copy single file to remote""" + if os.path.isdir(lpath): + self.makedirs(rpath, exist_ok=True) + return None + + with open(lpath, "rb") as f1: + size = f1.seek(0, 2) + callback.set_size(size) + f1.seek(0) + + self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True) + with self.open(rpath, "wb", **kwargs) as f2: + while f1.tell() < size: + data = f1.read(self.blocksize) + segment_len = f2.write(data) + if segment_len is None: + segment_len = len(data) + callback.relative_update(segment_len) + + def put( + self, + lpath, + rpath, + recursive=False, + callback=DEFAULT_CALLBACK, + maxdepth=None, + **kwargs, + ): + """Copy file(s) from local. + + Copies a specific file or tree of files (if recursive=True). If rpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. + + Calls put_file for each source. 
+ """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + from .implementations.local import ( + LocalFileSystem, + make_path_posix, + trailing_sep, + ) + + source_is_str = isinstance(lpath, str) + if source_is_str: + lpath = make_path_posix(lpath) + fs = LocalFileSystem() + lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))] + if not lpaths: + return + + source_is_file = len(lpaths) == 1 + dest_is_dir = isinstance(rpath, str) and ( + trailing_sep(rpath) or self.isdir(rpath) + ) + + rpath = ( + self._strip_protocol(rpath) + if isinstance(rpath, str) + else [self._strip_protocol(p) for p in rpath] + ) + exists = source_is_str and ( + (has_magic(lpath) and source_is_file) + or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath)) + ) + rpaths = other_paths( + lpaths, + rpath, + exists=exists, + flatten=not source_is_str, + ) + + callback.set_size(len(rpaths)) + for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): + with callback.branched(lpath, rpath) as child: + self.put_file(lpath, rpath, callback=child, **kwargs) + + def head(self, path, size=1024): + """Get the first ``size`` bytes from file""" + with self.open(path, "rb") as f: + return f.read(size) + + def tail(self, path, size=1024): + """Get the last ``size`` bytes from file""" + with self.open(path, "rb") as f: + f.seek(max(-size, -f.size), 2) + return f.read() + + def cp_file(self, path1, path2, **kwargs): + raise NotImplementedError + + def copy( + self, path1, path2, recursive=False, maxdepth=None, on_error=None, **kwargs + ): + """Copy within two locations in the filesystem + + on_error : "raise", "ignore" + If raise, any not-found exceptions will be raised; if ignore any + not-found exceptions will cause the path to be skipped; defaults to + raise unless recursive is true, where the default is ignore + """ + if on_error is None and recursive: + on_error = "ignore" + elif on_error is None: + on_error = "raise" + + if isinstance(path1, list) and isinstance(path2, list): + # No need to expand paths when both source and destination + # are provided as lists + paths1 = path1 + paths2 = path2 + else: + from .implementations.local import trailing_sep + + source_is_str = isinstance(path1, str) + paths1 = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + paths1 = [p for p in paths1 if not (trailing_sep(p) or self.isdir(p))] + if not paths1: + return + + source_is_file = len(paths1) == 1 + dest_is_dir = isinstance(path2, str) and ( + trailing_sep(path2) or self.isdir(path2) + ) + + exists = source_is_str and ( + (has_magic(path1) and source_is_file) + or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1)) + ) + paths2 = other_paths( + paths1, + path2, + exists=exists, + flatten=not source_is_str, + ) + + for p1, p2 in zip(paths1, paths2): + try: + self.cp_file(p1, p2, **kwargs) + except FileNotFoundError: + if on_error == "raise": + raise + + def expand_path(self, path, recursive=False, maxdepth=None, **kwargs): + """Turn one or more globs or directories into a list of all matching paths + to files or directories. 
+ + kwargs are passed to ``glob`` or ``find``, which may in turn call ``ls`` + """ + + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + if isinstance(path, (str, os.PathLike)): + out = self.expand_path([path], recursive, maxdepth) + else: + out = set() + path = [self._strip_protocol(p) for p in path] + for p in path: + if has_magic(p): + bit = set(self.glob(p, maxdepth=maxdepth, **kwargs)) + out |= bit + if recursive: + # glob call above expanded one depth so if maxdepth is defined + # then decrement it in expand_path call below. If it is zero + # after decrementing then avoid expand_path call. + if maxdepth is not None and maxdepth <= 1: + continue + out |= set( + self.expand_path( + list(bit), + recursive=recursive, + maxdepth=maxdepth - 1 if maxdepth is not None else None, + **kwargs, + ) + ) + continue + elif recursive: + rec = set( + self.find( + p, maxdepth=maxdepth, withdirs=True, detail=False, **kwargs + ) + ) + out |= rec + if p not in out and (recursive is False or self.exists(p)): + # should only check once, for the root + out.add(p) + if not out: + raise FileNotFoundError(path) + return sorted(out) + + def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs): + """Move file(s) from one location to another""" + if path1 == path2: + logger.debug("%s mv: The paths are the same, so no files were moved.", self) + else: + # explicitly raise exception to prevent data corruption + self.copy( + path1, path2, recursive=recursive, maxdepth=maxdepth, onerror="raise" + ) + self.rm(path1, recursive=recursive) + + def rm_file(self, path): + """Delete a file""" + self._rm(path) + + def _rm(self, path): + """Delete one file""" + # this is the old name for the method, prefer rm_file + raise NotImplementedError + + def rm(self, path, recursive=False, maxdepth=None): + """Delete files. + + Parameters + ---------- + path: str or list of str + File(s) to delete. + recursive: bool + If file(s) are directories, recursively delete contents and then + also remove the directory + maxdepth: int or None + Depth to pass to walk for finding files to delete, if recursive. + If None, there will be no limit and infinite recursion may be + possible. + """ + path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth) + for p in reversed(path): + self.rm_file(p) + + @classmethod + def _parent(cls, path): + path = cls._strip_protocol(path) + if "/" in path: + parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker) + return cls.root_marker + parent + else: + return cls.root_marker + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + """Return raw bytes-mode file-like from the file-system""" + return AbstractBufferedFile( + self, + path, + mode, + block_size, + autocommit, + cache_options=cache_options, + **kwargs, + ) + + def open( + self, + path, + mode="rb", + block_size=None, + cache_options=None, + compression=None, + **kwargs, + ): + """ + Return a file-like object from the filesystem + + The resultant instance must function correctly in a context ``with`` + block. + + Parameters + ---------- + path: str + Target file + mode: str like 'rb', 'w' + See builtin ``open()`` + block_size: int + Some indication of buffering - this is a value in bytes + cache_options : dict, optional + Extra arguments to pass through to the cache. + compression: string or None + If given, open file using compression codec. 
Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding, errors, newline: passed on to TextIOWrapper for text mode + """ + import io + + path = self._strip_protocol(path) + if "b" not in mode: + mode = mode.replace("t", "") + "b" + + text_kwargs = { + k: kwargs.pop(k) + for k in ["encoding", "errors", "newline"] + if k in kwargs + } + return io.TextIOWrapper( + self.open( + path, + mode, + block_size=block_size, + cache_options=cache_options, + compression=compression, + **kwargs, + ), + **text_kwargs, + ) + else: + ac = kwargs.pop("autocommit", not self._intrans) + f = self._open( + path, + mode=mode, + block_size=block_size, + autocommit=ac, + cache_options=cache_options, + **kwargs, + ) + if compression is not None: + from fsspec.compression import compr + from fsspec.core import get_compression + + compression = get_compression(path, compression) + compress = compr[compression] + f = compress(f, mode=mode[0]) + + if not ac and "r" not in mode: + self.transaction.files.append(f) + return f + + def touch(self, path, truncate=True, **kwargs): + """Create empty file, or update timestamp + + Parameters + ---------- + path: str + file location + truncate: bool + If True, always set file size to 0; if False, update timestamp and + leave file unchanged, if backend allows this + """ + if truncate or not self.exists(path): + with self.open(path, "wb", **kwargs): + pass + else: + raise NotImplementedError # update timestamp, if possible + + def ukey(self, path): + """Hash of file properties, to tell if it has changed""" + return sha256(str(self.info(path)).encode()).hexdigest() + + def read_block(self, fn, offset, length, delimiter=None): + """Read a block of bytes from + + Starting at ``offset`` of the file, read ``length`` bytes. If + ``delimiter`` is set then we ensure that the read starts and stops at + delimiter boundaries that follow the locations ``offset`` and ``offset + + length``. If ``offset`` is zero then we start at zero. The + bytestring returned WILL include the end delimiter string. + + If offset+length is beyond the eof, reads to eof. + + Parameters + ---------- + fn: string + Path to filename + offset: int + Byte offset to start read + length: int + Number of bytes to read. If None, read to end. + delimiter: bytes (optional) + Ensure reading starts and stops at delimiter bytestring + + Examples + -------- + >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP + b'Alice, 100\\nBo' + >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\n' + + Use ``length=None`` to read to the end of the file. + >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\nCharlie, 300' + + See Also + -------- + :func:`fsspec.utils.read_block` + """ + with self.open(fn, "rb") as f: + size = f.size + if length is None: + length = size + if size is not None and offset + length > size: + length = size - offset + return read_block(f, offset, length, delimiter) + + def to_json(self, *, include_password: bool = True) -> str: + """ + JSON representation of this filesystem instance. + + Parameters + ---------- + include_password: bool, default True + Whether to include the password (if any) in the output. 
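Returning to ``open`` above: text mode wraps the binary handle in a ``TextIOWrapper``, and ``compression`` (a codec name or ``"infer"``) wraps it in the matching codec from ``fsspec.compression.compr``. A minimal sketch, illustration only:

```python
import fsspec

fs = fsspec.filesystem("memory")

with fs.open("/notes.txt", "w") as f:      # text mode on top of a binary handle
    f.write("line 1\n")

with fs.open("/notes.txt", "rb") as f:     # binary mode returns raw bytes
    assert f.read() == b"line 1\n"

# fs.open("/archive.csv.gz", "rb", compression="infer") would additionally
# wrap the handle in the gzip codec, guessed from the ".gz" suffix.
```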
+ + Returns + ------- + JSON string with keys ``cls`` (the python location of this class), + protocol (text name of this class's protocol, first one in case of + multiple), ``args`` (positional args, usually empty), and all other + keyword arguments as their own keys. + + Warnings + -------- + Serialized filesystems may contain sensitive information which have been + passed to the constructor, such as passwords and tokens. Make sure you + store and send them in a secure environment! + """ + from .json import FilesystemJSONEncoder + + return json.dumps( + self, + cls=type( + "_FilesystemJSONEncoder", + (FilesystemJSONEncoder,), + {"include_password": include_password}, + ), + ) + + @staticmethod + def from_json(blob: str) -> AbstractFileSystem: + """ + Recreate a filesystem instance from JSON representation. + + See ``.to_json()`` for the expected structure of the input. + + Parameters + ---------- + blob: str + + Returns + ------- + file system instance, not necessarily of this particular class. + + Warnings + -------- + This can import arbitrary modules (as determined by the ``cls`` key). + Make sure you haven't installed any modules that may execute malicious code + at import time. + """ + from .json import FilesystemJSONDecoder + + return json.loads(blob, cls=FilesystemJSONDecoder) + + def to_dict(self, *, include_password: bool = True) -> Dict[str, Any]: + """ + JSON-serializable dictionary representation of this filesystem instance. + + Parameters + ---------- + include_password: bool, default True + Whether to include the password (if any) in the output. + + Returns + ------- + Dictionary with keys ``cls`` (the python location of this class), + protocol (text name of this class's protocol, first one in case of + multiple), ``args`` (positional args, usually empty), and all other + keyword arguments as their own keys. + + Warnings + -------- + Serialized filesystems may contain sensitive information which have been + passed to the constructor, such as passwords and tokens. Make sure you + store and send them in a secure environment! + """ + from .json import FilesystemJSONEncoder + + json_encoder = FilesystemJSONEncoder() + + cls = type(self) + proto = self.protocol + + storage_options = dict(self.storage_options) + if not include_password: + storage_options.pop("password", None) + + return dict( + cls=f"{cls.__module__}:{cls.__name__}", + protocol=proto[0] if isinstance(proto, (tuple, list)) else proto, + args=json_encoder.make_serializable(self.storage_args), + **json_encoder.make_serializable(storage_options), + ) + + @staticmethod + def from_dict(dct: Dict[str, Any]) -> AbstractFileSystem: + """ + Recreate a filesystem instance from dictionary representation. + + See ``.to_dict()`` for the expected structure of the input. + + Parameters + ---------- + dct: Dict[str, Any] + + Returns + ------- + file system instance, not necessarily of this particular class. + + Warnings + -------- + This can import arbitrary modules (as determined by the ``cls`` key). + Make sure you haven't installed any modules that may execute malicious code + at import time. 
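A round-trip sketch for the JSON/dict serialisation described above; note the warning about credentials being embedded in the output. Because of the instance cache, deserialising with identical arguments returns the already-cached object here:

```python
import fsspec
from fsspec import AbstractFileSystem

fs = fsspec.filesystem("memory")

blob = fs.to_json()                         # string with cls, protocol, args, kwargs
fs2 = AbstractFileSystem.from_json(blob)    # re-imports the class and re-instantiates
assert fs2 is fs                            # same arguments -> same cached instance

state = fs.to_dict(include_password=False)  # dict-level equivalent
fs3 = AbstractFileSystem.from_dict(state)
```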
+ """ + from .json import FilesystemJSONDecoder + + json_decoder = FilesystemJSONDecoder() + + dct = dict(dct) # Defensive copy + + cls = FilesystemJSONDecoder.try_resolve_fs_cls(dct) + if cls is None: + raise ValueError("Not a serialized AbstractFileSystem") + + dct.pop("cls", None) + dct.pop("protocol", None) + + return cls( + *json_decoder.unmake_serializable(dct.pop("args", ())), + **json_decoder.unmake_serializable(dct), + ) + + def _get_pyarrow_filesystem(self): + """ + Make a version of the FS instance which will be acceptable to pyarrow + """ + # all instances already also derive from pyarrow + return self + + def get_mapper(self, root="", check=False, create=False, missing_exceptions=None): + """Create key/value store based on this file-system + + Makes a MutableMapping interface to the FS at the given root path. + See ``fsspec.mapping.FSMap`` for further details. + """ + from .mapping import FSMap + + return FSMap( + root, + self, + check=check, + create=create, + missing_exceptions=missing_exceptions, + ) + + @classmethod + def clear_instance_cache(cls): + """ + Clear the cache of filesystem instances. + + Notes + ----- + Unless overridden by setting the ``cachable`` class attribute to False, + the filesystem class stores a reference to newly created instances. This + prevents Python's normal rules around garbage collection from working, + since the instances refcount will not drop to zero until + ``clear_instance_cache`` is called. + """ + cls._cache.clear() + + def created(self, path): + """Return the created timestamp of a file as a datetime.datetime""" + raise NotImplementedError + + def modified(self, path): + """Return the modified timestamp of a file as a datetime.datetime""" + raise NotImplementedError + + # ------------------------------------------------------------------------ + # Aliases + + def read_bytes(self, path, start=None, end=None, **kwargs): + """Alias of `AbstractFileSystem.cat_file`.""" + return self.cat_file(path, start=start, end=end, **kwargs) + + def write_bytes(self, path, value, **kwargs): + """Alias of `AbstractFileSystem.pipe_file`.""" + self.pipe_file(path, value, **kwargs) + + def makedir(self, path, create_parents=True, **kwargs): + """Alias of `AbstractFileSystem.mkdir`.""" + return self.mkdir(path, create_parents=create_parents, **kwargs) + + def mkdirs(self, path, exist_ok=False): + """Alias of `AbstractFileSystem.makedirs`.""" + return self.makedirs(path, exist_ok=exist_ok) + + def listdir(self, path, detail=True, **kwargs): + """Alias of `AbstractFileSystem.ls`.""" + return self.ls(path, detail=detail, **kwargs) + + def cp(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.copy`.""" + return self.copy(path1, path2, **kwargs) + + def move(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.mv`.""" + return self.mv(path1, path2, **kwargs) + + def stat(self, path, **kwargs): + """Alias of `AbstractFileSystem.info`.""" + return self.info(path, **kwargs) + + def disk_usage(self, path, total=True, maxdepth=None, **kwargs): + """Alias of `AbstractFileSystem.du`.""" + return self.du(path, total=total, maxdepth=maxdepth, **kwargs) + + def rename(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.mv`.""" + return self.mv(path1, path2, **kwargs) + + def delete(self, path, recursive=False, maxdepth=None): + """Alias of `AbstractFileSystem.rm`.""" + return self.rm(path, recursive=recursive, maxdepth=maxdepth) + + def upload(self, lpath, rpath, recursive=False, **kwargs): + """Alias of `AbstractFileSystem.put`.""" 
+ return self.put(lpath, rpath, recursive=recursive, **kwargs) + + def download(self, rpath, lpath, recursive=False, **kwargs): + """Alias of `AbstractFileSystem.get`.""" + return self.get(rpath, lpath, recursive=recursive, **kwargs) + + def sign(self, path, expiration=100, **kwargs): + """Create a signed URL representing the given path + + Some implementations allow temporary URLs to be generated, as a + way of delegating credentials. + + Parameters + ---------- + path : str + The path on the filesystem + expiration : int + Number of seconds to enable the URL for (if supported) + + Returns + ------- + URL : str + The signed URL + + Raises + ------ + NotImplementedError : if method is not implemented for a filesystem + """ + raise NotImplementedError("Sign is not implemented for this filesystem") + + def _isfilestore(self): + # Originally inherited from pyarrow DaskFileSystem. Keeping this + # here for backwards compatibility as long as pyarrow uses its + # legacy fsspec-compatible filesystems and thus accepts fsspec + # filesystems as well + return False + + +class AbstractBufferedFile(io.IOBase): + """Convenient class to derive from to provide buffering + + In the case that the backend does not provide a pythonic file-like object + already, this class contains much of the logic to build one. The only + methods that need to be overridden are ``_upload_chunk``, + ``_initiate_upload`` and ``_fetch_range``. + """ + + DEFAULT_BLOCK_SIZE = 5 * 2**20 + _details = None + + def __init__( + self, + fs, + path, + mode="rb", + block_size="default", + autocommit=True, + cache_type="readahead", + cache_options=None, + size=None, + **kwargs, + ): + """ + Template for files with buffered reading and writing + + Parameters + ---------- + fs: instance of FileSystem + path: str + location in file-system + mode: str + Normal file modes. Currently only 'wb', 'ab' or 'rb'. Some file + systems may be read-only, and some may not support append. + block_size: int + Buffer size for reading or writing, 'default' for class default + autocommit: bool + Whether to write to final destination; may only impact what + happens when file is being closed. + cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead" + Caching policy in read mode. See the definitions in ``core``. + cache_options : dict + Additional options passed to the constructor for the cache specified + by `cache_type`. + size: int + If given and in read mode, suppressed having to look up the file size + kwargs: + Gets stored as self.kwargs + """ + from .core import caches + + self.path = path + self.fs = fs + self.mode = mode + self.blocksize = ( + self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size + ) + self.loc = 0 + self.autocommit = autocommit + self.end = None + self.start = None + self.closed = False + + if cache_options is None: + cache_options = {} + + if "trim" in kwargs: + warnings.warn( + "Passing 'trim' to control the cache behavior has been deprecated. 
" + "Specify it within the 'cache_options' argument instead.", + FutureWarning, + ) + cache_options["trim"] = kwargs.pop("trim") + + self.kwargs = kwargs + + if mode not in {"ab", "rb", "wb"}: + raise NotImplementedError("File mode not supported") + if mode == "rb": + if size is not None: + self.size = size + else: + self.size = self.details["size"] + self.cache = caches[cache_type]( + self.blocksize, self._fetch_range, self.size, **cache_options + ) + else: + self.buffer = io.BytesIO() + self.offset = None + self.forced = False + self.location = None + + @property + def details(self): + if self._details is None: + self._details = self.fs.info(self.path) + return self._details + + @details.setter + def details(self, value): + self._details = value + self.size = value["size"] + + @property + def full_name(self): + return _unstrip_protocol(self.path, self.fs) + + @property + def closed(self): + # get around this attr being read-only in IOBase + # use getattr here, since this can be called during del + return getattr(self, "_closed", True) + + @closed.setter + def closed(self, c): + self._closed = c + + def __hash__(self): + if "w" in self.mode: + return id(self) + else: + return int(tokenize(self.details), 16) + + def __eq__(self, other): + """Files are equal if they have the same checksum, only in read mode""" + if self is other: + return True + return ( + isinstance(other, type(self)) + and self.mode == "rb" + and other.mode == "rb" + and hash(self) == hash(other) + ) + + def commit(self): + """Move from temp to final destination""" + + def discard(self): + """Throw away temporary file""" + + def info(self): + """File information about this path""" + if "r" in self.mode: + return self.details + else: + raise ValueError("Info not available while writing") + + def tell(self): + """Current file location""" + return self.loc + + def seek(self, loc, whence=0): + """Set current file location + + Parameters + ---------- + loc: int + byte location + whence: {0, 1, 2} + from start of file, current location or end of file, resp. + """ + loc = int(loc) + if not self.mode == "rb": + raise OSError(ESPIPE, "Seek only available in read mode") + if whence == 0: + nloc = loc + elif whence == 1: + nloc = self.loc + loc + elif whence == 2: + nloc = self.size + loc + else: + raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)") + if nloc < 0: + raise ValueError("Seek before start of file") + self.loc = nloc + return self.loc + + def write(self, data): + """ + Write data to buffer. + + Buffer only sent on flush() or if buffer is greater than + or equal to blocksize. + + Parameters + ---------- + data: bytes + Set of bytes to be written. + """ + if self.mode not in {"wb", "ab"}: + raise ValueError("File not in write mode") + if self.closed: + raise ValueError("I/O operation on closed file.") + if self.forced: + raise ValueError("This file has been force-flushed, can only close") + out = self.buffer.write(data) + self.loc += out + if self.buffer.tell() >= self.blocksize: + self.flush() + return out + + def flush(self, force=False): + """ + Write buffered data to backend store. + + Writes the current buffer, if it is larger than the block-size, or if + the file is being closed. + + Parameters + ---------- + force: bool + When closing, write the last block even if it is smaller than + blocks are allowed to be. Disallows further writing to this file. 
+ """ + + if self.closed: + raise ValueError("Flush on closed file") + if force and self.forced: + raise ValueError("Force flush cannot be called more than once") + if force: + self.forced = True + + if self.mode not in {"wb", "ab"}: + # no-op to flush on read-mode + return + + if not force and self.buffer.tell() < self.blocksize: + # Defer write on small block + return + + if self.offset is None: + # Initialize a multipart upload + self.offset = 0 + try: + self._initiate_upload() + except: # noqa: E722 + self.closed = True + raise + + if self._upload_chunk(final=force) is not False: + self.offset += self.buffer.seek(0, 2) + self.buffer = io.BytesIO() + + def _upload_chunk(self, final=False): + """Write one part of a multi-block file upload + + Parameters + ========== + final: bool + This is the last block, so should complete file, if + self.autocommit is True. + """ + # may not yet have been initialized, may need to call _initialize_upload + + def _initiate_upload(self): + """Create remote file/upload""" + pass + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + raise NotImplementedError + + def read(self, length=-1): + """ + Return data from cache, or fetch pieces as necessary + + Parameters + ---------- + length: int (-1) + Number of bytes to read; if <0, all remaining bytes. + """ + length = -1 if length is None else int(length) + if self.mode != "rb": + raise ValueError("File not in read mode") + if length < 0: + length = self.size - self.loc + if self.closed: + raise ValueError("I/O operation on closed file.") + if length == 0: + # don't even bother calling fetch + return b"" + out = self.cache._fetch(self.loc, self.loc + length) + + logger.debug( + "%s read: %i - %i %s", + self, + self.loc, + self.loc + length, + self.cache._log_stats(), + ) + self.loc += len(out) + return out + + def readinto(self, b): + """mirrors builtin file's readinto method + + https://docs.python.org/3/library/io.html#io.RawIOBase.readinto + """ + out = memoryview(b).cast("B") + data = self.read(out.nbytes) + out[: len(data)] = data + return len(data) + + def readuntil(self, char=b"\n", blocks=None): + """Return data between current position and first occurrence of char + + char is included in the output, except if the end of the tile is + encountered first. + + Parameters + ---------- + char: bytes + Thing to find + blocks: None or int + How much to read in each go. Defaults to file blocksize - which may + mean a new read on every call. + """ + out = [] + while True: + start = self.tell() + part = self.read(blocks or self.blocksize) + if len(part) == 0: + break + found = part.find(char) + if found > -1: + out.append(part[: found + len(char)]) + self.seek(start + found + len(char)) + break + out.append(part) + return b"".join(out) + + def readline(self): + """Read until first occurrence of newline character + + Note that, because of character encoding, this is not necessarily a + true line ending. + """ + return self.readuntil(b"\n") + + def __next__(self): + out = self.readline() + if out: + return out + raise StopIteration + + def __iter__(self): + return self + + def readlines(self): + """Return all data, split by the newline character""" + data = self.read() + lines = data.split(b"\n") + out = [l + b"\n" for l in lines[:-1]] + if data.endswith(b"\n"): + return out + else: + return out + [lines[-1]] + # return list(self) ??? 
+ + def readinto1(self, b): + return self.readinto(b) + + def close(self): + """Close file + + Finalizes writes, discards cache + """ + if getattr(self, "_unclosable", False): + return + if self.closed: + return + if self.mode == "rb": + self.cache = None + else: + if not self.forced: + self.flush(force=True) + + if self.fs is not None: + self.fs.invalidate_cache(self.path) + self.fs.invalidate_cache(self.fs._parent(self.path)) + + self.closed = True + + def readable(self): + """Whether opened for reading""" + return self.mode == "rb" and not self.closed + + def seekable(self): + """Whether is seekable (only in read mode)""" + return self.readable() + + def writable(self): + """Whether opened for writing""" + return self.mode in {"wb", "ab"} and not self.closed + + def __del__(self): + if not self.closed: + self.close() + + def __str__(self): + return f"" + + __repr__ = __str__ + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__init__.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..45d081921ad29104bedd336dbf04fa86e1e48b7a --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/__init__.py @@ -0,0 +1,287 @@ +import os +from hashlib import md5 + +import pytest + +from fsspec.implementations.local import LocalFileSystem +from fsspec.tests.abstract.copy import AbstractCopyTests # noqa +from fsspec.tests.abstract.get import AbstractGetTests # noqa +from fsspec.tests.abstract.put import AbstractPutTests # noqa + + +class BaseAbstractFixtures: + """ + Abstract base class containing fixtures that are used by but never need to + be overridden in derived filesystem-specific classes to run the abstract + tests on such filesystems. + """ + + @pytest.fixture + def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for many cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._bulk_operations_scenario_0(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_glob_edge_cases_files(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._glob_edge_cases_files(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_target(self, fs, fs_join, fs_path): + """ + Return name of remote directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. 
+ """ + target = fs_join(fs_path, "target") + yield target + if fs.exists(target): + fs.rm(target, recursive=True) + + @pytest.fixture + def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for many cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._bulk_operations_scenario_0(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_glob_edge_cases_files(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._glob_edge_cases_files(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_dir_and_file_with_same_name_prefix( + self, local_fs, local_join, local_path + ): + """ + Scenario on local filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix( + local_fs, local_join, local_path + ) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_10_files_with_hashed_names(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_target(self, local_fs, local_join, local_path): + """ + Return name of local directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. + """ + target = local_join(local_path, "target") + yield target + if local_fs.exists(target): + local_fs.rm(target, recursive=True) + + def _glob_edge_cases_files(self, some_fs, some_join, some_path): + """ + Scenario that is used for glob edge cases cp/get/put tests. + Creates the following directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + ├── 📁 subdir0 + │ ├── 📄 subfile1 + │ ├── 📄 subfile2 + │ └── 📁 nesteddir + │ └── 📄 nestedfile + └── 📁 subdir1 + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + + for subdir_idx in range(2): + subdir = some_join(source, f"subdir{subdir_idx}") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + + return source + + def _bulk_operations_scenario_0(self, some_fs, some_join, some_path): + """ + Scenario that is used for many cp/get/put tests. 
Creates the following + directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + └── 📁 subdir + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + return source + + def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put on directory and file with + the same name prefixes. Creates the following directory and file structure: + + 📁 source + ├── 📄 subdir.txt + └── 📁 subdir + └── 📄 subfile.txt + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + file = some_join(source, "subdir.txt") + subfile = some_join(subdir, "subfile.txt") + some_fs.makedirs(subdir) + some_fs.touch(file) + some_fs.touch(subfile) + return source + + def _10_files_with_hashed_names(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put files order when source and + destination are lists. Creates the following directory and file structure: + + 📁 source + └── 📄 {hashed([0-9])}.txt + """ + source = some_join(some_path, "source") + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + path = some_join(source, f"{hashed_i}.txt") + some_fs.pipe(path=path, value=f"{i}".encode("utf-8")) + return source + + +class AbstractFixtures(BaseAbstractFixtures): + """ + Abstract base class containing fixtures that may be overridden in derived + filesystem-specific classes to run the abstract tests on such filesystems. + + For any particular filesystem some of these fixtures must be overridden, + such as ``fs`` and ``fs_path``, and others may be overridden if the + default functions here are not appropriate, such as ``fs_join``. + """ + + @pytest.fixture + def fs(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture + def fs_join(self): + """ + Return a function that joins its arguments together into a path. + + Most fsspec implementations join paths in a platform-dependent way, + but some will override this to always use a forward slash. + """ + return os.path.join + + @pytest.fixture + def fs_path(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture(scope="class") + def local_fs(self): + # Maybe need an option for auto_mkdir=False? This is only relevant + # for certain implementations. + return LocalFileSystem(auto_mkdir=True) + + @pytest.fixture + def local_join(self): + """ + Return a function that joins its arguments together into a path, on + the local filesystem. + """ + return os.path.join + + @pytest.fixture + def local_path(self, tmpdir): + return tmpdir + + @pytest.fixture + def supports_empty_directories(self): + """ + Return whether this implementation supports empty directories. 
+ """ + return True + + @pytest.fixture + def fs_sanitize_path(self): + return lambda x: x diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccde1c82e34e96b98d8b53c1372c954c1c3c5d4f Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8927b09e2b79ebd42d80ca3e671f24592a4fe00d Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1181077a99c58df7f6787a1759e5cb6bd8e3b00 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a2c7c3bbab61c6fa0ad38f69a420d15f075946 Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85a8725d2a3ddab1db8f7e000abba726de64da1f Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-39.pyc b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aae010bdb988cd50c606bfee27e3d0a921f7eba Binary files /dev/null and b/MLPY/Lib/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/common.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/common.py new file mode 100644 index 0000000000000000000000000000000000000000..22e7c4140404ab2a8928689721419cf05c2760b9 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/common.py @@ -0,0 +1,175 @@ +GLOB_EDGE_CASES_TESTS = { + "argnames": ("path", "recursive", "maxdepth", "expected"), + "argvalues": [ + ("fil?1", False, None, ["file1"]), + ("fil?1", True, None, ["file1"]), + ("file[1-2]", False, None, ["file1", "file2"]), + ("file[1-2]", True, None, ["file1", "file2"]), + ("*", False, None, ["file1", "file2"]), + ( + "*", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*", True, 1, ["file1", "file2"]), + ( + "*", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("*1", False, None, ["file1"]), + ( + "*1", + True, 
+ None, + [ + "file1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*1", True, 2, ["file1", "subdir1/subfile1", "subdir1/subfile2"]), + ( + "**", + False, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**", True, 1, ["file1", "file2"]), + ( + "**", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + False, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("**/*1", False, None, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ( + "**/*1", + True, + None, + [ + "file1", + "subdir0/subfile1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**/*1", True, 1, ["file1"]), + ( + "**/*1", + True, + 2, + ["file1", "subdir0/subfile1", "subdir1/subfile1", "subdir1/subfile2"], + ), + ("**/*1", False, 2, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ("**/subdir0", False, None, []), + ("**/subdir0", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("**/subdir0/nested*", False, 2, []), + ("**/subdir0/nested*", True, 2, ["nestedfile"]), + ("subdir[1-2]", False, None, []), + ("subdir[1-2]", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("subdir[1-2]", True, 2, ["subfile1", "subfile2"]), + ("subdir[0-1]", False, None, []), + ( + "subdir[0-1]", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + False, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ], +} diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/copy.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/copy.py new file mode 100644 index 0000000000000000000000000000000000000000..e39e57e5f7d52bfda8ab5e2398b04cc2303630a0 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/copy.py @@ -0,0 +1,557 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractCopyTests: + def test_copy_file_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1a + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + target_file2 = fs_join(target, "file2") + target_subfile1 = fs_join(target, "subfile1") + + # Copy from source directory + fs.cp(fs_join(source, "file2"), target) + assert fs.isfile(target_file2) + + # Copy from sub directory + 
fs.cp(fs_join(source, "subdir", "subfile1"), target) + assert fs.isfile(target_subfile1) + + # Remove copied files + fs.rm([target_file2, target_subfile1]) + assert not fs.exists(target_file2) + assert not fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.cp(fs_join(source, "file2"), target + "/") + assert fs.isdir(target) + assert fs.isfile(target_file2) + + fs.cp(fs_join(source, "subdir", "subfile1"), target + "/") + assert fs.isfile(target_subfile1) + + def test_copy_file_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1b + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.cp( + fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/") + ) # Note trailing slash + assert fs.isdir(target) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_copy_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1c + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile")) + assert fs.isfile(fs_join(target, "newfile")) + + def test_copy_file_to_file_in_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1d + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.cp( + fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir", "newfile") + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "newfile")) + + def test_copy_directory_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1e + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.cp(s, t) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + fs.cp(s, t, recursive=True) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert fs.isdir(fs_join(target, "subdir", "nesteddir")) + assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else 
[dummy] + ) + + # Limit recursive by maxdepth + fs.cp(s, t, recursive=True, maxdepth=1) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert not fs.exists(fs_join(target, "subdir", "nesteddir")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_directory_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1f + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.cp(s, t) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + with pytest.raises(FileNotFoundError): + fs.ls(target) + + # With recursive + fs.cp(s, t, recursive=True) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.cp(s, t, recursive=True, maxdepth=1) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + def test_copy_glob_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1g + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.cp(fs_join(source, "subdir", "*"), t) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.isdir(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.cp(fs_join(source, "subdir", glob), t, 
recursive=recursive) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.cp( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_glob_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1h + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.cp(fs_join(source, "subdir", "*"), t) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.cp( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_copy_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_glob_edge_cases_files, + fs_target, + fs_sanitize_path, + ): + # Copy scenario 1g + source = fs_glob_edge_cases_files + + target = fs_target + + for new_dir, target_slash in product([True, False], [True, False]): + 
fs.mkdir(target) + + t = fs_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = fs.find(target) + if new_dir: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_copy_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 2a + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.cp(source_files, t) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + assert fs.isfile(fs_join(target, "subfile1")) + + fs.rm( + [ + fs_join(target, "file1"), + fs_join(target, "file2"), + fs_join(target, "subfile1"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_list_of_files_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 2b + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + fs.cp(source_files, fs_join(target, "newdir") + "/") # Note trailing slash + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "file1")) + assert fs.isfile(fs_join(target, "newdir", "file2")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_copy_two_files_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # This is a duplicate of test_copy_list_of_files_to_new_directory and + # can eventually be removed. 
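# Editor's note (illustrative usage, not part of the vendored fsspec code): the
# list-based copy scenarios in this test module boil down to fs.cp/fs.copy
# accepting a list of sources, optionally paired with a list of destinations,
# and preserving the pairing by position. Sketch with the in-memory filesystem;
# the paths are made up for the example:
import fsspec

mfs = fsspec.filesystem("memory")
srcs, dsts = [], []
for i in range(3):
    src = f"/src/{i}.txt"
    mfs.pipe(src, str(i).encode())     # create a small source file
    srcs.append(src)
    dsts.append(f"/dst/{i}.txt")

mfs.copy(path1=srcs, path2=dsts)       # list-to-list copy keeps order
print([mfs.cat(d) for d in dsts])      # [b'0', b'1', b'2']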
+ source = fs_bulk_operations_scenario_0 + + target = fs_target + assert not fs.exists(target) + fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target) + + assert fs.isdir(target) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + + def test_copy_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + fs_target, + fs_dir_and_file_with_same_name_prefix, + supports_empty_directories, + ): + # Create the test dirs + source = fs_dir_and_file_with_same_name_prefix + target = fs_target + + # Test without glob + fs.cp(fs_join(source, "subdir"), target, recursive=True) + + assert fs.isfile(fs_join(target, "subfile.txt")) + assert not fs.isfile(fs_join(target, "subdir.txt")) + + fs.rm([fs_join(target, "subfile.txt")]) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + assert not fs.exists(target) + + # Test with glob + fs.cp(fs_join(source, "subdir*"), target, recursive=True) + + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile.txt")) + assert fs.isfile(fs_join(target, "subdir.txt")) + + def test_copy_with_source_and_destination_as_list( + self, fs, fs_target, fs_join, fs_10_files_with_hashed_names + ): + # Create the test dir + source = fs_10_files_with_hashed_names + target = fs_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(fs_join(source, f"{hashed_i}.txt")) + destination_files.append(fs_join(target, f"{hashed_i}.txt")) + + # Copy and assert order was kept + fs.copy(path1=source_files, path2=destination_files) + + for i in range(10): + file_content = fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/get.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/get.py new file mode 100644 index 0000000000000000000000000000000000000000..851ab81ee581e74cac41c64c83ef0af75826d6b0 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/get.py @@ -0,0 +1,587 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.implementations.local import make_path_posix +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractGetTests: + def test_get_file_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + target_file2 = local_join(target, "file2") + target_subfile1 = local_join(target, "subfile1") + + # Copy from source directory + fs.get(fs_join(source, "file2"), target) + assert local_fs.isfile(target_file2) + + # Copy from sub directory + fs.get(fs_join(source, "subdir", "subfile1"), target) + assert local_fs.isfile(target_subfile1) + + # Remove copied files + local_fs.rm([target_file2, target_subfile1]) + assert not local_fs.exists(target_file2) + assert not local_fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.get(fs_join(source, "file2"), target + "/") + assert local_fs.isdir(target) + assert local_fs.isfile(target_file2) + + fs.get(fs_join(source, "subdir", "subfile1"), target + "/") + assert local_fs.isfile(target_subfile1) + + def test_get_file_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + 
local_fs, + local_join, + local_target, + ): + # Copy scenario 1b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/") + ) # Note trailing slash + + assert local_fs.isdir(target) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1c + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile")) + assert local_fs.isfile(local_join(target, "newfile")) + + def test_get_file_to_file_in_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1d + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, "subdir", "subfile1"), + local_join(target, "newdir", "newfile"), + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "newfile")) + + def test_get_directory_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1e + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.get(s, t) + assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert local_fs.isdir(local_join(target, "subdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "subdir", "nesteddir", "nestedfile") + ) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert not local_fs.exists(local_join(target, 
"subdir", "nesteddir")) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + def test_get_directory_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1f + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = local_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.get(s, t) + assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + def test_get_glob_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.isdir(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert 
not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_glob_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1h + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_fs.ls(target, detail=False), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_get_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_glob_edge_cases_files, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_glob_edge_cases_files + + target = local_target + + for new_dir, target_slash in product([True, False], [True, False]): + local_fs.mkdir(target) + + t = local_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.get(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = local_fs.find(target) + if new_dir: + prefixed_expected = [ + make_path_posix(local_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + make_path_posix(local_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + local_fs.rm(target, recursive=True) 
+ except FileNotFoundError: + pass + + def test_get_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.get(source_files, t) + assert local_fs.isfile(local_join(target, "file1")) + assert local_fs.isfile(local_join(target, "file2")) + assert local_fs.isfile(local_join(target, "subfile1")) + + local_fs.rm( + [ + local_join(target, "file1"), + local_join(target, "file2"), + local_join(target, "subfile1"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_list_of_files_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "file1")) + assert local_fs.isfile(local_join(target, "newdir", "file2")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_directory_recursive( + self, fs, fs_join, fs_path, local_fs, local_join, local_target + ): + # https://github.com/fsspec/filesystem_spec/issues/1062 + # Recursive cp/get/put of source directory into non-existent target directory. 
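# Editor's note (illustrative sketch, not part of the vendored fsspec code): the
# recursive-directory scenario referenced above (fsspec issue #1062) follows
# rsync-like rules: copying "src" into a target that does not yet exist creates
# the target holding src's contents; repeating the call once the target exists
# nests a "src" directory inside it; and a trailing slash on the source always
# copies contents only. Sketch using cp on the in-memory filesystem, which is
# expected to behave the same way as the get/put variants exercised here:
import fsspec

mfs = fsspec.filesystem("memory")
mfs.pipe("/src/file", b"x")

mfs.cp("/src", "/target", recursive=True)    # target absent: copies contents
print(mfs.find("/target"))                   # expected: ['/target/file']

mfs.cp("/src", "/target", recursive=True)    # target exists: nests "src" inside
print(mfs.find("/target"))                   # expected: ['/target/file', '/target/src/file']

mfs.cp("/src/", "/target2", recursive=True)  # trailing slash: contents only
print(mfs.find("/target2"))                  # expected: ['/target2/file']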
+ src = fs_join(fs_path, "src") + src_file = fs_join(src, "file") + fs.mkdir(src) + fs.touch(src_file) + + target = local_target + + # get without slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src, target, recursive=True) + assert local_fs.isdir(target) + + if loop == 0: + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + else: + assert local_fs.isfile(local_join(target, "file")) + assert local_fs.isdir(local_join(target, "src")) + assert local_fs.isfile(local_join(target, "src", "file")) + + local_fs.rm(target, recursive=True) + + # get with slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src + "/", target, recursive=True) + assert local_fs.isdir(target) + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + + def test_get_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_dir_and_file_with_same_name_prefix, + ): + # Create the test dirs + source = fs_dir_and_file_with_same_name_prefix + target = local_target + + # Test without glob + fs.get(fs_join(source, "subdir"), target, recursive=True) + + assert local_fs.isfile(local_join(target, "subfile.txt")) + assert not local_fs.isfile(local_join(target, "subdir.txt")) + + local_fs.rm([local_join(target, "subfile.txt")]) + assert local_fs.ls(target) == [] + + # Test with glob + fs.get(fs_join(source, "subdir*"), target, recursive=True) + + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile.txt")) + assert local_fs.isfile(local_join(target, "subdir.txt")) + + def test_get_with_source_and_destination_as_list( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_10_files_with_hashed_names, + ): + # Create the test dir + source = fs_10_files_with_hashed_names + target = local_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(fs_join(source, f"{hashed_i}.txt")) + destination_files.append( + make_path_posix(local_join(target, f"{hashed_i}.txt")) + ) + + # Copy and assert order was kept + fs.get(rpath=source_files, lpath=destination_files) + + for i in range(10): + file_content = local_fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/mv.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/mv.py new file mode 100644 index 0000000000000000000000000000000000000000..39f6caa3de815e024fa84de2acecc986c823ed29 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/mv.py @@ -0,0 +1,57 @@ +import os + +import pytest + +import fsspec + + +def test_move_raises_error_with_tmpdir(tmpdir): + # Create a file in the temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Define a destination that simulates a protected or invalid path + destination = tmpdir.join("non_existent_directory/destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Use the actual file paths as string + with pytest.raises(FileNotFoundError): + fs.mv(str(source), str(destination)) + + +@pytest.mark.parametrize("recursive", (True, False)) +def test_move_raises_error_with_tmpdir_permission(recursive, tmpdir): + # Create 
a file in the temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Create a protected directory (non-writable) + protected_dir = tmpdir.mkdir("protected_directory") + protected_path = str(protected_dir) + + # Set the directory to read-only + if os.name == "nt": + os.system(f'icacls "{protected_path}" /deny Everyone:(W)') + else: + os.chmod(protected_path, 0o555) # Sets the directory to read-only + + # Define a destination inside the protected directory + destination = protected_dir.join("destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Try to move the file to the read-only directory, expecting a permission error + with pytest.raises(PermissionError): + fs.mv(str(source), str(destination), recursive=recursive) + + # Assert the file was not created in the destination + assert not os.path.exists(destination) + + # Cleanup: Restore permissions so the directory can be cleaned up + if os.name == "nt": + os.system(f'icacls "{protected_path}" /remove:d Everyone') + else: + os.chmod(protected_path, 0o755) # Restore write permission for cleanup diff --git a/MLPY/Lib/site-packages/fsspec/tests/abstract/put.py b/MLPY/Lib/site-packages/fsspec/tests/abstract/put.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc349977f0384d9fc86126498be5c6ad99a21d3 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/tests/abstract/put.py @@ -0,0 +1,591 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractPutTests: + def test_put_file_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1a + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + target_file2 = fs_join(target, "file2") + target_subfile1 = fs_join(target, "subfile1") + + # Copy from source directory + fs.put(local_join(source, "file2"), target) + assert fs.isfile(target_file2) + + # Copy from sub directory + fs.put(local_join(source, "subdir", "subfile1"), target) + assert fs.isfile(target_subfile1) + + # Remove copied files + fs.rm([target_file2, target_subfile1]) + assert not fs.exists(target_file2) + assert not fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.put(local_join(source, "file2"), target + "/") + assert fs.isdir(target) + assert fs.isfile(target_file2) + + fs.put(local_join(source, "subdir", "subfile1"), target + "/") + assert fs.isfile(target_subfile1) + + def test_put_file_to_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1b + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.put( + local_join(source, "subdir", "subfile1"), fs_join(target, "newdir/") + ) # Note trailing slash + assert fs.isdir(target) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_put_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + supports_empty_directories, + local_bulk_operations_scenario_0, + ): + # Copy scenario 1c + source = local_bulk_operations_scenario_0 + + target = 
fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + fs.put(local_join(source, "subdir", "subfile1"), fs_join(target, "newfile")) + assert fs.isfile(fs_join(target, "newfile")) + + def test_put_file_to_file_in_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1d + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.put( + local_join(source, "subdir", "subfile1"), + fs_join(target, "newdir", "newfile"), + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "newfile")) + + def test_put_directory_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1e + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.put(s, t) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + fs.put(s, t, recursive=True) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert fs.isdir(fs_join(target, "subdir", "nesteddir")) + assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.put(s, t, recursive=True, maxdepth=1) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert not fs.exists(fs_join(target, "subdir", "nesteddir")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_directory_to_new_directory( + self, + fs, + fs_join, + fs_target, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1f + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = 
fs_join(source, "subdir") + if source_slash: + s += "/" + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.put(s, t) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + with pytest.raises(FileNotFoundError): + fs.ls(target) + + # With recursive + fs.put(s, t, recursive=True) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.put(s, t, recursive=True, maxdepth=1) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + def test_put_glob_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + supports_empty_directories, + local_bulk_operations_scenario_0, + ): + # Copy scenario 1g + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.put(local_join(source, "subdir", "*"), t) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.isdir(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.put(local_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.put( + local_join(source, "subdir", glob), + t, + recursive=recursive, + maxdepth=1, + ) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_glob_to_new_directory( + self, fs, fs_join, fs_target, 
local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1h + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.put(local_join(source, "subdir", "*"), t) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.put(local_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.put( + local_join(source, "subdir", glob), + t, + recursive=recursive, + maxdepth=1, + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_put_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_target, + local_glob_edge_cases_files, + local_join, + fs_sanitize_path, + ): + # Copy scenario 1g + source = local_glob_edge_cases_files + + target = fs_target + + for new_dir, target_slash in product([True, False], [True, False]): + fs.mkdir(target) + + t = fs_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.put(local_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = fs.find(target) + if new_dir: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_put_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 2a + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + source_files = [ + 
local_join(source, "file1"), + local_join(source, "file2"), + local_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.put(source_files, t) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + assert fs.isfile(fs_join(target, "subfile1")) + + fs.rm( + [ + fs_join(target, "file1"), + fs_join(target, "file2"), + fs_join(target, "subfile1"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_list_of_files_to_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 2b + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + source_files = [ + local_join(source, "file1"), + local_join(source, "file2"), + local_join(source, "subdir", "subfile1"), + ] + + fs.put(source_files, fs_join(target, "newdir") + "/") # Note trailing slash + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "file1")) + assert fs.isfile(fs_join(target, "newdir", "file2")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_put_directory_recursive( + self, fs, fs_join, fs_target, local_fs, local_join, local_path + ): + # https://github.com/fsspec/filesystem_spec/issues/1062 + # Recursive cp/get/put of source directory into non-existent target directory. + src = local_join(local_path, "src") + src_file = local_join(src, "file") + local_fs.mkdir(src) + local_fs.touch(src_file) + + target = fs_target + + # put without slash + assert not fs.exists(target) + for loop in range(2): + fs.put(src, target, recursive=True) + assert fs.isdir(target) + + if loop == 0: + assert fs.isfile(fs_join(target, "file")) + assert not fs.exists(fs_join(target, "src")) + else: + assert fs.isfile(fs_join(target, "file")) + assert fs.isdir(fs_join(target, "src")) + assert fs.isfile(fs_join(target, "src", "file")) + + fs.rm(target, recursive=True) + + # put with slash + assert not fs.exists(target) + for loop in range(2): + fs.put(src + "/", target, recursive=True) + assert fs.isdir(target) + assert fs.isfile(fs_join(target, "file")) + assert not fs.exists(fs_join(target, "src")) + + def test_put_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + fs_target, + local_join, + local_dir_and_file_with_same_name_prefix, + supports_empty_directories, + ): + # Create the test dirs + source = local_dir_and_file_with_same_name_prefix + target = fs_target + + # Test without glob + fs.put(local_join(source, "subdir"), fs_target, recursive=True) + + assert fs.isfile(fs_join(fs_target, "subfile.txt")) + assert not fs.isfile(fs_join(fs_target, "subdir.txt")) + + fs.rm([fs_join(target, "subfile.txt")]) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + assert not fs.exists(target) + + # Test with glob + fs.put(local_join(source, "subdir*"), fs_target, recursive=True) + + assert fs.isdir(fs_join(fs_target, "subdir")) + assert fs.isfile(fs_join(fs_target, "subdir", "subfile.txt")) + assert fs.isfile(fs_join(fs_target, "subdir.txt")) + + def test_copy_with_source_and_destination_as_list( + self, fs, fs_target, fs_join, local_join, local_10_files_with_hashed_names + ): + # Create the test dir + source = local_10_files_with_hashed_names + target = fs_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + 
hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(local_join(source, f"{hashed_i}.txt")) + destination_files.append(fs_join(target, f"{hashed_i}.txt")) + + # Copy and assert order was kept + fs.put(lpath=source_files, rpath=destination_files) + + for i in range(10): + file_content = fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/MLPY/Lib/site-packages/fsspec/transaction.py b/MLPY/Lib/site-packages/fsspec/transaction.py new file mode 100644 index 0000000000000000000000000000000000000000..77293f63ecc5f611e19d849ef236d53e9c258efc --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/transaction.py @@ -0,0 +1,90 @@ +from collections import deque + + +class Transaction: + """Filesystem transaction write context + + Gathers files for deferred commit or discard, so that several write + operations can be finalized semi-atomically. This works by having this + instance as the ``.transaction`` attribute of the given filesystem + """ + + def __init__(self, fs, **kwargs): + """ + Parameters + ---------- + fs: FileSystem instance + """ + self.fs = fs + self.files = deque() + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """End transaction and commit, if exit is not due to exception""" + # only commit if there was no exception + self.complete(commit=exc_type is None) + if self.fs: + self.fs._intrans = False + self.fs._transaction = None + self.fs = None + + def start(self): + """Start a transaction on this FileSystem""" + self.files = deque() # clean up after previous failed completions + self.fs._intrans = True + + def complete(self, commit=True): + """Finish transaction: commit or discard all deferred files""" + while self.files: + f = self.files.popleft() + if commit: + f.commit() + else: + f.discard() + self.fs._intrans = False + self.fs._transaction = None + self.fs = None + + +class FileActor: + def __init__(self): + self.files = [] + + def commit(self): + for f in self.files: + f.commit() + self.files.clear() + + def discard(self): + for f in self.files: + f.discard() + self.files.clear() + + def append(self, f): + self.files.append(f) + + +class DaskTransaction(Transaction): + def __init__(self, fs): + """ + Parameters + ---------- + fs: FileSystem instance + """ + import distributed + + super().__init__(fs) + client = distributed.default_client() + self.files = client.submit(FileActor, actor=True).result() + + def complete(self, commit=True): + """Finish transaction: commit or discard all deferred files""" + if commit: + self.files.commit().result() + else: + self.files.discard().result() + self.fs._intrans = False + self.fs = None diff --git a/MLPY/Lib/site-packages/fsspec/utils.py b/MLPY/Lib/site-packages/fsspec/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..703d55f4e38db48dbca078a1093183747907c972 --- /dev/null +++ b/MLPY/Lib/site-packages/fsspec/utils.py @@ -0,0 +1,740 @@ +from __future__ import annotations + +import contextlib +import logging +import math +import os +import pathlib +import re +import sys +import tempfile +from functools import partial +from hashlib import md5 +from importlib.metadata import version +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Iterable, + Iterator, + Sequence, + TypeVar, +) +from urllib.parse import urlsplit + +if TYPE_CHECKING: + from typing_extensions import TypeGuard + + from fsspec.spec import AbstractFileSystem + + +DEFAULT_BLOCK_SIZE = 5 * 2**20 + +T = TypeVar("T") + + +def 
infer_storage_options( + urlpath: str, inherit_storage_options: dict[str, Any] | None = None +) -> dict[str, Any]: + """Infer storage options from URL path and merge it with existing storage + options. + + Parameters + ---------- + urlpath: str or unicode + Either local absolute file path or URL (hdfs://namenode:8020/file.csv) + inherit_storage_options: dict (optional) + Its contents will get merged with the inferred information from the + given path + + Returns + ------- + Storage options dict. + + Examples + -------- + >>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP + {"protocol": "file", "path", "/mnt/datasets/test.csv"} + >>> infer_storage_options( + ... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1', + ... inherit_storage_options={'extra': 'value'}, + ... ) # doctest: +SKIP + {"protocol": "hdfs", "username": "username", "password": "pwd", + "host": "node", "port": 123, "path": "/mnt/datasets/test.csv", + "url_query": "q=1", "extra": "value"} + """ + # Handle Windows paths including disk name in this special case + if ( + re.match(r"^[a-zA-Z]:[\\/]", urlpath) + or re.match(r"^[a-zA-Z0-9]+://", urlpath) is None + ): + return {"protocol": "file", "path": urlpath} + + parsed_path = urlsplit(urlpath) + protocol = parsed_path.scheme or "file" + if parsed_path.fragment: + path = "#".join([parsed_path.path, parsed_path.fragment]) + else: + path = parsed_path.path + if protocol == "file": + # Special case parsing file protocol URL on Windows according to: + # https://msdn.microsoft.com/en-us/library/jj710207.aspx + windows_path = re.match(r"^/([a-zA-Z])[:|]([\\/].*)$", path) + if windows_path: + path = "%s:%s" % windows_path.groups() + + if protocol in ["http", "https"]: + # for HTTP, we don't want to parse, as requests will anyway + return {"protocol": protocol, "path": urlpath} + + options: dict[str, Any] = {"protocol": protocol, "path": path} + + if parsed_path.netloc: + # Parse `hostname` from netloc manually because `parsed_path.hostname` + # lowercases the hostname which is not always desirable (e.g. in S3): + # https://github.com/dask/dask/issues/1417 + options["host"] = parsed_path.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0] + + if protocol in ("s3", "s3a", "gcs", "gs"): + options["path"] = options["host"] + options["path"] + else: + options["host"] = options["host"] + if parsed_path.port: + options["port"] = parsed_path.port + if parsed_path.username: + options["username"] = parsed_path.username + if parsed_path.password: + options["password"] = parsed_path.password + + if parsed_path.query: + options["url_query"] = parsed_path.query + if parsed_path.fragment: + options["url_fragment"] = parsed_path.fragment + + if inherit_storage_options: + update_storage_options(options, inherit_storage_options) + + return options + + +def update_storage_options( + options: dict[str, Any], inherited: dict[str, Any] | None = None +) -> None: + if not inherited: + inherited = {} + collisions = set(options) & set(inherited) + if collisions: + for collision in collisions: + if options.get(collision) != inherited.get(collision): + raise KeyError( + f"Collision between inferred and specified storage " + f"option:\n{collision}" + ) + options.update(inherited) + + +# Compression extensions registered via fsspec.compression.register_compression +compressions: dict[str, str] = {} + + +def infer_compression(filename: str) -> str | None: + """Infer compression, if available, from filename. + + Infer a named compression type, if registered and available, from filename + extension. 
This includes builtin (gz, bz2, zip) compressions, as well as + optional compressions. See fsspec.compression.register_compression. + """ + extension = os.path.splitext(filename)[-1].strip(".").lower() + if extension in compressions: + return compressions[extension] + return None + + +def build_name_function(max_int: float) -> Callable[[int], str]: + """Returns a function that receives a single integer + and returns it as a string padded by enough zero characters + to align with maximum possible integer + + >>> name_f = build_name_function(57) + + >>> name_f(7) + '07' + >>> name_f(31) + '31' + >>> build_name_function(1000)(42) + '0042' + >>> build_name_function(999)(42) + '042' + >>> build_name_function(0)(0) + '0' + """ + # handle corner cases max_int is 0 or exact power of 10 + max_int += 1e-8 + + pad_length = int(math.ceil(math.log10(max_int))) + + def name_function(i: int) -> str: + return str(i).zfill(pad_length) + + return name_function + + +def seek_delimiter(file: IO[bytes], delimiter: bytes, blocksize: int) -> bool: + r"""Seek current file to file start, file end, or byte after delimiter seq. + + Seeks file to next chunk delimiter, where chunks are defined on file start, + a delimiting sequence, and file end. Use file.tell() to see location afterwards. + Note that file start is a valid split, so must be at offset > 0 to seek for + delimiter. + + Parameters + ---------- + file: a file + delimiter: bytes + a delimiter like ``b'\n'`` or message sentinel, matching file .read() type + blocksize: int + Number of bytes to read from the file at once. + + + Returns + ------- + Returns True if a delimiter was found, False if at file start or end. + + """ + + if file.tell() == 0: + # beginning-of-file, return without seek + return False + + # Interface is for binary IO, with delimiter as bytes, but initialize last + # with result of file.read to preserve compatibility with text IO. + last: bytes | None = None + while True: + current = file.read(blocksize) + if not current: + # end-of-file without delimiter + return False + full = last + current if last else current + try: + if delimiter in full: + i = full.index(delimiter) + file.seek(file.tell() - (len(full) - i) + len(delimiter)) + return True + elif len(current) < blocksize: + # end-of-file without delimiter + return False + except (OSError, ValueError): + pass + last = full[-len(delimiter) :] + + +def read_block( + f: IO[bytes], + offset: int, + length: int | None, + delimiter: bytes | None = None, + split_before: bool = False, +) -> bytes: + """Read a block of bytes from a file + + Parameters + ---------- + f: File + Open file + offset: int + Byte offset to start read + length: int + Number of bytes to read, read through end of file if None + delimiter: bytes (optional) + Ensure reading starts and stops at delimiter bytestring + split_before: bool (optional) + Start/stop read *before* delimiter bytestring. + + + If using the ``delimiter=`` keyword argument we ensure that the read + starts and stops at delimiter boundaries that follow the locations + ``offset`` and ``offset + length``. If ``offset`` is zero then we + start at zero, regardless of delimiter. The bytestring returned WILL + include the terminating delimiter string. 
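+
+    Illustrative example added by the editor (not from the upstream fsspec
+    docstring), showing ``split_before=True``, which the examples below do
+    not cover: the block is then cut just *before* the bounding delimiters,
+    so the terminating ``b'\\n'`` is dropped.
+
+    >>> from io import BytesIO  # doctest: +SKIP
+    >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300')  # doctest: +SKIP
+    >>> read_block(f, 0, 13, delimiter=b'\\n', split_before=True)  # doctest: +SKIP
+    b'Alice, 100\\nBob, 200'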
+ + Examples + -------- + + >>> from io import BytesIO # doctest: +SKIP + >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP + >>> read_block(f, 0, 13) # doctest: +SKIP + b'Alice, 100\\nBo' + + >>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\n' + + >>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP + b'Bob, 200\\nCharlie, 300' + """ + if delimiter: + f.seek(offset) + found_start_delim = seek_delimiter(f, delimiter, 2**16) + if length is None: + return f.read() + start = f.tell() + length -= start - offset + + f.seek(start + length) + found_end_delim = seek_delimiter(f, delimiter, 2**16) + end = f.tell() + + # Adjust split location to before delimiter if seek found the + # delimiter sequence, not start or end of file. + if found_start_delim and split_before: + start -= len(delimiter) + + if found_end_delim and split_before: + end -= len(delimiter) + + offset = start + length = end - start + + f.seek(offset) + + # TODO: allow length to be None and read to the end of the file? + assert length is not None + b = f.read(length) + return b + + +def tokenize(*args: Any, **kwargs: Any) -> str: + """Deterministic token + + (modified from dask.base) + + >>> tokenize([1, 2, '3']) + '9d71491b50023b06fc76928e6eddb952' + + >>> tokenize('Hello') == tokenize('Hello') + True + """ + if kwargs: + args += (kwargs,) + try: + h = md5(str(args).encode()) + except ValueError: + # FIPS systems: https://github.com/fsspec/filesystem_spec/issues/380 + h = md5(str(args).encode(), usedforsecurity=False) + return h.hexdigest() + + +def stringify_path(filepath: str | os.PathLike[str] | pathlib.Path) -> str: + """Attempt to convert a path-like object to a string. + + Parameters + ---------- + filepath: object to be converted + + Returns + ------- + filepath_str: maybe a string version of the object + + Notes + ----- + Objects supporting the fspath protocol are coerced according to its + __fspath__ method. + + For backwards compatibility with older Python version, pathlib.Path + objects are specially coerced. + + Any other object is passed through unchanged, which includes bytes, + strings, buffers, or anything else that's not even path-like. + """ + if isinstance(filepath, str): + return filepath + elif hasattr(filepath, "__fspath__"): + return filepath.__fspath__() + elif hasattr(filepath, "path"): + return filepath.path + else: + return filepath # type: ignore[return-value] + + +def make_instance( + cls: Callable[..., T], args: Sequence[Any], kwargs: dict[str, Any] +) -> T: + inst = cls(*args, **kwargs) + inst._determine_worker() # type: ignore[attr-defined] + return inst + + +def common_prefix(paths: Iterable[str]) -> str: + """For a list of paths, find the shortest prefix common to all""" + parts = [p.split("/") for p in paths] + lmax = min(len(p) for p in parts) + end = 0 + for i in range(lmax): + end = all(p[i] == parts[0][i] for p in parts) + if not end: + break + i += end + return "/".join(parts[0][:i]) + + +def other_paths( + paths: list[str], + path2: str | list[str], + exists: bool = False, + flatten: bool = False, +) -> list[str]: + """In bulk file operations, construct a new file tree from a list of files + + Parameters + ---------- + paths: list of str + The input file tree + path2: str or list of str + Root to construct the new list in. If this is already a list of str, we just + assert it has the right number of elements. 
+ exists: bool (optional) + For a str destination, it is already exists (and is a dir), files should + end up inside. + flatten: bool (optional) + Whether to flatten the input directory tree structure so that the output files + are in the same directory. + + Returns + ------- + list of str + """ + + if isinstance(path2, str): + path2 = path2.rstrip("/") + + if flatten: + path2 = ["/".join((path2, p.split("/")[-1])) for p in paths] + else: + cp = common_prefix(paths) + if exists: + cp = cp.rsplit("/", 1)[0] + if not cp and all(not s.startswith("/") for s in paths): + path2 = ["/".join([path2, p]) for p in paths] + else: + path2 = [p.replace(cp, path2, 1) for p in paths] + else: + assert len(paths) == len(path2) + return path2 + + +def is_exception(obj: Any) -> bool: + return isinstance(obj, BaseException) + + +def isfilelike(f: Any) -> TypeGuard[IO[bytes]]: + for attr in ["read", "close", "tell"]: + if not hasattr(f, attr): + return False + return True + + +def get_protocol(url: str) -> str: + url = stringify_path(url) + parts = re.split(r"(\:\:|\://)", url, maxsplit=1) + if len(parts) > 1: + return parts[0] + return "file" + + +def can_be_local(path: str) -> bool: + """Can the given URL be used with open_local?""" + from fsspec import get_filesystem_class + + try: + return getattr(get_filesystem_class(get_protocol(path)), "local_file", False) + except (ValueError, ImportError): + # not in registry or import failed + return False + + +def get_package_version_without_import(name: str) -> str | None: + """For given package name, try to find the version without importing it + + Import and package.__version__ is still the backup here, so an import + *might* happen. + + Returns either the version string, or None if the package + or the version was not readily found. 
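+
+    Illustrative example added by the editor (not from upstream fsspec); the
+    version string shown is a placeholder:
+
+    >>> get_package_version_without_import("fsspec")  # doctest: +SKIP
+    '2024.2.0'
+    >>> get_package_version_without_import("no-such-package") is None  # doctest: +SKIP
+    True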
+ """ + if name in sys.modules: + mod = sys.modules[name] + if hasattr(mod, "__version__"): + return mod.__version__ + try: + return version(name) + except: # noqa: E722 + pass + try: + import importlib + + mod = importlib.import_module(name) + return mod.__version__ + except (ImportError, AttributeError): + return None + + +def setup_logging( + logger: logging.Logger | None = None, + logger_name: str | None = None, + level: str = "DEBUG", + clear: bool = True, +) -> logging.Logger: + if logger is None and logger_name is None: + raise ValueError("Provide either logger object or logger name") + logger = logger or logging.getLogger(logger_name) + handle = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s -- %(message)s" + ) + handle.setFormatter(formatter) + if clear: + logger.handlers.clear() + logger.addHandler(handle) + logger.setLevel(level) + return logger + + +def _unstrip_protocol(name: str, fs: AbstractFileSystem) -> str: + return fs.unstrip_protocol(name) + + +def mirror_from( + origin_name: str, methods: Iterable[str] +) -> Callable[[type[T]], type[T]]: + """Mirror attributes and methods from the given + origin_name attribute of the instance to the + decorated class""" + + def origin_getter(method: str, self: Any) -> Any: + origin = getattr(self, origin_name) + return getattr(origin, method) + + def wrapper(cls: type[T]) -> type[T]: + for method in methods: + wrapped_method = partial(origin_getter, method) + setattr(cls, method, property(wrapped_method)) + return cls + + return wrapper + + +@contextlib.contextmanager +def nullcontext(obj: T) -> Iterator[T]: + yield obj + + +def merge_offset_ranges( + paths: list[str], + starts: list[int] | int, + ends: list[int] | int, + max_gap: int = 0, + max_block: int | None = None, + sort: bool = True, +) -> tuple[list[str], list[int], list[int]]: + """Merge adjacent byte-offset ranges when the inter-range + gap is <= `max_gap`, and when the merged byte range does not + exceed `max_block` (if specified). By default, this function + will re-order the input paths and byte ranges to ensure sorted + order. If the user can guarantee that the inputs are already + sorted, passing `sort=False` will skip the re-ordering. + """ + # Check input + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, list): + starts = [starts] * len(paths) + if not isinstance(ends, list): + ends = [ends] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + + # Early Return + if len(starts) <= 1: + return paths, starts, ends + + starts = [s or 0 for s in starts] + # Sort by paths and then ranges if `sort=True` + if sort: + paths, starts, ends = ( + list(v) + for v in zip( + *sorted( + zip(paths, starts, ends), + ) + ) + ) + + if paths: + # Loop through the coupled `paths`, `starts`, and + # `ends`, and merge adjacent blocks when appropriate + new_paths = paths[:1] + new_starts = starts[:1] + new_ends = ends[:1] + for i in range(1, len(paths)): + if paths[i] == paths[i - 1] and new_ends[-1] is None: + continue + elif ( + paths[i] != paths[i - 1] + or ((starts[i] - new_ends[-1]) > max_gap) + or (max_block is not None and (ends[i] - new_starts[-1]) > max_block) + ): + # Cannot merge with previous block. 
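+                # Editor's note (illustrative, not part of fsspec): e.g. with
+                # max_gap=0, same-path ranges (0, 30) and (40, 100) land in
+                # this branch because the gap 40 - 30 = 10 exceeds max_gap,
+                # so a separate entry is appended instead of extending the
+                # previous one.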
+ # Add new `paths`, `starts`, and `ends` elements + new_paths.append(paths[i]) + new_starts.append(starts[i]) + new_ends.append(ends[i]) + else: + # Merge with previous block by updating the + # last element of `ends` + new_ends[-1] = ends[i] + return new_paths, new_starts, new_ends + + # `paths` is empty. Just return input lists + return paths, starts, ends + + +def file_size(filelike: IO[bytes]) -> int: + """Find length of any open read-mode file-like""" + pos = filelike.tell() + try: + return filelike.seek(0, 2) + finally: + filelike.seek(pos) + + +@contextlib.contextmanager +def atomic_write(path: str, mode: str = "wb"): + """ + A context manager that opens a temporary file next to `path` and, on exit, + replaces `path` with the temporary file, thereby updating `path` + atomically. + """ + fd, fn = tempfile.mkstemp( + dir=os.path.dirname(path), prefix=os.path.basename(path) + "-" + ) + try: + with open(fd, mode) as fp: + yield fp + except BaseException: + with contextlib.suppress(FileNotFoundError): + os.unlink(fn) + raise + else: + os.replace(fn, path) + + +def _translate(pat, STAR, QUESTION_MARK): + # Copied from: https://github.com/python/cpython/pull/106703. + res: list[str] = [] + add = res.append + i, n = 0, len(pat) + while i < n: + c = pat[i] + i = i + 1 + if c == "*": + # compress consecutive `*` into one + if (not res) or res[-1] is not STAR: + add(STAR) + elif c == "?": + add(QUESTION_MARK) + elif c == "[": + j = i + if j < n and pat[j] == "!": + j = j + 1 + if j < n and pat[j] == "]": + j = j + 1 + while j < n and pat[j] != "]": + j = j + 1 + if j >= n: + add("\\[") + else: + stuff = pat[i:j] + if "-" not in stuff: + stuff = stuff.replace("\\", r"\\") + else: + chunks = [] + k = i + 2 if pat[i] == "!" else i + 1 + while True: + k = pat.find("-", k, j) + if k < 0: + break + chunks.append(pat[i:k]) + i = k + 1 + k = k + 3 + chunk = pat[i:j] + if chunk: + chunks.append(chunk) + else: + chunks[-1] += "-" + # Remove empty ranges -- invalid in RE. + for k in range(len(chunks) - 1, 0, -1): + if chunks[k - 1][-1] > chunks[k][0]: + chunks[k - 1] = chunks[k - 1][:-1] + chunks[k][1:] + del chunks[k] + # Escape backslashes and hyphens for set difference (--). + # Hyphens that create ranges shouldn't be escaped. + stuff = "-".join( + s.replace("\\", r"\\").replace("-", r"\-") for s in chunks + ) + # Escape set operations (&&, ~~ and ||). + stuff = re.sub(r"([&~|])", r"\\\1", stuff) + i = j + 1 + if not stuff: + # Empty range: never match. + add("(?!)") + elif stuff == "!": + # Negated empty range: match any character. + add(".") + else: + if stuff[0] == "!": + stuff = "^" + stuff[1:] + elif stuff[0] in ("^", "["): + stuff = "\\" + stuff + add(f"[{stuff}]") + else: + add(re.escape(c)) + assert i == n + return res + + +def glob_translate(pat): + # Copied from: https://github.com/python/cpython/pull/106703. + # The keyword parameters' values are fixed to: + # recursive=True, include_hidden=True, seps=None + """Translate a pathname with shell wildcards to a regular expression.""" + if os.path.altsep: + seps = os.path.sep + os.path.altsep + else: + seps = os.path.sep + escaped_seps = "".join(map(re.escape, seps)) + any_sep = f"[{escaped_seps}]" if len(seps) > 1 else escaped_seps + not_sep = f"[^{escaped_seps}]" + one_last_segment = f"{not_sep}+" + one_segment = f"{one_last_segment}{any_sep}" + any_segments = f"(?:.+{any_sep})?" 
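+    # Editor's note (illustrative, not part of fsspec): these pieces combine
+    # so that, for example, glob_translate("data/**/*.csv") yields a pattern
+    # that re.match() accepts for "data/x.csv" and "data/a/b/x.csv" but
+    # rejects for "data/x.txt".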
+ any_last_segments = ".*" + results = [] + parts = re.split(any_sep, pat) + last_part_idx = len(parts) - 1 + for idx, part in enumerate(parts): + if part == "*": + results.append(one_segment if idx < last_part_idx else one_last_segment) + continue + if part == "**": + results.append(any_segments if idx < last_part_idx else any_last_segments) + continue + elif "**" in part: + raise ValueError( + "Invalid pattern: '**' can only be an entire path component" + ) + if part: + results.extend(_translate(part, f"{not_sep}*", not_sep)) + if idx < last_part_idx: + results.append(any_sep) + res = "".join(results) + return rf"(?s:{res})\Z" diff --git a/MLPY/Lib/site-packages/functorch/_C.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/functorch/_C.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..42efe5af2a87105cf53c52377cdf611ed107188d Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_C.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/functorch/__init__.py b/MLPY/Lib/site-packages/functorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f8902dccdfa98d0b5503ea3636b156cf42f36d6 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/__init__.py @@ -0,0 +1,38 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import torch + +from torch._functorch.deprecated import ( + combine_state_for_ensemble, + functionalize, + grad, + grad_and_value, + hessian, + jacfwd, + jacrev, + jvp, + make_functional, + make_functional_with_buffers, + vjp, + vmap, +) + +# utilities. Maybe these should go in their own namespace in the future? +from torch._functorch.make_functional import ( + FunctionalModule, + FunctionalModuleWithBuffers, +) + +# Top-level APIs. 
Please think carefully before adding something to the +# top-level namespace: +# - private helper functions should go into torch._functorch +# - very experimental things should go into functorch.experimental +# - compilation related things should go into functorch.compile + +# Was never documented +from torch._functorch.python_key import make_fx + +__version__ = torch.__version__ diff --git a/MLPY/Lib/site-packages/functorch/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11b9bb7e1cba5d9f8823b20ed6bac8dbac6a8172 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/_src/__init__.py b/MLPY/Lib/site-packages/functorch/_src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/functorch/_src/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/_src/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee52b1967102dbd6b20dbf4820f7862c377b6b0a Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_src/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__init__.py b/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bef6245f7a0f900c71b5350dcec65c19c260cd78 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__init__.py @@ -0,0 +1,8 @@ +# This file has moved to under torch/_functorch. It is not public API. +# If you are not a PyTorch developer and you are relying on the following +# imports, please file an issue. +from torch._functorch.aot_autograd import ( + aot_autograd_decompositions, + KNOWN_TYPES, + PytreeThunk, +) diff --git a/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173ae069c9a23d4158da91c5b70e4523d433eefc Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_src/aot_autograd/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__init__.py b/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..37df7424c4056760388c6b0d17288e4571fde359 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__init__.py @@ -0,0 +1,7 @@ +# This file has moved to under torch/_functorch. It is not public API. +# If you are not a PyTorch developer and you are relying on the following +# imports, please file an issue. 
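+# Editor's note (illustrative sketch, not part of the upstream shim): the
+# re-exports below are private helpers; the public eager transforms live in
+# torch._functorch.eager_transforms and are surfaced at the top of the
+# functorch package, e.g.
+#
+#     import torch, functorch
+#     grads = functorch.grad(lambda x: (x ** 2).sum())(torch.ones(3))
+#     # grads == tensor([2., 2., 2.])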
+from torch._functorch.eager_transforms import ( + _assert_wrapped_functional, + _unwrap_functional_tensor, +) diff --git a/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..315adb93a3f1af86b6ebe07048e7565d16107254 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_src/eager_transforms/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/_src/make_functional/__init__.py b/MLPY/Lib/site-packages/functorch/_src/make_functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c96ce28510b5172edbc3941b232c2221e75397de --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/_src/make_functional/__init__.py @@ -0,0 +1,4 @@ +# This file has moved to under torch/_functorch. It is not public API. +# If you are not a PyTorch developer and you are relying on the following +# imports, please file an issue. +from torch._functorch.make_functional import _swap_state diff --git a/MLPY/Lib/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..024188ee95a22a2dddfe8d05fc50344b2f82e598 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_src/make_functional/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/_src/vmap/__init__.py b/MLPY/Lib/site-packages/functorch/_src/vmap/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec274ba24ad6b3f28798c42bfe92c8b14670a51e --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/_src/vmap/__init__.py @@ -0,0 +1,16 @@ +# This file has moved to under torch/_functorch. It is not public API. +# If you are not a PyTorch developer and you are relying on the following +# imports, please file an issue. 
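+# Editor's note (illustrative sketch, not part of the upstream shim): the
+# user-facing counterpart of these internals is functorch.vmap (also exposed
+# as torch.vmap in recent releases), e.g.
+#
+#     import torch, functorch
+#     batched_dot = functorch.vmap(torch.dot)                  # maps over dim 0
+#     out = batched_dot(torch.randn(3, 4), torch.randn(3, 4))  # shape (3,)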
+from torch._functorch.vmap import ( + _add_batch_dim, + _broadcast_to_and_flatten, + _create_batched_inputs, + _get_name, + _process_batched_inputs, + _remove_batch_dim, + _unwrap_batched, + _validate_and_get_batch_size, + Tensor, + tree_flatten, + tree_unflatten, +) diff --git a/MLPY/Lib/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7357d95c0e8ee5c8364b2020f95a300c8cdcc4e Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/_src/vmap/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/compile/__init__.py b/MLPY/Lib/site-packages/functorch/compile/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7fee7319cec6799e385431068747125893f62f45 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/compile/__init__.py @@ -0,0 +1,31 @@ +from torch._functorch import config +from torch._functorch.aot_autograd import ( + aot_function, + aot_module, + aot_module_simplified, + compiled_function, + compiled_module, + get_aot_compilation_context, + get_aot_graph_name, + get_graph_being_compiled, + make_boxed_compiler, + make_boxed_func, +) +from torch._functorch.compilers import ( + debug_compile, + default_decompositions, + draw_graph_compile, + memory_efficient_fusion, + nnc_jit, + nop, + print_compile, + ts_compile, +) +from torch._functorch.fx_minifier import minifier +from torch._functorch.partitioners import ( + default_partition, + draw_graph, + draw_joint_graph, + min_cut_rematerialization_partition, +) +from torch._functorch.python_key import pythonkey_decompose diff --git a/MLPY/Lib/site-packages/functorch/compile/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/compile/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5cf558c5e5d28f8ce248918a33bae0fb16942b4 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/compile/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__init__.py b/MLPY/Lib/site-packages/functorch/dim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..37000809f628401fb76ce2aca0e580a820f36a3f --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/__init__.py @@ -0,0 +1,179 @@ +import dis +import inspect +from typing import Sequence, Union + +import torch + +import functorch._C +from functorch._C import dim as _C +from .tree_map import tree_flatten, tree_map +from .wrap_type import wrap_type + +_C._patch_tensor_class() +dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists + + +class DimensionMismatchError(Exception): + pass + + +class DimensionBindError(Exception): + pass + + +from . import op_properties + +# use dict to avoid writing C++ bindings for set +pointwise = dict.fromkeys(op_properties.pointwise, True) + +use_c = True +if not use_c: + from . import reference + + +class _Tensor: + # fast path around slow wrapping/unwrapping logic for simply queries used + # by the implementation... 
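+    # Editor's note (illustrative sketch, not part of the upstream module):
+    # typical first-class-dim usage exercising this fast path looks roughly
+    # like
+    #
+    #     from functorch.dim import dims
+    #     i, j = dims()            # bind two fresh dimension objects
+    #     x = torch.randn(3, 4)
+    #     xi = x[i, j]             # positional dims -> first-class dims
+    #     y = xi.order(i, j)       # back to an ordinary positional tensor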
+ + @property + def dims(self): + return tuple(d for d in self._levels if isinstance(d, Dim)) + + def dim(self): + return self.ndim + + if use_c: + __torch_function__ = classmethod(_C.__torch_function__) + expand = _C._instancemethod(_C.expand) + else: + __torch_function__ = reference.__torch_function__ + expand = reference.expand + + index = _C._instancemethod(_C.index) + + def __repr__(self): + tensor, levels, ndim = self._tensor, self._levels, self.ndim + return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}" + + +TensorLike = (_Tensor, torch.Tensor) + + +class Dim(_C.Dim, _Tensor): + # note that _C.Dim comes before tensor because we want the Dim API for things like size to take precendence. + # Tensor defines format, but we want to print Dims with special formatting + __format__ = object.__format__ + + +class Tensor(_Tensor, _C.Tensor): + if not use_c: + from_batched = staticmethod(_C.Tensor_from_batched) + from_positional = staticmethod(_C.Tensor_from_positional) + sum = _C._instancemethod(_C.Tensor_sum) + + +def cat(tensors, dim, new_dim): + n = dims() + return stack(tensors, n, dim).index([n, dim], new_dim) + + +if use_c: + _wrap = _C._wrap + + def _def(name, *args, **kwargs): + orig = getattr(torch.Tensor, name) + setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs))) + + t__getitem__ = _C._instancemethod(_C.__getitem__) + stack = _C.stack + split = _C._instancemethod(_C.split) +else: + _wrap, _def = reference._wrap, reference._def + t__getitem__ = reference.t__getitem__ + stack = reference.stack + split = reference.split + +# note: there is no python reference +t__setitem__ = _C._instancemethod(_C.__setitem__) +# this is patched in the C API because otherwise torch.Tensor will +# no longer be considered a sequence and things will break +# torch.Tensor.__getitem__ = t__getitem__ + +_Tensor.__getitem__ = t__getitem__ +# torch.Tensor.__setitem__ = t__setitem__ +_Tensor.__setitem__ = t__setitem__ + +torch.Tensor.split = split +_Tensor.split = split +torch.Tensor.expand = _C._instancemethod(_C.expand) +torch.Tensor.index = _C._instancemethod(_C.index) +wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__) +del _Tensor.ndim + +if use_c: + _Tensor.order = _C._instancemethod(_C.order) +else: + _Tensor.order = reference.positional + +_def("mean") +_def("sum") +_def("all") +_def("amax") +_def("amin") +_def("aminmax") +_def("any") +_def("count_nonzero") +_def("logsumexp") +_def("nanmean") +_def("nansum") +_def("prod") +_def("std", keepdim_offset=2) +_def("var", keepdim_offset=2) +_def("max", single_dim=True) +_def("min", single_dim=True) +_def("argmax", single_dim=True) +_def("argmin", single_dim=True) +_def("kthvalue", single_dim=True) +_def("median", single_dim=True) +_def("nanmedian", single_dim=True) +_def("mode", single_dim=True) +_def("sort", reduce=False) +_def("argsort", reduce=False) +_def("unbind", single_dim=True) +_def("chunk", dim_offset=1, reduce=False) +_def("cummax", single_dim=True, reduce=False) +_def("cummin", single_dim=True, reduce=False) +_def("cumprod", single_dim=True, reduce=False) +_def("cumprod_", single_dim=True, reduce=False) +_def("cumsum", single_dim=True, reduce=False) +_def("cumsum_", single_dim=True, reduce=False) +_def("logcumsumexp", single_dim=True, reduce=False) +_def("renorm", dim_offset=1, single_dim=True, reduce=False) +_def("softmax", single_dim=True, reduce=False) +softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False) + +# stuff 
to handle in the future, because they require special +# binding logic for dims +# cross +# diag_embed +# diagonal +# diagonal_scatter +# diff +# nanquantile +# quantile +# roll +# rot90 +# topk (new dimes on output) +# should these all be subsumed by inplace indexing? +# index_add_ +# index_add +# index_copy +# index_copy_ +# index_fill +# index_fill_ +# index_select +# scatter +# scatter_ +# scatter_add +# scatter_add_ +# scatter_reduce diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c1a4a0cf61ce4357a0e9870a7bd02a321a66017 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8a643dbf18f48e4feb3fcae6db23a10d029d949 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21bf5d5c02d0402d667b24b3679211e0192971d0 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/dim.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/dim.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5db6ffffcf05a2b395b0a1589f408a6805b56e8d Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/dim.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/magic_trace.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/magic_trace.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f23187108b206fca8972839bf6c9d99835f6655c Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/magic_trace.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/op_properties.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/op_properties.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cbacf13149bd57b42b0befc4a3828290a2868ef Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/op_properties.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/reference.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/reference.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68e8dd9334c99bdfbd60a666a556f9280e9d3015 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/reference.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/__pycache__/tree_map.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/tree_map.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efebc79fd1fc1d577930b5b8d943eb521d0a4cf5 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/tree_map.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/functorch/dim/__pycache__/wrap_type.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/dim/__pycache__/wrap_type.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf1e4f4946292bd0d55e575e09ac89c67d9b06b2 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/dim/__pycache__/wrap_type.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/dim/batch_tensor.py b/MLPY/Lib/site-packages/functorch/dim/batch_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..92b10d049640d4b48dc3437fbb22a26929c68799 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/batch_tensor.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +from contextlib import contextmanager + +from torch._C._functorch import _vmap_add_layers, _vmap_remove_layers + +_enabled = False + + +@contextmanager +def _enable_layers(dims): + global _enabled + assert not _enabled + input = sorted((d._level, d.size) for d in dims if not isinstance(d, int)) + n = len(input) + try: + _vmap_add_layers(input) + _enabled = True + yield + finally: + _enabled = False + _vmap_remove_layers(n) diff --git a/MLPY/Lib/site-packages/functorch/dim/delayed_mul_tensor.py b/MLPY/Lib/site-packages/functorch/dim/delayed_mul_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..397b1b56796f7be73987cfa4f7eb58fb58ea0a40 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/delayed_mul_tensor.py @@ -0,0 +1,77 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import torch + +from . 
import _Tensor, Tensor +from .reference import _dims, _enable_layers, llist, ltuple + + +class DelayedMulTensor(_Tensor): + def __init__(self, lhs, rhs): + self._lhs, self._rhs = lhs, rhs + self._data = None + self._levels_data = None + self._has_device = lhs._has_device or rhs._has_device + self._batchtensor_data = None + self._tensor_data = None + + @property + def _levels(self): + if self._levels_data is None: + levels = llist(self._lhs._levels) + for l in self._rhs._levels: + if l not in levels: + levels.append(l) + self._levels_data = ltuple(levels) + return self._levels_data + + @property + def _batchtensor(self): + if self._batchtensor_data is None: + with _enable_layers(self._levels): + print("bt multiply fallback") + self._batchtensor_data = self._lhs._batchtensor * self._rhs._batchtensor + return self._batchtensor_data + + @property + def _tensor(self): + if self._tensor_data is None: + self._tensor_data = Tensor.from_batched( + self._batchtensor, self._has_device + )._tensor + return self._tensor_data + + @property + def ndim(self): + return self._batchtensor.ndim + + @property + def dims(self): + return ltuple(super().dims) + + def sum(self, dim): + dims = _dims(dim, 0, False, False) + n = ord("a") + all_levels = self._levels + + def to_char(d): + return chr(n + all_levels.index(d)) + + plhs, levelslhs = self._lhs._tensor, self._lhs._levels + prhs, levelsrhs = self._rhs._tensor, self._rhs._levels + new_dims = tuple(d for d in self.dims if d not in dims) + new_levels = [l for l in self._levels if l not in dims] + fmt = "".join( + [ + *(to_char(d) for d in levelslhs), + ",", + *(to_char(d) for d in levelsrhs), + "->", + *(to_char(d) for d in new_levels), + ] + ) + result_data = torch.einsum(fmt, (plhs, prhs)) + return Tensor.from_positional(result_data, new_levels, True) diff --git a/MLPY/Lib/site-packages/functorch/dim/dim.py b/MLPY/Lib/site-packages/functorch/dim/dim.py new file mode 100644 index 0000000000000000000000000000000000000000..dbe5ba9f23e6e3ca36e3028e8d4f41b25bd5a306 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/dim.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import dis +import inspect + +from dataclasses import dataclass +from typing import Union + +from . import DimList + +_vmap_levels = [] + + +@dataclass +class LevelInfo: + level: int + alive: bool = True + + +class Dim: + def __init__(self, name: str, size: Union[None, int] = None): + self.name = name + self._size = None + self._vmap_level = None + if size is not None: + self.size = size + + def __del__(self): + if self._vmap_level is not None: + _vmap_active_levels[self._vmap_stack].alive = False # noqa: F821 + while ( + not _vmap_levels[-1].alive + and current_level() == _vmap_levels[-1].level # noqa: F821 + ): + _vmap_decrement_nesting() # noqa: F821 + _vmap_levels.pop() + + @property + def size(self): + assert self.is_bound + return self._size + + @size.setter + def size(self, size: int): + from . 
import DimensionBindError + + if self._size is None: + self._size = size + self._vmap_level = _vmap_increment_nesting(size, "same") # noqa: F821 + self._vmap_stack = len(_vmap_levels) + _vmap_levels.append(LevelInfo(self._vmap_level)) + + elif self._size != size: + raise DimensionBindError( + f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}" + ) + + @property + def is_bound(self): + return self._size is not None + + def __repr__(self): + return self.name + + +def extract_name(inst): + assert inst.opname == "STORE_FAST" or inst.opname == "STORE_NAME" + return inst.argval + + +_cache = {} + + +def dims(lists=0): + frame = inspect.currentframe() + assert frame is not None + calling_frame = frame.f_back + assert calling_frame is not None + code, lasti = calling_frame.f_code, calling_frame.f_lasti + key = (code, lasti) + if key not in _cache: + first = lasti // 2 + 1 + instructions = list(dis.get_instructions(calling_frame.f_code)) + unpack = instructions[first] + + if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME": + # just a single dim, not a list + name = unpack.argval + ctor = Dim if lists == 0 else DimList + _cache[key] = lambda: ctor(name=name) + else: + assert unpack.opname == "UNPACK_SEQUENCE" + ndims = unpack.argval + names = tuple( + extract_name(instructions[first + 1 + i]) for i in range(ndims) + ) + first_list = len(names) - lists + _cache[key] = lambda: tuple( + Dim(n) if i < first_list else DimList(name=n) + for i, n in enumerate(names) + ) + return _cache[key]() + + +def _dim_set(positional, arg): + def convert(a): + if isinstance(a, Dim): + return a + else: + assert isinstance(a, int) + return positional[a] + + if arg is None: + return positional + elif not isinstance(arg, (Dim, int)): + return tuple(convert(a) for a in arg) + else: + return (convert(arg),) diff --git a/MLPY/Lib/site-packages/functorch/dim/magic_trace.py b/MLPY/Lib/site-packages/functorch/dim/magic_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..8ad92632a2ef08b300493d9e63c8a7d3ab8e9749 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/magic_trace.py @@ -0,0 +1,42 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
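+# Editor's note (illustrative sketch, not part of the upstream module):
+# magic_trace below is meant to be used as a context manager around the code
+# being profiled, e.g.
+#
+#     from functorch.dim.magic_trace import magic_trace
+#     with magic_trace(output="trace.fxt"):
+#         run_workload()   # hypothetical function to trace
+#
+# On first use it downloads the Linux-only magic-trace binary and attaches it
+# to the current process, so it is not expected to work on other platforms.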
+import os +import signal +import subprocess +from contextlib import contextmanager + + +@contextmanager +def magic_trace(output="trace.fxt", magic_trace_cache="/tmp/magic-trace"): + pid = os.getpid() + if not os.path.exists(magic_trace_cache): + print(f"Downloading magic_trace to: {magic_trace_cache}") + subprocess.run( + [ + "wget", + "-O", + magic_trace_cache, + "-q", + "https://github.com/janestreet/magic-trace/releases/download/v1.0.2/magic-trace", + ] + ) + subprocess.run(["chmod", "+x", magic_trace_cache]) + args = [magic_trace_cache, "attach", "-pid", str(pid), "-o", output] + p = subprocess.Popen(args, stderr=subprocess.PIPE, encoding="utf-8") + while True: + x = p.stderr.readline() + print(x) + if "Attached" in x: + break + try: + yield + finally: + p.send_signal(signal.SIGINT) + r = p.wait() + print(p.stderr.read()) + p.stderr.close() + if r != 0: + raise ValueError(f"magic_trace exited abnormally: {r}") diff --git a/MLPY/Lib/site-packages/functorch/dim/op_properties.py b/MLPY/Lib/site-packages/functorch/dim/op_properties.py new file mode 100644 index 0000000000000000000000000000000000000000..137bb5d7383ae33201e38e88c254a0fad683ed1d --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/op_properties.py @@ -0,0 +1,311 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import torch + +# pointwise operators can go through a faster pathway + +tensor_magic_methods = ["add", ""] +pointwise_magic_methods_with_reverse = ( + "add", + "sub", + "mul", + "floordiv", + "div", + "truediv", + "mod", + "pow", + "lshift", + "rshift", + "and", + "or", + "xor", +) +pointwise_magic_methods = ( + *(x for m in pointwise_magic_methods_with_reverse for x in (m, "r" + m)), + "eq", + "gt", + "le", + "lt", + "ge", + "gt", + "ne", + "neg", + "pos", + "abs", + "invert", + "iadd", + "isub", + "imul", + "ifloordiv", + "idiv", + "itruediv", + "imod", + "ipow", + "ilshift", + "irshift", + "iand", + "ior", + "ixor", + "int", + "long", + "float", + "complex", +) + +pointwise_methods = (*(f"__{m}__" for m in pointwise_magic_methods),) + +pointwise = ( + *(getattr(torch.Tensor, m) for m in pointwise_methods), + torch.nn.functional.dropout, + torch.where, + torch.Tensor.abs, + torch.abs, + torch.Tensor.acos, + torch.acos, + torch.Tensor.acosh, + torch.acosh, + torch.Tensor.add, + torch.add, + torch.Tensor.addcdiv, + torch.addcdiv, + torch.Tensor.addcmul, + torch.addcmul, + torch.Tensor.addr, + torch.addr, + torch.Tensor.angle, + torch.angle, + torch.Tensor.asin, + torch.asin, + torch.Tensor.asinh, + torch.asinh, + torch.Tensor.atan, + torch.atan, + torch.Tensor.atan2, + torch.atan2, + torch.Tensor.atanh, + torch.atanh, + torch.Tensor.bitwise_and, + torch.bitwise_and, + torch.Tensor.bitwise_left_shift, + torch.bitwise_left_shift, + torch.Tensor.bitwise_not, + torch.bitwise_not, + torch.Tensor.bitwise_or, + torch.bitwise_or, + torch.Tensor.bitwise_right_shift, + torch.bitwise_right_shift, + torch.Tensor.bitwise_xor, + torch.bitwise_xor, + torch.Tensor.ceil, + torch.ceil, + torch.celu, + torch.nn.functional.celu, + torch.Tensor.clamp, + torch.clamp, + torch.Tensor.clamp_max, + torch.clamp_max, + torch.Tensor.clamp_min, + torch.clamp_min, + torch.Tensor.copysign, + torch.copysign, + torch.Tensor.cos, + torch.cos, + torch.Tensor.cosh, + torch.cosh, + torch.Tensor.deg2rad, + torch.deg2rad, + torch.Tensor.digamma, + torch.digamma, + torch.Tensor.div, + 
torch.div, + torch.dropout, + torch.nn.functional.dropout, + torch.nn.functional.elu, + torch.Tensor.eq, + torch.eq, + torch.Tensor.erf, + torch.erf, + torch.Tensor.erfc, + torch.erfc, + torch.Tensor.erfinv, + torch.erfinv, + torch.Tensor.exp, + torch.exp, + torch.Tensor.exp2, + torch.exp2, + torch.Tensor.expm1, + torch.expm1, + torch.feature_dropout, + torch.Tensor.float_power, + torch.float_power, + torch.Tensor.floor, + torch.floor, + torch.Tensor.floor_divide, + torch.floor_divide, + torch.Tensor.fmod, + torch.fmod, + torch.Tensor.frac, + torch.frac, + torch.Tensor.frexp, + torch.frexp, + torch.Tensor.gcd, + torch.gcd, + torch.Tensor.ge, + torch.ge, + torch.nn.functional.gelu, + torch.nn.functional.glu, + torch.Tensor.gt, + torch.gt, + torch.Tensor.hardshrink, + torch.hardshrink, + torch.nn.functional.hardshrink, + torch.nn.functional.hardsigmoid, + torch.nn.functional.hardswish, + torch.nn.functional.hardtanh, + torch.Tensor.heaviside, + torch.heaviside, + torch.Tensor.hypot, + torch.hypot, + torch.Tensor.i0, + torch.i0, + torch.Tensor.igamma, + torch.igamma, + torch.Tensor.igammac, + torch.igammac, + torch.Tensor.isclose, + torch.isclose, + torch.Tensor.isfinite, + torch.isfinite, + torch.Tensor.isinf, + torch.isinf, + torch.Tensor.isnan, + torch.isnan, + torch.Tensor.isneginf, + torch.isneginf, + torch.Tensor.isposinf, + torch.isposinf, + torch.Tensor.isreal, + torch.isreal, + torch.Tensor.kron, + torch.kron, + torch.Tensor.lcm, + torch.lcm, + torch.Tensor.ldexp, + torch.ldexp, + torch.Tensor.le, + torch.le, + torch.nn.functional.leaky_relu, + torch.Tensor.lerp, + torch.lerp, + torch.Tensor.lgamma, + torch.lgamma, + torch.Tensor.log, + torch.log, + torch.Tensor.log10, + torch.log10, + torch.Tensor.log1p, + torch.log1p, + torch.Tensor.log2, + torch.log2, + torch.nn.functional.logsigmoid, + torch.Tensor.logical_and, + torch.logical_and, + torch.Tensor.logical_not, + torch.logical_not, + torch.Tensor.logical_or, + torch.logical_or, + torch.Tensor.logical_xor, + torch.logical_xor, + torch.Tensor.logit, + torch.logit, + torch.Tensor.lt, + torch.lt, + torch.Tensor.maximum, + torch.maximum, + torch.Tensor.minimum, + torch.minimum, + torch.nn.functional.mish, + torch.Tensor.mvlgamma, + torch.mvlgamma, + torch.Tensor.nan_to_num, + torch.nan_to_num, + torch.Tensor.ne, + torch.ne, + torch.Tensor.neg, + torch.neg, + torch.Tensor.nextafter, + torch.nextafter, + torch.Tensor.outer, + torch.outer, + torch.polar, + torch.Tensor.polygamma, + torch.polygamma, + torch.Tensor.positive, + torch.positive, + torch.Tensor.pow, + torch.pow, + torch.Tensor.prelu, + torch.prelu, + torch.nn.functional.prelu, + torch.Tensor.rad2deg, + torch.rad2deg, + torch.Tensor.reciprocal, + torch.reciprocal, + torch.Tensor.relu, + torch.relu, + torch.nn.functional.relu, + torch.nn.functional.relu6, + torch.Tensor.remainder, + torch.remainder, + torch.Tensor.round, + torch.round, + torch.rrelu, + torch.nn.functional.rrelu, + torch.Tensor.rsqrt, + torch.rsqrt, + torch.rsub, + torch.selu, + torch.nn.functional.selu, + torch.Tensor.sgn, + torch.sgn, + torch.Tensor.sigmoid, + torch.sigmoid, + torch.nn.functional.sigmoid, + torch.Tensor.sign, + torch.sign, + torch.Tensor.signbit, + torch.signbit, + torch.nn.functional.silu, + torch.Tensor.sin, + torch.sin, + torch.Tensor.sinc, + torch.sinc, + torch.Tensor.sinh, + torch.sinh, + torch.nn.functional.softplus, + torch.nn.functional.softshrink, + torch.Tensor.sqrt, + torch.sqrt, + torch.Tensor.square, + torch.square, + torch.Tensor.sub, + torch.sub, + torch.Tensor.tan, + torch.tan, + 
torch.Tensor.tanh, + torch.tanh, + torch.nn.functional.tanh, + torch.threshold, + torch.nn.functional.threshold, + torch.trapz, + torch.Tensor.true_divide, + torch.true_divide, + torch.Tensor.trunc, + torch.trunc, + torch.Tensor.xlogy, + torch.xlogy, + torch.rand_like, +) diff --git a/MLPY/Lib/site-packages/functorch/dim/reference.py b/MLPY/Lib/site-packages/functorch/dim/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..79ec697e6e2446cf00f4493df666d83b8120b130 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/reference.py @@ -0,0 +1,645 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# reference python implementations for C ops +import torch + +from functorch._C import dim as _C +from . import op_properties +from .batch_tensor import _enable_layers +from .tree_map import tree_flatten, tree_map + +DimList = _C.DimList +import operator +from functools import reduce + + +# use dict to avoid writing C++ bindings for set +pointwise = set(op_properties.pointwise) + + +def prod(x): + return reduce(operator.mul, x, 1) + + +def _wrap_dim(d, N, keepdim): + from . import Dim + + if isinstance(d, Dim): + assert not keepdim, "cannot preserve first-class dimensions with keepdim=True" + return d + elif d >= 0: + return d - N + else: + return d + + +def _dims(d, N, keepdim, single_dim): + from . import Dim + + if isinstance(d, (Dim, int)): + return ltuple((_wrap_dim(d, N, keepdim),)) + assert not single_dim, f"expected a single dimension or int but found: {d}" + return ltuple(_wrap_dim(x, N, keepdim) for x in d) + + +def _bind_dims_to_size(lhs_size, rhs, lhs_debug): + from . import DimensionMismatchError + + not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound) + if len(not_bound) == 1: + idx, d = not_bound[0] + rhs_so_far = prod(r.size for r in rhs if r.is_bound) + if lhs_size % rhs_so_far != 0: + rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs) + raise DimensionMismatchError( + f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}" + ) + new_size = lhs_size // rhs_so_far + d.size = new_size + elif len(not_bound) > 1: + rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs) + raise DimensionMismatchError( + f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}" + ) + else: + rhs_size = prod(r.size for r in rhs) + if lhs_size != rhs_size: + raise DimensionMismatchError( + f"Dimension sizes to do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}" + ) + + +def _tensor_levels(inp): + from . import _Tensor + + if isinstance(inp, _Tensor): + return inp._tensor, llist(inp._levels), inp._has_device + else: + return inp, llist(range(-inp.ndim, 0)), True + + +def _match_levels(v, from_levels, to_levels): + view = [] + permute = [] + requires_view = False + size = v.size() + for t in to_levels: + try: + idx = from_levels.index(t) + permute.append(idx) + view.append(size[idx]) + except ValueError: + view.append(1) + requires_view = True + if permute != list(range(len(permute))): + v = v.permute(*permute) + if requires_view: + v = v.view(*view) + return v + + +# make a single dimension positional but do not permute it, +# used to do multi-tensor operators where the dim being acted on +# should not physically move if possible +def _positional_no_permute(self, dim, expand_dim=False): + from . 
import Tensor + + ptensor, levels = self._tensor, llist(self._levels) + try: + idx = levels.index(dim) + except ValueError: + if not expand_dim: + raise + idx = 0 + ptensor = ptensor.expand(dim.size, *ptensor.size()) + levels.insert(0, 0) + idx_batched = 0 + for i in range(idx): + if isinstance(levels[i], int): + levels[i] -= 1 + idx_batched += 1 + levels[idx] = -idx_batched - 1 + return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched + + +def seq(a, b): + from . import Dim + + if isinstance(a, Dim) != isinstance(b, Dim): + return False + if isinstance(a, Dim): + return a is b + else: + return a == b + + +class isin: + def __contains__(self, item): + for x in self: + if seq(item, x): + return True + return False + + def index(self, item): + for i, x in enumerate(self): + if seq(item, x): + return i + raise ValueError + + +class llist(isin, list): + pass + + +class ltuple(isin, tuple): + pass + + +empty_dict = {} + + +@classmethod +def __torch_function__(self, orig, cls, args, kwargs=empty_dict): + from . import _Tensor, Tensor, TensorLike + from .delayed_mul_tensor import DelayedMulTensor + + if orig is torch.Tensor.__mul__: + lhs, rhs = args + if ( + isinstance(lhs, _Tensor) + and isinstance(rhs, _Tensor) + and lhs.ndim == 0 + and rhs.ndim == 0 + ): + return DelayedMulTensor(lhs, rhs) + all_dims = llist() + flat_args, unflatten = tree_flatten((args, kwargs)) + device_holding_tensor = None + for f in flat_args: + if isinstance(f, _Tensor): + if f._has_device: + device_holding_tensor = f._batchtensor + for d in f.dims: + if d not in all_dims: + all_dims.append(d) + + def unwrap(t): + if isinstance(t, _Tensor): + r = t._batchtensor + if device_holding_tensor is not None and not t._has_device: + r = r.to(device=device_holding_tensor.device) + return r + return t + + if orig in pointwise: + result_levels = llist() + arg_levels = llist() + to_expand = [] + for i, f in enumerate(flat_args): + if isinstance(f, TensorLike): + ptensor, levels, _ = _tensor_levels(f) + if ( + isinstance(f, _Tensor) + and not f._has_device + and device_holding_tensor is not None + ): + ptensor = ptensor.to(device=device_holding_tensor.device) + flat_args[i] = ptensor + for l in levels: + if l not in result_levels: + result_levels.append(l) + to_expand.append((i, levels)) + + for i, levels in to_expand: + flat_args[i] = _match_levels(flat_args[i], levels, result_levels) + args, kwargs = unflatten(flat_args) + result = orig(*args, **kwargs) + + def wrap(t): + if isinstance(t, TensorLike): + return Tensor.from_positional( + t, result_levels, device_holding_tensor is not None + ) + return t + + return tree_map(wrap, result) + else: + + def wrap(t): + if isinstance(t, TensorLike): + return Tensor.from_batched(t, device_holding_tensor is not None) + return t + + with _enable_layers(all_dims): + print(f"batch_tensor for {orig}") + args, kwargs = unflatten(unwrap(f) for f in flat_args) + result = orig(*args, **kwargs) + # print("END", orig) + return tree_map(wrap, result) + + +def positional(self, *dims): + from . 
import Dim, DimensionBindError, Tensor + + ptensor, levels = self._tensor, llist(self._levels) + flat_dims = llist() + view = [] + needs_view = False + ndim = self.ndim + for d in dims: + if isinstance(d, DimList): + flat_dims.extend(d) + view.extend(e.size for e in d) + elif isinstance(d, Dim): + flat_dims.append(d) + view.append(d.size) + elif isinstance(d, int): + d = _wrap_dim(d, ndim, False) + flat_dims.append(d) + view.append(ptensor.size(d)) + else: + flat_dims.extend(d) + view.append(prod(e.size for e in d)) + needs_view = True + + permute = list(range(len(levels))) + nflat = len(flat_dims) + for i, d in enumerate(flat_dims): + try: + idx = levels.index(d) + except ValueError as e: + raise DimensionBindError( + f"tensor of dimensions {self.dims} does not contain dim {d}" + ) from e + p = permute[idx] + del levels[idx] + del permute[idx] + levels.insert(i, 0) + permute.insert(i, p) + ptensor = ptensor.permute(*permute) + seen = 0 + for i in range(len(levels) - 1, -1, -1): + if isinstance(levels[i], int): + seen += 1 + levels[i] = -seen + result = Tensor.from_positional(ptensor, levels, self._has_device) + if needs_view: + result = result.reshape(*view, *result.size()[len(flat_dims) :]) + return result + + +def _contains_dim(input): + from . import Dim + + for i in input: + if isinstance(i, Dim): + return True + + +def expand(self, *sizes): + if not _contains_dim(sizes): + return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes)) + dims = sizes + sizes = [d.size for d in dims] + [-1] * self.ndim + self = self.expand(*sizes) + return self[dims] + + +_not_present = object() + + +def _getarg(name, offset, args, kwargs, default): + if len(args) > offset: + return args[offset] + return kwargs.get(name, default) + + +def _patcharg(name, offset, args, kwargs, value): + if len(args) > offset: + args[offset] = value + else: + kwargs[name] = value + + +def _wrap( + orig, dim_offset=0, keepdim_offset=1, dim_name="dim", single_dim=False, reduce=True +): + from . import Dim, Tensor, TensorLike + + def fn(self, *args, **kwargs): + dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present) + if dim is _not_present or (single_dim and not isinstance(dim, Dim)): + with _enable_layers(self.dims): + print(f"dim fallback batch_tensor for {orig}") + return Tensor.from_batched( + orig(self._batchtensor, *args, **kwargs), self._has_device + ) + keepdim = ( + _getarg("keepdim", keepdim_offset, args, kwargs, False) if reduce else False + ) + t, levels = self._tensor, llist(self._levels) + dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim) + dim_indices = tuple(levels.index(d) for d in dims) + if reduce and not keepdim: + new_levels = [l for i, l in enumerate(levels) if i not in dim_indices] + else: + new_levels = levels + + if len(dim_indices) == 1: + dim_indices = dim_indices[ + 0 + ] # so that dims that really only take a single argument work... + args = list(args) + _patcharg(dim_name, dim_offset, args, kwargs, dim_indices) + + def wrap(t): + if isinstance(t, TensorLike): + return Tensor.from_positional(t, new_levels, self._has_device) + return t + + with _enable_layers(new_levels): + print(f"dim used batch_tensor for {orig}") + r = orig(t, *args, **kwargs) + return tree_map(wrap, r) + + return fn + + +def _def(name, *args, **kwargs): + from . 
import _Tensor + + orig = getattr(torch.Tensor, name) + setattr(_Tensor, name, _wrap(orig, *args, **kwargs)) + + +no_slice = slice(None) + +_orig_getitem = torch.Tensor.__getitem__ + + +class dim_tracker: + def __init__(self): + self.dims = llist() + self.count = [] + + def record(self, d): + if d not in self.dims: + self.dims.append(d) + self.count.append(1) + + def __getitem__(self, d): + return self.count[self.dims.index(d)] + + +def t__getitem__(self, input): + from . import _Tensor, Dim, DimensionBindError, DimList, Tensor, TensorLike + + # * bail to original example if we have a single non-Dim tensor, or a non-tensor + # * locate ... or an unbound tensor list, and determine its size, bind dim list + # (remember that None does not count to the total dim count) + # * bind simple dims and dim-packs to their sizes, count the number of uses of each dim, + # produce the re-view if needed + # * for each single-use dim index, replace with no_slice and mark that it will be added + # (keep track of whether we have to call super) + # * call super if needed + # * if we have dims to bind, bind them (it will help if we eliminated ... and None before) + + # this handles bool indexing handling, as well as some other simple cases. + + is_simple = ( + not isinstance(input, Dim) + and not isinstance(input, (tuple, list)) + and + # WAR for functorch bug where zero time tensors in getitem are not handled correctly. + not (isinstance(input, TensorLike) and input.ndim == 0) + ) + + if is_simple: + if isinstance(self, _Tensor): + return _Tensor.__torch_function__(_orig_getitem, None, (self, input)) + else: + return _orig_getitem(self, input) + + # can further optimize this case + if not isinstance(input, tuple): + input = [input] + else: + input = list(input) + + dims_indexed = 0 + expanding_object = None + dimlists = [] + for i, s in enumerate(input): + if s is ... or isinstance(s, DimList) and not s.is_bound: + if expanding_object is not None: + msg = ( + "at most one ... or unbound dimension list can exist in indexing list but" + f" found 2 at offsets {i} and {expanding_object}" + ) + raise DimensionBindError(msg) + expanding_object = i + + if isinstance(s, DimList): + dims_indexed += len(s) if s.is_bound else 0 + dimlists.append(i) + elif s is not None and s is not ...: + dims_indexed += 1 + + ndim = self.ndim + if dims_indexed > ndim: + raise IndexError( + f"at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions." 
+ ) + if expanding_object is not None: + expanding_ndims = ndim - dims_indexed + obj = input[expanding_object] + if obj is ...: + input[expanding_object : expanding_object + 1] = [ + no_slice + ] * expanding_ndims + else: + obj.bind_len(expanding_ndims) + # flatten the dimslists into the indexing + for i in reversed(dimlists): + input[i : i + 1] = input[i] + dims_indexed = 0 + requires_view = False + size = self.size() + view_sizes = [] + dims_seen = dim_tracker() + + def add_dims(t): + if not isinstance(t, _Tensor): + return + for d in t.dims: + dims_seen.record(d) + + add_dims(self) + dim_packs = [] + for i, idx in enumerate(input): + if idx is None: + input[i] = no_slice + view_sizes.append(1) + requires_view = True + else: + sz = size[dims_indexed] + if isinstance(idx, Dim): + idx.size = sz + dims_seen.record(idx) + view_sizes.append(sz) + elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim): + for d in idx: + dims_seen.record(idx) + _bind_dims_to_size(sz, idx, f"offset {i}") + view_sizes.extend(d.size for d in idx) + requires_view = True + dim_packs.append(i) + else: + add_dims(idx) + view_sizes.append(sz) + dims_indexed += 1 + if requires_view: + self = self.view(*view_sizes) + for i in reversed(dim_packs): + input[i : i + 1] = input[i] + + # currenty: + # input is flat, containing either Dim, or Tensor, or something valid for standard indexing + # self may have first-class dims as well. + + # to index: + # drop the first class dims from self, they just become direct indices of their positions + + # figure out the dimensions of the indexing tensors: union of all the dims in the tensors in the index. + # these dimensions will appear and need to be bound at the first place tensor occures + + if isinstance(self, _Tensor): + ptensor_self, levels = self._tensor, list(self._levels) + # indices to ptensor rather than self which has first-class dimensions + input_it = iter(input) + flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels] + has_device = self._has_device + to_pad = 0 + else: + ptensor_self, flat_inputs = self, input + to_pad = ptensor_self.ndim - len(flat_inputs) + has_device = True + + result_levels = [] + index_levels = [] + tensor_insert_point = None + to_expand = {} + requires_getindex = False + for i, inp in enumerate(flat_inputs): + if isinstance(inp, Dim) and dims_seen[inp] == 1: + flat_inputs[i] = no_slice + result_levels.append(inp) + elif isinstance(inp, TensorLike): + requires_getindex = True + if tensor_insert_point is None: + tensor_insert_point = len(result_levels) + ptensor, levels, _ = _tensor_levels(inp) + to_expand[i] = levels + flat_inputs[i] = ptensor + for l in levels: + if l not in index_levels: + index_levels.append(l) + else: + requires_getindex = True + result_levels.append(0) + + if tensor_insert_point is not None: + result_levels[tensor_insert_point:tensor_insert_point] = index_levels + + for i, levels in to_expand.items(): + flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels) + + if requires_getindex: + result = _orig_getitem(ptensor_self, flat_inputs) + else: + result = ptensor_self + + next_positional = -1 + if to_pad > 0: + result_levels.extend([0] * to_pad) + for i, r in enumerate(reversed(result_levels)): + if isinstance(r, int): + result_levels[-1 - i] = next_positional + next_positional -= 1 + + return Tensor.from_positional(result, result_levels, has_device) + + +# XXX - dim is optional and can be the outer-most dimension... 
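The bullet comments inside t__getitem__ above describe the whole first-class-dim indexing pipeline: each Dim is bound to the size of the positional dimension it indexes, a tuple of Dims (a "dim pack") splits one positional dimension into several via a view, and dims used exactly once are carried over as levels of the result instead of triggering an advanced-indexing call. A hedged illustration of those rules (shapes and names invented; `order` is the public counterpart of the `positional()` reference above, as used by the generated rearrange code later in this diff):

import torch
from functorch.dim import dims

x = torch.randn(2, 6)
b, h, w = dims()
w.size = 3                # pre-bind one member of the pack so the other can be inferred
y = x[b, (h, w)]          # b.size -> 2; (h, w) is a dim pack, so h.size is inferred as 6 // 3 == 2
z = y.order(b, h, w)      # rebind the first-class dims to positions: a plain (2, 2, 3) tensor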
+def stack(tensors, new_dim, dim=0, out=None): + if isinstance(dim, int): + return torch.stack(tensors, dim, out).index(dim, new_dim) + index = None + if out is not None: + out, index = _positional_no_permute(out, dim, expand_dim=True) + ptensors = [] + for t in tensors: + pt, pi = _positional_no_permute(t, dim, expand_dim=True) + if index is not None and pi != index: + pt = pt.move_dim(pi, index) + else: + index = pi + ptensors.append(pt) + pr = torch.stack(ptensors, index, out=out) + return pr.index((index, index + 1), (new_dim, dim)) + + +_orig_split = torch.Tensor.split + + +def split(self, split_size_or_sections, dim=0): + from . import _Tensor, Dim + + if isinstance(split_size_or_sections, int) or any( + isinstance(t, int) for t in split_size_or_sections + ): + if isinstance(dim, Dim): + raise ValueError( + "when dim is specified as a Dim object, split sizes must also be dimensions." + ) + return _orig_split(self, split_size_or_sections, dim=dim) + + if isinstance(dim, Dim): + assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}" + self, dim = _positional_no_permute(self, dim) + + size = self.size(dim) + total_bound_size = 0 + unbound = [] + sizes = [] + for i, d in enumerate(split_size_or_sections): + if d.is_bound: + sizes.append(d.size) + total_bound_size += d.size + else: + sizes.append(0) + unbound.append(i) + + if unbound: + assert ( + total_bound_size <= size + ), f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})" + remaining_size = size - total_bound_size + chunk_size = -(-remaining_size // len(unbound)) + for u in unbound: + sz = min(chunk_size, remaining_size) + split_size_or_sections[u].size = sz + sizes[u] = sz + remaining_size -= sz + else: + assert ( + total_bound_size == size + ), f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})" + return tuple( + t.index(dim, d) + for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim)) + ) diff --git a/MLPY/Lib/site-packages/functorch/dim/tree_map.py b/MLPY/Lib/site-packages/functorch/dim/tree_map.py new file mode 100644 index 0000000000000000000000000000000000000000..b8f24a5617969eaa101e1dc6267cbb2805c53d3e --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/tree_map.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from functorch._C import dim + +tree_flatten = dim.tree_flatten + + +def tree_map(fn, tree): + vs, unflatten = tree_flatten(tree) + return unflatten(fn(v) for v in vs) diff --git a/MLPY/Lib/site-packages/functorch/dim/wrap_type.py b/MLPY/Lib/site-packages/functorch/dim/wrap_type.py new file mode 100644 index 0000000000000000000000000000000000000000..349743ec000093f9337c09130eaf31e59b8d75ec --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/dim/wrap_type.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
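The split() wrapper just above accepts first-class Dims as the split sizes: bound Dims contribute their size, unbound Dims soak up the remaining length in near-equal chunks, and each returned piece carries its Dim in place of the positional dimension; stack() is the dual operation, stacking tensors along a new first-class dim. A small sketch, assuming that importing functorch.dim patches Tensor.split as the saved _orig_split suggests (names and shapes are invented):

import torch
from functorch.dim import dims

x = torch.randn(10, 4)
a, b = dims()
first, second = x.split([a, b], dim=0)   # both unbound: a.size == 5 and b.size == 5 are inferred
first = first.order(a)                   # back to a plain (5, 4) tensor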
+ +from types import ( + BuiltinMethodType, + FunctionType, + GetSetDescriptorType, + MethodDescriptorType, + WrapperDescriptorType, +) + +from functorch._C import dim as _C + +_wrap_method = _C._wrap_method + +FUNC_TYPES = ( + FunctionType, + MethodDescriptorType, + BuiltinMethodType, + WrapperDescriptorType, +) +PROPERTY_TYPES = (GetSetDescriptorType, property) + + +def _py_wrap_method(orig, __torch_function__): + def impl(*args, **kwargs): + return __torch_function__(orig, None, args, kwargs) + + return impl + + +def wrap_type(use_c, to_patch, pattern, __torch_function__): + if use_c: + wrap_method = _wrap_method + else: + wrap_method = _py_wrap_method + + all = {} + for t in reversed(pattern.mro()[:-1]): # skip object + all.update(t.__dict__) + + def wrap_attr(orig): + return property(wrap_method(orig.__get__, __torch_function__)) + + for name, obj in all.items(): + if name in ( + "__dict__", + "__new__", + "__init__", + "__repr__", + "__weakref__", + "__doc__", + "__module__", + "__dir__", + ): + continue + + # skip things that have been overloaded + # things that come from object like `__eq__` still need to be patched, however. + if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr( + object, name, None + ): + continue + + if isinstance(obj, FUNC_TYPES): + setattr(to_patch, name, wrap_method(obj, __torch_function__)) + elif isinstance(obj, PROPERTY_TYPES): + setattr(to_patch, name, wrap_attr(obj)) diff --git a/MLPY/Lib/site-packages/functorch/einops/__init__.py b/MLPY/Lib/site-packages/functorch/einops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d423f16537091985182f7d787e6eb672000a88a5 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/einops/__init__.py @@ -0,0 +1,3 @@ +from .rearrange import rearrange + +__all__ = ["rearrange"] diff --git a/MLPY/Lib/site-packages/functorch/einops/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/einops/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..155234c9a36aea6e14720b4ec534def873a11dd4 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/einops/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/einops/__pycache__/_parsing.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/einops/__pycache__/_parsing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a463246015c19ce326bfc70383b9e3fe0a196119 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/einops/__pycache__/_parsing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/einops/__pycache__/rearrange.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/einops/__pycache__/rearrange.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa8e69c38461797677327cccc15c3492905fb4b6 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/einops/__pycache__/rearrange.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/einops/_parsing.py b/MLPY/Lib/site-packages/functorch/einops/_parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..8da2bc9bf2afb80fa74227e5f23f77ab44638e7e --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/einops/_parsing.py @@ -0,0 +1,302 @@ +"""Adapted from https://github.com/arogozhnikov/einops/blob/36c7bb16e57d6e57f8f3050f9e07abdf3f00469f/einops/parsing.py. 
+ +MIT License + +Copyright (c) 2018 Alex Rogozhnikov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +from __future__ import annotations + +import keyword +import warnings +from typing import Collection, List, Mapping, Optional, Set, Tuple, Union + +_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated + + +class AnonymousAxis: + """Used by `ParsedExpression` to represent an axis with a size (> 1), but no associated identifier. + + Note: Different instances of this class are not equal to each other, even if they have the same value. + """ + + def __init__(self, value: str) -> None: + self.value = int(value) + if self.value < 1: + raise ValueError( + f"Anonymous axis should have positive length, not {self.value}" + ) + + def __repr__(self) -> str: + return f"{self.value}-axis" + + +class ParsedExpression: + """Structure containing information about one side of an `einops`-style pattern (e.g. 'b c (h w)').""" + + def __init__( + self, + expression: str, + *, + allow_underscore: bool = False, + allow_duplicates: bool = False, + ) -> None: + """Parse the expression and store relevant metadata. + + Args: + expression (str): the `einops`-pattern to parse + allow_underscore (bool): whether to allow axis identifier names to begin with an underscore + allow_duplicates (bool): whether to allow an identifier to appear more than once in the expression + """ + self.has_ellipsis: bool = False + self.has_ellipsis_parenthesized: Optional[bool] = None + self.identifiers: Set[Union[str, AnonymousAxis]] = set() + # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition + self.has_non_unitary_anonymous_axes: bool = False + # composition keeps structure of composite axes, see how different corner cases are handled in tests + self.composition: List[Union[List[Union[str, AnonymousAxis]], str]] = [] + if "." in expression: + if "..." 
not in expression: + raise ValueError( + "Expression may contain dots only inside ellipsis (...)" + ) + if str.count(expression, "...") != 1 or str.count(expression, ".") != 3: + raise ValueError( + "Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor " + ) + expression = expression.replace("...", _ellipsis) + self.has_ellipsis = True + + bracket_group: Optional[List[Union[str, AnonymousAxis]]] = None + + def add_axis_name(x: str) -> None: + if x in self.identifiers: + if not (allow_underscore and x == "_") and not allow_duplicates: + raise ValueError( + f"Indexing expression contains duplicate dimension '{x}'" + ) + if x == _ellipsis: + self.identifiers.add(_ellipsis) + if bracket_group is None: + self.composition.append(_ellipsis) + self.has_ellipsis_parenthesized = False + else: + bracket_group.append(_ellipsis) + self.has_ellipsis_parenthesized = True + else: + is_number = str.isdecimal(x) + if is_number and int(x) == 1: + # handling the case of anonymous axis of length 1 + if bracket_group is None: + self.composition.append([]) + else: + pass # no need to think about 1s inside parenthesis + return + is_axis_name, reason = self.check_axis_name_return_reason( + x, allow_underscore=allow_underscore + ) + if not (is_number or is_axis_name): + raise ValueError(f"Invalid axis identifier: {x}\n{reason}") + axis_name: Union[str, AnonymousAxis] = ( + AnonymousAxis(x) if is_number else x + ) + self.identifiers.add(axis_name) + if is_number: + self.has_non_unitary_anonymous_axes = True + if bracket_group is None: + self.composition.append([axis_name]) + else: + bracket_group.append(axis_name) + + current_identifier = None + for char in expression: + if char in "() ": + if current_identifier is not None: + add_axis_name(current_identifier) + current_identifier = None + if char == "(": + if bracket_group is not None: + raise ValueError( + "Axis composition is one-level (brackets inside brackets not allowed)" + ) + bracket_group = [] + elif char == ")": + if bracket_group is None: + raise ValueError("Brackets are not balanced") + self.composition.append(bracket_group) + bracket_group = None + elif str.isalnum(char) or char in ["_", _ellipsis]: + if current_identifier is None: + current_identifier = char + else: + current_identifier += char + else: + raise ValueError(f"Unknown character '{char}'") + + if bracket_group is not None: + raise ValueError(f"Imbalanced parentheses in expression: '{expression}'") + if current_identifier is not None: + add_axis_name(current_identifier) + + @staticmethod + def check_axis_name_return_reason( + name: str, allow_underscore: bool = False + ) -> Tuple[bool, str]: + """Check if the given axis name is valid, and a message explaining why if not. + + Valid axes names are python identifiers except keywords, and should not start or end with an underscore. 
+ + Args: + name (str): the axis name to check + allow_underscore (bool): whether axis names are allowed to start with an underscore + + Returns: + Tuple[bool, str]: whether the axis name is valid, a message explaining why if not + """ + if not str.isidentifier(name): + return False, "not a valid python identifier" + elif name[0] == "_" or name[-1] == "_": + if name == "_" and allow_underscore: + return True, "" + return False, "axis name should should not start or end with underscore" + else: + if keyword.iskeyword(name): + warnings.warn( + f"It is discouraged to use axes names that are keywords: {name}", + RuntimeWarning, + ) + if name in ["axis"]: + warnings.warn( + "It is discouraged to use 'axis' as an axis name and will raise an error in future", + FutureWarning, + ) + return True, "" + + @staticmethod + def check_axis_name(name: str) -> bool: + """Check if the name is a valid axis name. + + Args: + name (str): the axis name to check + + Returns: + bool: whether the axis name is valid + """ + is_valid, _ = ParsedExpression.check_axis_name_return_reason(name) + return is_valid + + +def parse_pattern( + pattern: str, axes_lengths: Mapping[str, int] +) -> Tuple[ParsedExpression, ParsedExpression]: + """Parse an `einops`-style pattern into a left-hand side and right-hand side `ParsedExpression` object. + + Args: + pattern (str): the `einops`-style rearrangement pattern + axes_lengths (Mapping[str, int]): any additional length specifications for dimensions + + Returns: + Tuple[ParsedExpression, ParsedExpression]: a tuple containing the left-hand side and right-hand side expressions + """ + # adapted from einops.einops._prepare_transformation_recipe + # https://github.com/arogozhnikov/einops/blob/230ac1526c1f42c9e1f7373912c7f8047496df11/einops/einops.py + try: + left_str, right_str = pattern.split("->") + except ValueError: + raise ValueError("Pattern must contain a single '->' separator") from None + + if _ellipsis in axes_lengths: + raise ValueError(f"'{_ellipsis}' is not an allowed axis identifier") + + left = ParsedExpression(left_str) + right = ParsedExpression(right_str) + + if not left.has_ellipsis and right.has_ellipsis: + raise ValueError( + f"Ellipsis found in right side, but not left side of a pattern {pattern}" + ) + if left.has_ellipsis and left.has_ellipsis_parenthesized: + raise ValueError( + f"Ellipsis is parenthesis in the left side is not allowed: {pattern}" + ) + + return left, right + + +def validate_rearrange_expressions( + left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int] +) -> None: + """Perform expression validations that are specific to the `rearrange` operation. 
+ + Args: + left (ParsedExpression): left-hand side expression + right (ParsedExpression): right-hand side expression + axes_lengths (Mapping[str, int]): any additional length specifications for dimensions + """ + for length in axes_lengths.values(): + if (length_type := type(length)) is not int: + raise TypeError( + f"rearrange axis lengths must be integers, got: {length_type}" + ) + + if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes: + raise ValueError("rearrange only supports unnamed axes of size 1") + + difference = set.symmetric_difference(left.identifiers, right.identifiers) + if len(difference) > 0: + raise ValueError( + f"Identifiers only on one side of rearrange expression (should be on both): {difference}" + ) + + unmatched_axes = axes_lengths.keys() - left.identifiers + if len(unmatched_axes) > 0: + raise ValueError( + f"Identifiers not found in rearrange expression: {unmatched_axes}" + ) + + +def comma_separate(collection: Collection[Union[str, Collection[str]]]) -> str: + """Convert a collection of strings representing first class dims into a comma-separated string. + + Args: + collection (Collection[Union[str, Collection[str]]]): the collection of strings to convert + + Returns: + str: the comma-separated string + + Examples: + >>> comma_separate(('d0',)) + 'd0' + + >>> comma_separate(('d0', 'd1', 'd2', 'd3')) + 'd0, d1, d2, d3' + + >>> comma_separate([('d1', 'd4')]) + '(d1, d4)' + + >>> comma_separate([('d0',), (), ('d1',), ('d2',), ('d3', 'd4')]) + '(d0,), (), (d1,), (d2,), (d3, d4)' + """ + return ", ".join( + item + if isinstance(item, str) + else f"({comma_separate(item)}{',' if len(item) == 1 else ''})" + for item in collection + ) diff --git a/MLPY/Lib/site-packages/functorch/einops/rearrange.py b/MLPY/Lib/site-packages/functorch/einops/rearrange.py new file mode 100644 index 0000000000000000000000000000000000000000..b5040d529e8c3ab1464a0fa95766e4f956de6cce --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/einops/rearrange.py @@ -0,0 +1,207 @@ +from __future__ import annotations + +import functools +from typing import Callable, Dict, List, Sequence, Tuple, Union + +import torch + +from functorch._C import dim as _C +from ._parsing import ( + _ellipsis, + AnonymousAxis, + comma_separate, + parse_pattern, + validate_rearrange_expressions, +) + +__all__ = ["rearrange"] + +dims = _C.dims + + +@functools.lru_cache(256) +def _create_rearrange_callable( + tensor_ndim: int, pattern: str, **axes_lengths: int +) -> Callable[[torch.Tensor], torch.Tensor]: + r"""Translate an `einops`-style pattern into a callable that performs the rearrange using first-class dimensions. + + Since the an equivalent result is computed for tensors with the same number of dimensions, with the same pattern and + specified axes lengths, this function can be memoized. 
+ + Args: + tensor_ndim (int): the number of dimensions in the tensor to rearrange + pattern (str): the `einops`-style rearrangement pattern + axes_lengths (int): any additional length specifications for dimensions + + Returns: + Callable[[torch.Tensor], torch.Tensor]: a callable that performs the rearrangement + """ + left, right = parse_pattern(pattern, axes_lengths) + validate_rearrange_expressions(left, right, axes_lengths) + + n_anon_dims = sum(not dim for dim in left.composition) + if left.has_ellipsis: + n_ellipsis_dims = tensor_ndim - (len(left.composition) - 1) + n_named_dims = len(left.identifiers) - 1 + + if (pattern_ndim := n_anon_dims + n_named_dims) > tensor_ndim: + raise ValueError( + f"Number of dimensions in pattern ({pattern_ndim}) must be less than or equal to the number of " + f"dimensions in the tensor ({tensor_ndim})" + ) + else: + n_ellipsis_dims = 0 + n_named_dims = len(left.identifiers) + + if (pattern_ndim := len(left.composition)) != tensor_ndim: + raise ValueError( + f"Number of dimensions in pattern ({pattern_ndim}) must be equal to the number of dimensions in " + f"the tensor ({tensor_ndim})" + ) + n_dims = n_named_dims + n_ellipsis_dims + n_anon_dims + + if n_dims == 0: + # an identity rearrangement on a 0-dimension tensor + return lambda tensor: tensor + + first_class_dims: Tuple[str, ...] = tuple(f"d{i}" for i in range(n_dims)) + identifier_dim_map: Dict[Union[str, AnonymousAxis], Tuple[str, ...]] = {} + anon_axes: List[AnonymousAxis] = [] + + # map the left-hand side identifiers to strings representing first class dims + dims_i = 0 + for dimension in left.composition: + if isinstance(dimension, list): + for identifier in dimension: + # non-unitary anon axes are not allowed in rearrange & unitary anon axes are represented as empty lists + assert isinstance(identifier, str) + identifier_dim_map[identifier] = (first_class_dims[dims_i],) + dims_i += 1 + if not dimension: + # unitary anonymous axis + anon_axis = AnonymousAxis("1") + identifier_dim_map[anon_axis] = (first_class_dims[dims_i],) + anon_axes.append(anon_axis) + dimension.append(anon_axis) + dims_i += 1 + elif dimension == _ellipsis: + identifier = _ellipsis + identifier_dim_map[identifier] = tuple( + first_class_dims[dims_i + j] for j in range(n_ellipsis_dims) + ) + dims_i += n_ellipsis_dims + else: + raise ValueError(f"Unexpected dimension: {dimension}") + + def composition_to_dims( + composition: Sequence[Union[List[Union[str, AnonymousAxis]], str]] + ) -> List[Union[str, Tuple[str, ...]]]: + """Convert a `ParsedExpression.composition` into a `Tensor.__getitem__` index of strings representing first + class dims.""" + dim_composition: List[Union[str, Tuple[str, ...]]] = [] + for dimension in composition: + if isinstance(dimension, list): + dim_composition.append( + tuple( + dim + for identifier in dimension + for dim in identifier_dim_map[identifier] + ) + ) + elif dimension == _ellipsis: + dim_composition.extend(identifier_dim_map[_ellipsis]) + else: + raise ValueError(f"Unexpected dimension: {dimension}") + return dim_composition + + left_dims = composition_to_dims(left.composition) + right_dims = composition_to_dims(right.composition) + anon_dims = tuple(identifier_dim_map[axis][0] for axis in anon_axes) + specified_lengths = tuple( + (identifier_dim_map[axis][0], length) for axis, length in axes_lengths.items() + ) + + custom_rearrange_callable_name = "do_rearrange" + custom_rearrange_callable_code = ( + ( + f"def {custom_rearrange_callable_name}(tensor):\n" + f" 
{comma_separate(first_class_dims)} = dims({n_dims})\n" + ) + + ( + "".join( + f" {dim}.size = {length}\n" for (dim, length) in specified_lengths + ) + if specified_lengths + else "" + ) + + f" tensor = tensor[{comma_separate(left_dims)}].order({comma_separate(right_dims)})\n" + + ( + f" return tensor.sum({comma_separate([anon_dims])}, keepdim=False)\n" + if anon_dims + else " return tensor\n" + ) + ) + + exec(custom_rearrange_callable_code) + return locals()[custom_rearrange_callable_name] + + +def rearrange( + tensor: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor, ...]], + pattern: str, + **axes_lengths: int, +) -> torch.Tensor: + r"""A native implementation of `einops.rearrange`, a reader-friendly smart element reordering for multidimensional + tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze, + stack, concatenate and other operations. + + See: https://einops.rocks/api/rearrange/ + + Args: + tensor (Tensor or sequence of Tensor): the tensor(s) to rearrange + pattern (str): the rearrangement pattern + axes_lengths (int): any additional length specifications for dimensions + + Returns: + Tensor: the rearranged tensor + + Examples: + >>> # suppose we have a set of 32 images in "h w c" format (height-width-channel) + >>> images = torch.randn((32, 30, 40, 3)) + + >>> # stack along first (batch) axis, output is a single array + >>> rearrange(images, 'b h w c -> b h w c').shape + torch.Size([32, 30, 40, 3]) + + >>> # concatenate images along height (vertical axis), 960 = 32 * 30 + >>> rearrange(images, 'b h w c -> (b h) w c').shape + torch.Size([960, 40, 3]) + + >>> # concatenated images along horizontal axis, 1280 = 32 * 40 + >>> rearrange(images, 'b h w c -> h (b w) c').shape + torch.Size([30, 1280, 3]) + + >>> # reordered axes to "b c h w" format for deep learning + >>> rearrange(images, 'b h w c -> b c h w').shape + torch.Size([32, 3, 30, 40]) + + >>> # flattened each image into a vector, 3600 = 30 * 40 * 3 + >>> rearrange(images, 'b h w c -> b (c h w)').shape + torch.Size([32, 3600]) + + >>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2 + >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape + torch.Size([128, 15, 20, 3]) + + >>> # space-to-depth operation + >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape + torch.Size([32, 15, 20, 12]) + """ + if not isinstance(tensor, torch.Tensor): + tensor = torch.stack(tensor) + + rearrange_callable = _create_rearrange_callable( + tensor.ndim, pattern, **axes_lengths + ) + + return rearrange_callable(tensor) diff --git a/MLPY/Lib/site-packages/functorch/experimental/__init__.py b/MLPY/Lib/site-packages/functorch/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3c0a43ae10a0334d6c48b0cbc4a8aaf63ede85a --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/experimental/__init__.py @@ -0,0 +1,6 @@ +# PyTorch forward-mode is not mature yet +from torch._functorch.apis import chunk_vmap +from torch._functorch.batch_norm_replacement import replace_all_batch_norm_modules_ +from torch._functorch.eager_transforms import hessian, jacfwd, jvp + +from functorch import functionalize diff --git a/MLPY/Lib/site-packages/functorch/experimental/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f88e58a9d94c93cba4319c536f7386a8b53cb417 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/experimental/__pycache__/control_flow.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/control_flow.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..424a96a779f406716022bdadf7558a3d0b6b27e6 Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/control_flow.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/experimental/__pycache__/ops.cpython-39.pyc b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..303a51935ef2b105907925b4e3dbe5087342bbbc Binary files /dev/null and b/MLPY/Lib/site-packages/functorch/experimental/__pycache__/ops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/functorch/experimental/control_flow.py b/MLPY/Lib/site-packages/functorch/experimental/control_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..1815c78847b7550f831b15ba0492658bcceaac3b --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/experimental/control_flow.py @@ -0,0 +1,8 @@ +from torch import cond # noqa: F401 +from torch._higher_order_ops.cond import UnsupportedAliasMutationException # noqa: F401 + +from torch._higher_order_ops.map import ( # noqa: F401 + _stack_pytree, + _unstack_pytree, + map, +) diff --git a/MLPY/Lib/site-packages/functorch/experimental/ops.py b/MLPY/Lib/site-packages/functorch/experimental/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..144515478f0c537ee28ba46f72e3a70f08858f38 --- /dev/null +++ b/MLPY/Lib/site-packages/functorch/experimental/ops.py @@ -0,0 +1 @@ +from torch._ops import HigherOrderOperator # noqa: F401 diff --git a/MLPY/Lib/site-packages/google/protobuf/__init__.py b/MLPY/Lib/site-packages/google/protobuf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02955a0b4b25f365f4858ef294d2f4efee1a4748 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/__init__.py @@ -0,0 +1,33 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Copyright 2007 Google Inc. All Rights Reserved. + +__version__ = '3.20.3' diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb2cb044ebbc49b8aed14f978813721dc5cbf695 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/any_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/any_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3063d174db7aec1d9f9c8cbee353fdd170a510bb Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/any_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/api_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/api_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cadcc00952d57a1cc06296895a678350131b3738 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/api_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55aafac494e86af09b6ac0e6b0525afa99f32cef Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_database.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_database.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ac1c8ea00be04e2b7caa644f6ba9bee308c9b50 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_database.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d3d319d407c09e958688bfda5b580d6e2fe712 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pool.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pool.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b6222343021ca37bf98e6b9fb69d871dbd1220 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/descriptor_pool.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-39.pyc 
b/MLPY/Lib/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4f8bd2c4896a69693bfc3f80dca72a76b347a91 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/empty_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/empty_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b11dedab2265b33b17ffb89b8f0cde1299fb168 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/empty_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c554635da83982abe1417cfcc3b733851aaadfa4 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/json_format.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/json_format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84adac60d4dcc639361523f39407b0591bfcff2b Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/json_format.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/message.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/message.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4ea31e3e05ef084aeef72e2f9f2a22e1dcbb033 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/message.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/message_factory.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/message_factory.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fad30f8a19f0f11d474730cf9d5235ee97043bd4 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/message_factory.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/proto_builder.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/proto_builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..791b0c3599d9cafefcfcf572e12c037be5cd3dc6 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/proto_builder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/reflection.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/reflection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e82573dfc538939dc996d40a5e376aed4b72aee Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/reflection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/service.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/service.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bcc41add0cbccfd3a3e97f2093174f1db0bc3fa Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/service.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/google/protobuf/__pycache__/service_reflection.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/service_reflection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b062f8c6d664496941b20f17fc76d2885519cc1 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/service_reflection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/source_context_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/source_context_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fee5a8b2c8757f62e8b529d4c2b8df9fbf7d156 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/source_context_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/struct_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/struct_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f97187b58e4820746468a6cef6e5f21f91531eb Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/struct_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/symbol_database.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/symbol_database.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..116274ec8cfb0f87abb0bcfc44999bf2528dc1b3 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/symbol_database.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_encoding.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_encoding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8b359f8e5076215fbdb63968801c11975d4c6ed Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_encoding.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_format.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..394088ffce4314928c6089c69d276707f37ea3d1 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/text_format.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/timestamp_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/timestamp_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6ef9f2b2568c918fde17c9fa53bd1867691d8e0 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/timestamp_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/type_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/type_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c0044d15d8ef843bfdcb3268ba2135e994924ea Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/__pycache__/type_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/__pycache__/wrappers_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/__pycache__/wrappers_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fccbfad8fa8291176e6e7bc11c70a6c2174b9bf Binary files /dev/null and 
b/MLPY/Lib/site-packages/google/protobuf/__pycache__/wrappers_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/any_pb2.py b/MLPY/Lib/site-packages/google/protobuf/any_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..9121193d1104e6c1742c6bee68f38aa0d68e26c1 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/any_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/any.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _ANY._serialized_start=46 + _ANY._serialized_end=84 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/api_pb2.py b/MLPY/Lib/site-packages/google/protobuf/api_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..1721b10a750bb66cb600526ffa13e85e1390c69d --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/api_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/api.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 +from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\x81\x02\n\x03\x41pi\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Method\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12&\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.Mixin\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x01\n\x06Method\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10request_type_url\x18\x02 \x01(\t\x12\x19\n\x11request_streaming\x18\x03 \x01(\x08\x12\x19\n\x11response_type_url\x18\x04 \x01(\t\x12\x1a\n\x12response_streaming\x18\x05 \x01(\x08\x12(\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.Option\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"#\n\x05Mixin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _API._serialized_start=113 + _API._serialized_end=370 + _METHOD._serialized_start=373 + _METHOD._serialized_end=586 + _MIXIN._serialized_start=588 + _MIXIN._serialized_end=623 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/compiler/__init__.py b/MLPY/Lib/site-packages/google/protobuf/compiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b9c03ac9fa8806ed2a6f5010d0f09435be48f4b Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/plugin_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/plugin_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dbfecfd300e1af3cde4d26ce5fc2bdc07121bf2 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/compiler/__pycache__/plugin_pb2.cpython-39.pyc differ 
diff --git a/MLPY/Lib/site-packages/google/protobuf/compiler/plugin_pb2.py b/MLPY/Lib/site-packages/google/protobuf/compiler/plugin_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..715a8913705fe62913ab66084a5a9090bbc5da2b --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/compiler/plugin_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/compiler/plugin.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"F\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x0e\n\x06suffix\x18\x04 \x01(\t\"\xba\x01\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\x12;\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.Version\"\xc1\x02\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x1a\n\x12supported_features\x18\x02 \x01(\x04\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a\x7f\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t\x12?\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfo\"8\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x42W\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb' + _VERSION._serialized_start=101 + _VERSION._serialized_end=171 + _CODEGENERATORREQUEST._serialized_start=174 + _CODEGENERATORREQUEST._serialized_end=360 + _CODEGENERATORRESPONSE._serialized_start=363 + _CODEGENERATORRESPONSE._serialized_end=684 + _CODEGENERATORRESPONSE_FILE._serialized_start=499 + _CODEGENERATORRESPONSE_FILE._serialized_end=626 + _CODEGENERATORRESPONSE_FEATURE._serialized_start=628 + _CODEGENERATORRESPONSE_FEATURE._serialized_end=684 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/descriptor.py b/MLPY/Lib/site-packages/google/protobuf/descriptor.py new file mode 100644 index 0000000000000000000000000000000000000000..18fa582da77975ea18c18c7d0f64f83a658282f7 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/descriptor.py @@ -0,0 +1,1224 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Descriptors essentially contain exactly the information found in a .proto +file, in types that make this information accessible in Python. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import threading +import warnings + +from google.protobuf.internal import api_implementation + +_USE_C_DESCRIPTORS = False +if api_implementation.Type() == 'cpp': + # Used by MakeDescriptor in cpp mode + import binascii + import os + from google.protobuf.pyext import _message + _USE_C_DESCRIPTORS = True + + +class Error(Exception): + """Base error for this module.""" + + +class TypeTransformationError(Error): + """Error transforming between python proto type and corresponding C++ type.""" + + +if _USE_C_DESCRIPTORS: + # This metaclass allows to override the behavior of code like + # isinstance(my_descriptor, FieldDescriptor) + # and make it return True when the descriptor is an instance of the extension + # type written in C++. + class DescriptorMetaclass(type): + def __instancecheck__(cls, obj): + if super(DescriptorMetaclass, cls).__instancecheck__(obj): + return True + if isinstance(obj, cls._C_DESCRIPTOR_CLASS): + return True + return False +else: + # The standard metaclass; nothing changes. + DescriptorMetaclass = type + + +class _Lock(object): + """Wrapper class of threading.Lock(), which is allowed by 'with'.""" + + def __new__(cls): + self = object.__new__(cls) + self._lock = threading.Lock() # pylint: disable=protected-access + return self + + def __enter__(self): + self._lock.acquire() + + def __exit__(self, exc_type, exc_value, exc_tb): + self._lock.release() + + +_lock = threading.Lock() + + +def _Deprecated(name): + if _Deprecated.count > 0: + _Deprecated.count -= 1 + warnings.warn( + 'Call to deprecated create function %s(). Note: Create unlinked ' + 'descriptors is going to go away. Please use get/find descriptors from ' + 'generated code or query the descriptor_pool.' 
+ % name, + category=DeprecationWarning, stacklevel=3) + + +# Deprecated warnings will print 100 times at most which should be enough for +# users to notice and do not cause timeout. +_Deprecated.count = 100 + + +_internal_create_key = object() + + +class DescriptorBase(metaclass=DescriptorMetaclass): + + """Descriptors base class. + + This class is the base of all descriptor classes. It provides common options + related functionality. + + Attributes: + has_options: True if the descriptor has non-default options. Usually it + is not necessary to read this -- just call GetOptions() which will + happily return the default instance. However, it's sometimes useful + for efficiency, and also useful inside the protobuf implementation to + avoid some bootstrapping issues. + """ + + if _USE_C_DESCRIPTORS: + # The class, or tuple of classes, that are considered as "virtual + # subclasses" of this descriptor class. + _C_DESCRIPTOR_CLASS = () + + def __init__(self, options, serialized_options, options_class_name): + """Initialize the descriptor given its options message and the name of the + class of the options message. The name of the class is required in case + the options message is None and has to be created. + """ + self._options = options + self._options_class_name = options_class_name + self._serialized_options = serialized_options + + # Does this descriptor have non-default options? + self.has_options = (options is not None) or (serialized_options is not None) + + def _SetOptions(self, options, options_class_name): + """Sets the descriptor's options + + This function is used in generated proto2 files to update descriptor + options. It must not be used outside proto2. + """ + self._options = options + self._options_class_name = options_class_name + + # Does this descriptor have non-default options? + self.has_options = options is not None + + def GetOptions(self): + """Retrieves descriptor options. + + This method returns the options set or creates the default options for the + descriptor. + """ + if self._options: + return self._options + + from google.protobuf import descriptor_pb2 + try: + options_class = getattr(descriptor_pb2, + self._options_class_name) + except AttributeError: + raise RuntimeError('Unknown options class name %s!' % + (self._options_class_name)) + + with _lock: + if self._serialized_options is None: + self._options = options_class() + else: + self._options = _ParseOptions(options_class(), + self._serialized_options) + + return self._options + + +class _NestedDescriptorBase(DescriptorBase): + """Common class for descriptors that can be nested.""" + + def __init__(self, options, options_class_name, name, full_name, + file, containing_type, serialized_start=None, + serialized_end=None, serialized_options=None): + """Constructor. + + Args: + options: Protocol message options or None + to use default message options. + options_class_name (str): The class name of the above options. + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + file (FileDescriptor): Reference to file info. + containing_type: if provided, this is a nested descriptor, with this + descriptor as parent, otherwise None. + serialized_start: The start index (inclusive) in block in the + file.serialized_pb that describes this descriptor. + serialized_end: The end index (exclusive) in block in the + file.serialized_pb that describes this descriptor. 
+ serialized_options: Protocol message serialized options or None. + """ + super(_NestedDescriptorBase, self).__init__( + options, serialized_options, options_class_name) + + self.name = name + # TODO(falk): Add function to calculate full_name instead of having it in + # memory? + self.full_name = full_name + self.file = file + self.containing_type = containing_type + + self._serialized_start = serialized_start + self._serialized_end = serialized_end + + def CopyToProto(self, proto): + """Copies this to the matching proto in descriptor_pb2. + + Args: + proto: An empty proto instance from descriptor_pb2. + + Raises: + Error: If self couldn't be serialized, due to to few constructor + arguments. + """ + if (self.file is not None and + self._serialized_start is not None and + self._serialized_end is not None): + proto.ParseFromString(self.file.serialized_pb[ + self._serialized_start:self._serialized_end]) + else: + raise Error('Descriptor does not contain serialization.') + + +class Descriptor(_NestedDescriptorBase): + + """Descriptor for a protocol message type. + + Attributes: + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + containing_type (Descriptor): Reference to the descriptor of the type + containing us, or None if this is top-level. + fields (list[FieldDescriptor]): Field descriptors for all fields in + this type. + fields_by_number (dict(int, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed + by "number" attribute in each FieldDescriptor. + fields_by_name (dict(str, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by + "name" attribute in each :class:`FieldDescriptor`. + nested_types (list[Descriptor]): Descriptor references + for all protocol message types nested within this one. + nested_types_by_name (dict(str, Descriptor)): Same Descriptor + objects as in :attr:`nested_types`, but indexed by "name" attribute + in each Descriptor. + enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references + for all enums contained within this type. + enum_types_by_name (dict(str, EnumDescriptor)): Same + :class:`EnumDescriptor` objects as in :attr:`enum_types`, but + indexed by "name" attribute in each EnumDescriptor. + enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping + from enum value name to :class:`EnumValueDescriptor` for that value. + extensions (list[FieldDescriptor]): All extensions defined directly + within this message type (NOT within a nested type). + extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor + objects as :attr:`extensions`, but indexed by "name" attribute of each + FieldDescriptor. + is_extendable (bool): Does this type define any extension ranges? + oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields + in this message. + oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in + :attr:`oneofs`, but indexed by "name" attribute. + file (FileDescriptor): Reference to file descriptor. 
+ + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.Descriptor + + def __new__( + cls, + name=None, + full_name=None, + filename=None, + containing_type=None, + fields=None, + nested_types=None, + enum_types=None, + extensions=None, + options=None, + serialized_options=None, + is_extendable=True, + extension_ranges=None, + oneofs=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + syntax=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindMessageTypeByName(full_name) + + # NOTE(tmarek): The file argument redefining a builtin is nothing we can + # fix right now since we don't know how many clients already rely on the + # name of the argument. + def __init__(self, name, full_name, filename, containing_type, fields, + nested_types, enum_types, extensions, options=None, + serialized_options=None, + is_extendable=True, extension_ranges=None, oneofs=None, + file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin + syntax=None, create_key=None): + """Arguments to __init__() are as described in the description + of Descriptor fields above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('Descriptor') + + super(Descriptor, self).__init__( + options, 'MessageOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + # We have fields in addition to fields_by_name and fields_by_number, + # so that: + # 1. Clients can index fields by "order in which they're listed." + # 2. Clients can easily iterate over all fields with the terse + # syntax: for f in descriptor.fields: ... + self.fields = fields + for field in self.fields: + field.containing_type = self + self.fields_by_number = dict((f.number, f) for f in fields) + self.fields_by_name = dict((f.name, f) for f in fields) + self._fields_by_camelcase_name = None + + self.nested_types = nested_types + for nested_type in nested_types: + nested_type.containing_type = self + self.nested_types_by_name = dict((t.name, t) for t in nested_types) + + self.enum_types = enum_types + for enum_type in self.enum_types: + enum_type.containing_type = self + self.enum_types_by_name = dict((t.name, t) for t in enum_types) + self.enum_values_by_name = dict( + (v.name, v) for t in enum_types for v in t.values) + + self.extensions = extensions + for extension in self.extensions: + extension.extension_scope = self + self.extensions_by_name = dict((f.name, f) for f in extensions) + self.is_extendable = is_extendable + self.extension_ranges = extension_ranges + self.oneofs = oneofs if oneofs is not None else [] + self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) + for oneof in self.oneofs: + oneof.containing_type = self + self.syntax = syntax or "proto2" + + @property + def fields_by_camelcase_name(self): + """Same FieldDescriptor objects as in :attr:`fields`, but indexed by + :attr:`FieldDescriptor.camelcase_name`. + """ + if self._fields_by_camelcase_name is None: + self._fields_by_camelcase_name = dict( + (f.camelcase_name, f) for f in self.fields) + return self._fields_by_camelcase_name + + def EnumValueName(self, enum, value): + """Returns the string name of an enum value. + + This is just a small helper method to simplify a common operation. 
+ + Args: + enum: string name of the Enum. + value: int, value of the enum. + + Returns: + string name of the enum value. + + Raises: + KeyError if either the Enum doesn't exist or the value is not a valid + value for the enum. + """ + return self.enum_types_by_name[enum].values_by_number[value].name + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.DescriptorProto. + + Args: + proto: An empty descriptor_pb2.DescriptorProto. + """ + # This function is overridden to give a better doc comment. + super(Descriptor, self).CopyToProto(proto) + + +# TODO(robinson): We should have aggressive checking here, +# for example: +# * If you specify a repeated field, you should not be allowed +# to specify a default value. +# * [Other examples here as needed]. +# +# TODO(robinson): for this and other *Descriptor classes, we +# might also want to lock things down aggressively (e.g., +# prevent clients from setting the attributes). Having +# stronger invariants here in general will reduce the number +# of runtime checks we must do in reflection.py... +class FieldDescriptor(DescriptorBase): + + """Descriptor for a single field in a .proto file. + + Attributes: + name (str): Name of this field, exactly as it appears in .proto. + full_name (str): Name of this field, including containing scope. This is + particularly relevant for extensions. + index (int): Dense, 0-indexed index giving the order that this + field textually appears within its message in the .proto file. + number (int): Tag number declared for this field in the .proto file. + + type (int): (One of the TYPE_* constants below) Declared type. + cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to + represent this field. + + label (int): (One of the LABEL_* constants below) Tells whether this + field is optional, required, or repeated. + has_default_value (bool): True if this field has a default value defined, + otherwise false. + default_value (Varies): Default value of this field. Only + meaningful for non-repeated scalar fields. Repeated fields + should always set this to [], and non-repeated composite + fields should always set this to None. + + containing_type (Descriptor): Descriptor of the protocol message + type that contains this field. Set by the Descriptor constructor + if we're passed into one. + Somewhat confusingly, for extension fields, this is the + descriptor of the EXTENDED message, not the descriptor + of the message containing this field. (See is_extension and + extension_scope below). + message_type (Descriptor): If a composite field, a descriptor + of the message type contained in this field. Otherwise, this is None. + enum_type (EnumDescriptor): If this field contains an enum, a + descriptor of that enum. Otherwise, this is None. + + is_extension: True iff this describes an extension field. + extension_scope (Descriptor): Only meaningful if is_extension is True. + Gives the message that immediately contains this extension field. + Will be None iff we're a top-level (file-level) extension field. + + options (descriptor_pb2.FieldOptions): Protocol message field options or + None to use default field options. + + containing_oneof (OneofDescriptor): If the field is a member of a oneof + union, contains its descriptor. Otherwise, None. + + file (FileDescriptor): Reference to file descriptor. + """ + + # Must be consistent with C++ FieldDescriptor::Type enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. 
+ TYPE_DOUBLE = 1 + TYPE_FLOAT = 2 + TYPE_INT64 = 3 + TYPE_UINT64 = 4 + TYPE_INT32 = 5 + TYPE_FIXED64 = 6 + TYPE_FIXED32 = 7 + TYPE_BOOL = 8 + TYPE_STRING = 9 + TYPE_GROUP = 10 + TYPE_MESSAGE = 11 + TYPE_BYTES = 12 + TYPE_UINT32 = 13 + TYPE_ENUM = 14 + TYPE_SFIXED32 = 15 + TYPE_SFIXED64 = 16 + TYPE_SINT32 = 17 + TYPE_SINT64 = 18 + MAX_TYPE = 18 + + # Must be consistent with C++ FieldDescriptor::CppType enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + CPPTYPE_INT32 = 1 + CPPTYPE_INT64 = 2 + CPPTYPE_UINT32 = 3 + CPPTYPE_UINT64 = 4 + CPPTYPE_DOUBLE = 5 + CPPTYPE_FLOAT = 6 + CPPTYPE_BOOL = 7 + CPPTYPE_ENUM = 8 + CPPTYPE_STRING = 9 + CPPTYPE_MESSAGE = 10 + MAX_CPPTYPE = 10 + + _PYTHON_TO_CPP_PROTO_TYPE_MAP = { + TYPE_DOUBLE: CPPTYPE_DOUBLE, + TYPE_FLOAT: CPPTYPE_FLOAT, + TYPE_ENUM: CPPTYPE_ENUM, + TYPE_INT64: CPPTYPE_INT64, + TYPE_SINT64: CPPTYPE_INT64, + TYPE_SFIXED64: CPPTYPE_INT64, + TYPE_UINT64: CPPTYPE_UINT64, + TYPE_FIXED64: CPPTYPE_UINT64, + TYPE_INT32: CPPTYPE_INT32, + TYPE_SFIXED32: CPPTYPE_INT32, + TYPE_SINT32: CPPTYPE_INT32, + TYPE_UINT32: CPPTYPE_UINT32, + TYPE_FIXED32: CPPTYPE_UINT32, + TYPE_BYTES: CPPTYPE_STRING, + TYPE_STRING: CPPTYPE_STRING, + TYPE_BOOL: CPPTYPE_BOOL, + TYPE_MESSAGE: CPPTYPE_MESSAGE, + TYPE_GROUP: CPPTYPE_MESSAGE + } + + # Must be consistent with C++ FieldDescriptor::Label enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + LABEL_OPTIONAL = 1 + LABEL_REQUIRED = 2 + LABEL_REPEATED = 3 + MAX_LABEL = 3 + + # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, + # and kLastReservedNumber in descriptor.h + MAX_FIELD_NUMBER = (1 << 29) - 1 + FIRST_RESERVED_FIELD_NUMBER = 19000 + LAST_RESERVED_FIELD_NUMBER = 19999 + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FieldDescriptor + + def __new__(cls, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + _message.Message._CheckCalledFromGeneratedFile() + if is_extension: + return _message.default_pool.FindExtensionByName(full_name) + else: + return _message.default_pool.FindFieldByName(full_name) + + def __init__(self, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + """The arguments are as described in the description of FieldDescriptor + attributes above. + + Note that containing_type may be None, and may be set later if necessary + (to deal with circular references between message types, for example). + Likewise for extension_scope. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('FieldDescriptor') + + super(FieldDescriptor, self).__init__( + options, serialized_options, 'FieldOptions') + self.name = name + self.full_name = full_name + self.file = file + self._camelcase_name = None + if json_name is None: + self.json_name = _ToJsonName(name) + else: + self.json_name = json_name + self.index = index + self.number = number + self.type = type + self.cpp_type = cpp_type + self.label = label + self.has_default_value = has_default_value + self.default_value = default_value + self.containing_type = containing_type + self.message_type = message_type + self.enum_type = enum_type + self.is_extension = is_extension + self.extension_scope = extension_scope + self.containing_oneof = containing_oneof + if api_implementation.Type() == 'cpp': + if is_extension: + self._cdescriptor = _message.default_pool.FindExtensionByName(full_name) + else: + self._cdescriptor = _message.default_pool.FindFieldByName(full_name) + else: + self._cdescriptor = None + + @property + def camelcase_name(self): + """Camelcase name of this field. + + Returns: + str: the name in CamelCase. + """ + if self._camelcase_name is None: + self._camelcase_name = _ToCamelCase(self.name) + return self._camelcase_name + + @property + def has_presence(self): + """Whether the field distinguishes between unpopulated and default values. + + Raises: + RuntimeError: singular field that is not linked with message nor file. + """ + if self.label == FieldDescriptor.LABEL_REPEATED: + return False + if (self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE or + self.containing_oneof): + return True + if hasattr(self.file, 'syntax'): + return self.file.syntax == 'proto2' + if hasattr(self.message_type, 'syntax'): + return self.message_type.syntax == 'proto2' + raise RuntimeError( + 'has_presence is not ready to use because field %s is not' + ' linked with message type nor file' % self.full_name) + + @staticmethod + def ProtoTypeToCppProtoType(proto_type): + """Converts from a Python proto type to a C++ Proto Type. + + The Python ProtocolBuffer classes specify both the 'Python' datatype and the + 'C++' datatype - and they're not the same. This helper method should + translate from one to another. + + Args: + proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) + Returns: + int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. + Raises: + TypeTransformationError: when the Python proto type isn't known. + """ + try: + return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] + except KeyError: + raise TypeTransformationError('Unknown proto_type: %s' % proto_type) + + +class EnumDescriptor(_NestedDescriptorBase): + + """Descriptor for an enum defined in a .proto file. + + Attributes: + name (str): Name of the enum type. + full_name (str): Full name of the type, including package name + and any enclosing type(s). + + values (list[EnumValueDescriptor]): List of the values + in this enum. + values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "name" field of each EnumValueDescriptor. + values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "number" field of each EnumValueDescriptor. + containing_type (Descriptor): Descriptor of the immediate containing + type of this enum, or None if this is an enum defined at the + top level in a .proto file. Set by Descriptor's constructor + if we're passed into one. + file (FileDescriptor): Reference to file descriptor. 
+ options (descriptor_pb2.EnumOptions): Enum options message or + None to use default enum options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumDescriptor + + def __new__(cls, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindEnumTypeByName(full_name) + + def __init__(self, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + """Arguments are as described in the attribute description above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('EnumDescriptor') + + super(EnumDescriptor, self).__init__( + options, 'EnumOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + self.values = values + for value in self.values: + value.type = self + self.values_by_name = dict((v.name, v) for v in values) + # Values are reversed to ensure that the first alias is retained. + self.values_by_number = dict((v.number, v) for v in reversed(values)) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.EnumDescriptorProto. + + Args: + proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(EnumDescriptor, self).CopyToProto(proto) + + +class EnumValueDescriptor(DescriptorBase): + + """Descriptor for a single value within an enum. + + Attributes: + name (str): Name of this value. + index (int): Dense, 0-indexed index giving the order that this + value appears textually within its enum in the .proto file. + number (int): Actual number assigned to this enum value. + type (EnumDescriptor): :class:`EnumDescriptor` to which this value + belongs. Set by :class:`EnumDescriptor`'s constructor if we're + passed into one. + options (descriptor_pb2.EnumValueOptions): Enum value options message or + None to use default enum value options options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor + + def __new__(cls, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + # There is no way we can build a complete EnumValueDescriptor with the + # given parameters (the name of the Enum is not known, for example). + # Fortunately generated files just pass it to the EnumDescriptor() + # constructor, which will ignore it, so returning None is good enough. 
+ return None + + def __init__(self, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('EnumValueDescriptor') + + super(EnumValueDescriptor, self).__init__( + options, serialized_options, 'EnumValueOptions') + self.name = name + self.index = index + self.number = number + self.type = type + + +class OneofDescriptor(DescriptorBase): + """Descriptor for a oneof field. + + Attributes: + name (str): Name of the oneof field. + full_name (str): Full name of the oneof field, including package name. + index (int): 0-based index giving the order of the oneof field inside + its containing type. + containing_type (Descriptor): :class:`Descriptor` of the protocol message + type that contains this field. Set by the :class:`Descriptor` constructor + if we're passed into one. + fields (list[FieldDescriptor]): The list of field descriptors this + oneof can contain. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.OneofDescriptor + + def __new__( + cls, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindOneofByName(full_name) + + def __init__( + self, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('OneofDescriptor') + + super(OneofDescriptor, self).__init__( + options, serialized_options, 'OneofOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_type = containing_type + self.fields = fields + + +class ServiceDescriptor(_NestedDescriptorBase): + + """Descriptor for a service. + + Attributes: + name (str): Name of the service. + full_name (str): Full name of the service, including package name. + index (int): 0-indexed index giving the order that this services + definition appears within the .proto file. + methods (list[MethodDescriptor]): List of methods provided by this + service. + methods_by_name (dict(str, MethodDescriptor)): Same + :class:`MethodDescriptor` objects as in :attr:`methods_by_name`, but + indexed by "name" attribute in each :class:`MethodDescriptor`. + options (descriptor_pb2.ServiceOptions): Service options message or + None to use default service options. + file (FileDescriptor): Reference to file info. 
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor + + def __new__( + cls, + name=None, + full_name=None, + index=None, + methods=None, + options=None, + serialized_options=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindServiceByName(full_name) + + def __init__(self, name, full_name, index, methods, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + if create_key is not _internal_create_key: + _Deprecated('ServiceDescriptor') + + super(ServiceDescriptor, self).__init__( + options, 'ServiceOptions', name, full_name, file, + None, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + self.index = index + self.methods = methods + self.methods_by_name = dict((m.name, m) for m in methods) + # Set the containing service for each method in this service. + for method in self.methods: + method.containing_service = self + + def FindMethodByName(self, name): + """Searches for the specified method, and returns its descriptor. + + Args: + name (str): Name of the method. + Returns: + MethodDescriptor or None: the descriptor for the requested method, if + found. + """ + return self.methods_by_name.get(name, None) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.ServiceDescriptorProto. + + Args: + proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(ServiceDescriptor, self).CopyToProto(proto) + + +class MethodDescriptor(DescriptorBase): + + """Descriptor for a method in a service. + + Attributes: + name (str): Name of the method within the service. + full_name (str): Full name of method. + index (int): 0-indexed index of the method inside the service. + containing_service (ServiceDescriptor): The service that contains this + method. + input_type (Descriptor): The descriptor of the message that this method + accepts. + output_type (Descriptor): The descriptor of the message that this method + returns. + client_streaming (bool): Whether this method uses client streaming. + server_streaming (bool): Whether this method uses server streaming. + options (descriptor_pb2.MethodOptions or None): Method options message, or + None to use default method options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.MethodDescriptor + + def __new__(cls, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindMethodByName(full_name) + + def __init__(self, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + """The arguments are as described in the description of MethodDescriptor + attributes above. + + Note that containing_service may be None, and may be set later if necessary. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('MethodDescriptor') + + super(MethodDescriptor, self).__init__( + options, serialized_options, 'MethodOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_service = containing_service + self.input_type = input_type + self.output_type = output_type + self.client_streaming = client_streaming + self.server_streaming = server_streaming + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.MethodDescriptorProto. + + Args: + proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if self.containing_service is not None: + from google.protobuf import descriptor_pb2 + service_proto = descriptor_pb2.ServiceDescriptorProto() + self.containing_service.CopyToProto(service_proto) + proto.CopyFrom(service_proto.method[self.index]) + else: + raise Error('Descriptor does not contain a service.') + + +class FileDescriptor(DescriptorBase): + """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. + + Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and + :attr:`dependencies` fields are only set by the + :py:mod:`google.protobuf.message_factory` module, and not by the generated + proto code. + + Attributes: + name (str): Name of file, relative to root of source tree. + package (str): Name of the package + syntax (str): string indicating syntax of the file (can be "proto2" or + "proto3") + serialized_pb (bytes): Byte string of serialized + :class:`descriptor_pb2.FileDescriptorProto`. + dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor` + objects this :class:`FileDescriptor` depends on. + public_dependencies (list[FileDescriptor]): A subset of + :attr:`dependencies`, which were declared as "public". + message_types_by_name (dict(str, Descriptor)): Mapping from message names + to their :class:`Descriptor`. + enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to + their :class:`EnumDescriptor`. + extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension + names declared at file scope to their :class:`FieldDescriptor`. + services_by_name (dict(str, ServiceDescriptor)): Mapping from services' + names to their :class:`ServiceDescriptor`. + pool (DescriptorPool): The pool this descriptor belongs to. When not + passed to the constructor, the global default pool is used. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FileDescriptor + + def __new__(cls, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + # FileDescriptor() is called from various places, not only from generated + # files, to register dynamic proto files and messages. 
+ # pylint: disable=g-explicit-bool-comparison + if serialized_pb == b'': + # Cpp generated code must be linked in if serialized_pb is '' + try: + return _message.default_pool.FindFileByName(name) + except KeyError: + raise RuntimeError('Please link in cpp generated lib for %s' % (name)) + elif serialized_pb: + return _message.default_pool.AddSerializedFile(serialized_pb) + else: + return super(FileDescriptor, cls).__new__(cls) + + def __init__(self, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + """Constructor.""" + if create_key is not _internal_create_key: + _Deprecated('FileDescriptor') + + super(FileDescriptor, self).__init__( + options, serialized_options, 'FileOptions') + + if pool is None: + from google.protobuf import descriptor_pool + pool = descriptor_pool.Default() + self.pool = pool + self.message_types_by_name = {} + self.name = name + self.package = package + self.syntax = syntax or "proto2" + self.serialized_pb = serialized_pb + + self.enum_types_by_name = {} + self.extensions_by_name = {} + self.services_by_name = {} + self.dependencies = (dependencies or []) + self.public_dependencies = (public_dependencies or []) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.FileDescriptorProto. + + Args: + proto: An empty descriptor_pb2.FileDescriptorProto. + """ + proto.ParseFromString(self.serialized_pb) + + +def _ParseOptions(message, string): + """Parses serialized options. + + This helper function is used to parse serialized options in generated + proto2 files. It must not be used outside proto2. + """ + message.ParseFromString(string) + return message + + +def _ToCamelCase(name): + """Converts name to camel-case and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + if result: + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + # Lower-case the first letter. + if result and result[0].isupper(): + result[0] = result[0].lower() + return ''.join(result) + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _ToJsonName(name): + """Converts name to Json name and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + return ''.join(result) + + +def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, + syntax=None): + """Make a protobuf Descriptor given a DescriptorProto protobuf. + + Handles nested descriptors. Note that this is limited to the scope of defining + a message inside of another message. Composite fields can currently only be + resolved if the message is defined in the same scope as the field. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: Optional package name for the new message Descriptor (string). + build_file_if_cpp: Update the C++ descriptor pool if api matches. + Set to False on recursion, so no duplicates are created. + syntax: The syntax/semantics that should be used. Set to "proto3" to get + proto3 field presence semantics. + Returns: + A Descriptor for protobuf messages. 
+ """ + if api_implementation.Type() == 'cpp' and build_file_if_cpp: + # The C++ implementation requires all descriptors to be backed by the same + # definition in the C++ descriptor pool. To do this, we build a + # FileDescriptorProto with the same definition as this descriptor and build + # it into the pool. + from google.protobuf import descriptor_pb2 + file_descriptor_proto = descriptor_pb2.FileDescriptorProto() + file_descriptor_proto.message_type.add().MergeFrom(desc_proto) + + # Generate a random name for this proto file to prevent conflicts with any + # imported ones. We need to specify a file name so the descriptor pool + # accepts our FileDescriptorProto, but it is not important what that file + # name is actually set to. + proto_name = binascii.hexlify(os.urandom(16)).decode('ascii') + + if package: + file_descriptor_proto.name = os.path.join(package.replace('.', '/'), + proto_name + '.proto') + file_descriptor_proto.package = package + else: + file_descriptor_proto.name = proto_name + '.proto' + + _message.default_pool.Add(file_descriptor_proto) + result = _message.default_pool.FindFileByName(file_descriptor_proto.name) + + if _USE_C_DESCRIPTORS: + return result.message_types_by_name[desc_proto.name] + + full_message_name = [desc_proto.name] + if package: full_message_name.insert(0, package) + + # Create Descriptors for enum types + enum_types = {} + for enum_proto in desc_proto.enum_type: + full_name = '.'.join(full_message_name + [enum_proto.name]) + enum_desc = EnumDescriptor( + enum_proto.name, full_name, None, [ + EnumValueDescriptor(enum_val.name, ii, enum_val.number, + create_key=_internal_create_key) + for ii, enum_val in enumerate(enum_proto.value)], + create_key=_internal_create_key) + enum_types[full_name] = enum_desc + + # Create Descriptors for nested types + nested_types = {} + for nested_proto in desc_proto.nested_type: + full_name = '.'.join(full_message_name + [nested_proto.name]) + # Nested types are just those defined inside of the message, not all types + # used by fields in the message, so no loops are possible here. 
+ nested_desc = MakeDescriptor(nested_proto, + package='.'.join(full_message_name), + build_file_if_cpp=False, + syntax=syntax) + nested_types[full_name] = nested_desc + + fields = [] + for field_proto in desc_proto.field: + full_name = '.'.join(full_message_name + [field_proto.name]) + enum_desc = None + nested_desc = None + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + if field_proto.HasField('type_name'): + type_name = field_proto.type_name + full_type_name = '.'.join(full_message_name + + [type_name[type_name.rfind('.')+1:]]) + if full_type_name in nested_types: + nested_desc = nested_types[full_type_name] + elif full_type_name in enum_types: + enum_desc = enum_types[full_type_name] + # Else type_name references a non-local type, which isn't implemented + field = FieldDescriptor( + field_proto.name, full_name, field_proto.number - 1, + field_proto.number, field_proto.type, + FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), + field_proto.label, None, nested_desc, enum_desc, None, False, None, + options=_OptionsOrNone(field_proto), has_default_value=False, + json_name=json_name, create_key=_internal_create_key) + fields.append(field) + + desc_name = '.'.join(full_message_name) + return Descriptor(desc_proto.name, desc_name, None, None, fields, + list(nested_types.values()), list(enum_types.values()), [], + options=_OptionsOrNone(desc_proto), + create_key=_internal_create_key) diff --git a/MLPY/Lib/site-packages/google/protobuf/descriptor_database.py b/MLPY/Lib/site-packages/google/protobuf/descriptor_database.py new file mode 100644 index 0000000000000000000000000000000000000000..3e95a3b80cc1e3bb79c5a8c2b1ef2a9758c7185e --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/descriptor_database.py @@ -0,0 +1,177 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Provides a container for DescriptorProtos.""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + + +class Error(Exception): + pass + + +class DescriptorDatabaseConflictingDefinitionError(Error): + """Raised when a proto is added with the same name & different descriptor.""" + + +class DescriptorDatabase(object): + """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" + + def __init__(self): + self._file_desc_protos_by_file = {} + self._file_desc_protos_by_symbol = {} + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this database. + + Args: + file_desc_proto: The FileDescriptorProto to add. + Raises: + DescriptorDatabaseConflictingDefinitionError: if an attempt is made to + add a proto with the same name but different definition than an + existing proto in the database. + """ + proto_name = file_desc_proto.name + if proto_name not in self._file_desc_protos_by_file: + self._file_desc_protos_by_file[proto_name] = file_desc_proto + elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: + raise DescriptorDatabaseConflictingDefinitionError( + '%s already added, but with different descriptor.' % proto_name) + else: + return + + # Add all the top-level descriptors to the index. + package = file_desc_proto.package + for message in file_desc_proto.message_type: + for name in _ExtractSymbols(message, package): + self._AddSymbol(name, file_desc_proto) + for enum in file_desc_proto.enum_type: + self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto) + for enum_value in enum.value: + self._file_desc_protos_by_symbol[ + '.'.join((package, enum_value.name))] = file_desc_proto + for extension in file_desc_proto.extension: + self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto) + for service in file_desc_proto.service: + self._AddSymbol(('.'.join((package, service.name))), file_desc_proto) + + def FindFileByName(self, name): + """Finds the file descriptor proto by file name. + + Typically the file name is a relative path ending to a .proto file. The + proto with the given name will have to have been added to this database + using the Add method or else an error will be raised. + + Args: + name: The file name to find. + + Returns: + The file descriptor proto matching the name. + + Raises: + KeyError if no file by the given name was added. + """ + + return self._file_desc_protos_by_file[name] + + def FindFileContainingSymbol(self, symbol): + """Finds the file descriptor proto containing the specified symbol. + + The symbol should be a fully qualified name including the file descriptor's + package and any containing messages. Some examples: + + 'some.package.name.Message' + 'some.package.name.Message.NestedEnum' + 'some.package.name.Message.some_field' + + The file descriptor proto containing the specified symbol must be added to + this database using the Add method or else an error will be raised. + + Args: + symbol: The fully qualified symbol name. + + Returns: + The file descriptor proto containing the symbol. + + Raises: + KeyError if no file contains the specified symbol. + """ + try: + return self._file_desc_protos_by_symbol[symbol] + except KeyError: + # Fields, enum values, and nested extensions are not in + # _file_desc_protos_by_symbol. Try to find the top level + # descriptor. Non-existent nested symbol under a valid top level + # descriptor can also be found. The behavior is the same with + # protobuf C++. 
+ top_level, _, _ = symbol.rpartition('.') + try: + return self._file_desc_protos_by_symbol[top_level] + except KeyError: + # Raise the original symbol as a KeyError for better diagnostics. + raise KeyError(symbol) + + def FindFileContainingExtension(self, extendee_name, extension_number): + # TODO(jieluo): implement this API. + return None + + def FindAllExtensionNumbers(self, extendee_name): + # TODO(jieluo): implement this API. + return [] + + def _AddSymbol(self, name, file_desc_proto): + if name in self._file_desc_protos_by_symbol: + warn_msg = ('Conflict register for file "' + file_desc_proto.name + + '": ' + name + + ' is already defined in file "' + + self._file_desc_protos_by_symbol[name].name + '"') + warnings.warn(warn_msg, RuntimeWarning) + self._file_desc_protos_by_symbol[name] = file_desc_proto + + +def _ExtractSymbols(desc_proto, package): + """Pulls out all the symbols from a descriptor proto. + + Args: + desc_proto: The proto to extract symbols from. + package: The package containing the descriptor type. + + Yields: + The fully qualified name found in the descriptor. + """ + message_name = package + '.' + desc_proto.name if package else desc_proto.name + yield message_name + for nested_type in desc_proto.nested_type: + for symbol in _ExtractSymbols(nested_type, message_name): + yield symbol + for enum_type in desc_proto.enum_type: + yield '.'.join((message_name, enum_type.name)) diff --git a/MLPY/Lib/site-packages/google/protobuf/descriptor_pb2.py b/MLPY/Lib/site-packages/google/protobuf/descriptor_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..f57038643260fb13b70705c7f807a465f1f1129b --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/descriptor_pb2.py @@ -0,0 +1,1925 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
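The DescriptorDatabase added just above indexes each FileDescriptorProto by file name and by the message, enum, enum value, extension, and service symbols it defines (nested messages and enums included via _ExtractSymbols); field lookups fall back to the enclosing message, as the except branch in FindFileContainingSymbol shows. A small usage sketch of that API; the file name 'example/greeting.proto', the 'example' package, and the 'Greeting' message are invented for illustration and are not part of the vendored files:

from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_database

db = descriptor_database.DescriptorDatabase()

# Build a FileDescriptorProto by hand and register it.
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = 'example/greeting.proto'
file_proto.package = 'example'
message = file_proto.message_type.add()
message.name = 'Greeting'
text_field = message.field.add()
text_field.name = 'text'
text_field.number = 1
text_field.type = descriptor_pb2.FieldDescriptorProto.TYPE_STRING
text_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

db.Add(file_proto)

# Lookups by file name and by fully qualified symbol return the same proto.
assert db.FindFileByName('example/greeting.proto') is file_proto
assert db.FindFileContainingSymbol('example.Greeting') is file_proto
# Field symbols are not indexed directly; the database falls back to the
# containing message name.
assert db.FindFileContainingSymbol('example.Greeting.text') is file_proto
# Re-adding the identical proto is a no-op; a different proto under the same
# name would raise DescriptorDatabaseConflictingDefinitionError.
db.Add(file_proto)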
+# source: google/protobuf/descriptor.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR = _descriptor.FileDescriptor( + name='google/protobuf/descriptor.proto', + package='google.protobuf', + syntax='proto2', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 
\x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' + ) +else: + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t 
\x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 \x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b 
\x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') + +if _descriptor._USE_C_DESCRIPTORS == False: + _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.protobuf.FieldDescriptorProto.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_DOUBLE', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FLOAT', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT64', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT64', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT32', index=4, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED64', index=5, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED32', index=6, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BOOL', index=7, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_STRING', index=8, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='TYPE_GROUP', index=9, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_MESSAGE', index=10, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BYTES', index=11, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT32', index=12, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_ENUM', index=13, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED32', index=14, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED64', index=15, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT32', index=16, number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT64', index=17, number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) + + _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( + name='Label', + full_name='google.protobuf.FieldDescriptorProto.Label', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='LABEL_OPTIONAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REQUIRED', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REPEATED', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) + + _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( + name='OptimizeMode', + full_name='google.protobuf.FileOptions.OptimizeMode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SPEED', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CODE_SIZE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LITE_RUNTIME', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) + + _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( + name='CType', + full_name='google.protobuf.FieldOptions.CType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + 
name='STRING', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CORD', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='STRING_PIECE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) + + _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( + name='JSType', + full_name='google.protobuf.FieldOptions.JSType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JS_NORMAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_STRING', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_NUMBER', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) + + _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( + name='IdempotencyLevel', + full_name='google.protobuf.MethodOptions.IdempotencyLevel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IDEMPOTENCY_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NO_SIDE_EFFECTS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IDEMPOTENT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) + + + _FILEDESCRIPTORSET = _descriptor.Descriptor( + name='FileDescriptorSet', + full_name='google.protobuf.FileDescriptorSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEDESCRIPTORPROTO = _descriptor.Descriptor( + name='FileDescriptorProto', + full_name='google.protobuf.FileDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, + number=10, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, + number=11, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, + number=8, type=11, 
cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( + name='ExtensionRange', + full_name='google.protobuf.DescriptorProto.ExtensionRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( + name='ReservedRange', + full_name='google.protobuf.DescriptorProto.ReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO = _descriptor.Descriptor( + name='DescriptorProto', + full_name='google.protobuf.DescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.DescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='field', full_name='google.protobuf.DescriptorProto.field', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.options', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, + number=10, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( + name='ExtensionRangeOptions', + full_name='google.protobuf.ExtensionRangeOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( + name='FieldDescriptorProto', + full_name='google.protobuf.FieldDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='label', 
full_name='google.protobuf.FieldDescriptorProto.label', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDDESCRIPTORPROTO_TYPE, + _FIELDDESCRIPTORPROTO_LABEL, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( + name='OneofDescriptorProto', + full_name='google.protobuf.OneofDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( + name='EnumReservedRange', + full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumDescriptorProto', + full_name='google.protobuf.EnumDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', 
full_name='google.protobuf.EnumDescriptorProto.value', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumValueDescriptorProto', + full_name='google.protobuf.EnumValueDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( + name='ServiceDescriptorProto', + 
full_name='google.protobuf.ServiceDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _METHODDESCRIPTORPROTO = _descriptor.Descriptor( + name='MethodDescriptorProto', + full_name='google.protobuf.MethodDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='client_streaming', 
full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEOPTIONS = _descriptor.Descriptor( + name='FileOptions', + full_name='google.protobuf.FileOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, + number=27, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, + number=9, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, + number=16, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, + number=17, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_generic_services', full_name='google.protobuf.FileOptions.php_generic_services', index=10, + number=42, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=11, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=12, + number=31, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=13, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=14, 
+ number=37, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=15, + number=39, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=16, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=17, + number=41, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=18, + number=44, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=19, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FILEOPTIONS_OPTIMIZEMODE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _MESSAGEOPTIONS = _descriptor.Descriptor( + name='MessageOptions', + full_name='google.protobuf.MessageOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDOPTIONS = _descriptor.Descriptor( + name='FieldOptions', + full_name='google.protobuf.FieldOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, + number=6, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, 
default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=7, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDOPTIONS_CTYPE, + _FIELDOPTIONS_JSTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ONEOFOPTIONS = _descriptor.Descriptor( + name='OneofOptions', + full_name='google.protobuf.OneofOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMOPTIONS = _descriptor.Descriptor( + name='EnumOptions', + full_name='google.protobuf.EnumOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( 
+ name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMVALUEOPTIONS = _descriptor.Descriptor( + name='EnumValueOptions', + full_name='google.protobuf.EnumValueOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _SERVICEOPTIONS = _descriptor.Descriptor( + name='ServiceOptions', + full_name='google.protobuf.ServiceOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _METHODOPTIONS = _descriptor.Descriptor( + name='MethodOptions', + 
full_name='google.protobuf.MethodOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, + number=34, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METHODOPTIONS_IDEMPOTENCYLEVEL, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( + name='NamePart', + full_name='google.protobuf.UninterpretedOption.NamePart', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, + number=2, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _UNINTERPRETEDOPTION = _descriptor.Descriptor( + name='UninterpretedOption', + full_name='google.protobuf.UninterpretedOption', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='google.protobuf.SourceCodeInfo.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _SOURCECODEINFO = _descriptor.Descriptor( + name='SourceCodeInfo', + full_name='google.protobuf.SourceCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SOURCECODEINFO_LOCATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='google.protobuf.GeneratedCodeInfo.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _GENERATEDCODEINFO = _descriptor.Descriptor( + name='GeneratedCodeInfo', + full_name='google.protobuf.GeneratedCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS + _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO + _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS + _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE + _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS + _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE + _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = 
_FIELDDESCRIPTORPROTO_LABEL + _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE + _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS + _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO + _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO + _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type = _ENUMDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS + _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE + _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS + _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO + _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS + _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS + _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE + _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS + _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE + _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE + _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS + _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL + _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS + _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION + _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART + _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO + _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION + _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO + _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION + DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET + DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO + 
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS + DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS + DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS + DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS + DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS + DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS + DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS + DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION + DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO + DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO + _sym_db.RegisterFileDescriptor(DESCRIPTOR) + +else: + _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _FILEDESCRIPTORSET._serialized_start=53 + _FILEDESCRIPTORSET._serialized_end=124 + _FILEDESCRIPTORPROTO._serialized_start=127 + _FILEDESCRIPTORPROTO._serialized_end=602 + _DESCRIPTORPROTO._serialized_start=605 + _DESCRIPTORPROTO._serialized_end=1286 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_start=1140 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_end=1241 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_start=1243 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_end=1286 + _EXTENSIONRANGEOPTIONS._serialized_start=1288 + _EXTENSIONRANGEOPTIONS._serialized_end=1391 + _FIELDDESCRIPTORPROTO._serialized_start=1394 + _FIELDDESCRIPTORPROTO._serialized_end=2119 + _FIELDDESCRIPTORPROTO_TYPE._serialized_start=1740 + _FIELDDESCRIPTORPROTO_TYPE._serialized_end=2050 + _FIELDDESCRIPTORPROTO_LABEL._serialized_start=2052 + _FIELDDESCRIPTORPROTO_LABEL._serialized_end=2119 + _ONEOFDESCRIPTORPROTO._serialized_start=2121 + _ONEOFDESCRIPTORPROTO._serialized_end=2205 + _ENUMDESCRIPTORPROTO._serialized_start=2208 + _ENUMDESCRIPTORPROTO._serialized_end=2500 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_start=2453 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_end=2500 + _ENUMVALUEDESCRIPTORPROTO._serialized_start=2502 + _ENUMVALUEDESCRIPTORPROTO._serialized_end=2610 + _SERVICEDESCRIPTORPROTO._serialized_start=2613 + _SERVICEDESCRIPTORPROTO._serialized_end=2757 + _METHODDESCRIPTORPROTO._serialized_start=2760 + _METHODDESCRIPTORPROTO._serialized_end=2953 + _FILEOPTIONS._serialized_start=2956 + _FILEOPTIONS._serialized_end=3761 + _FILEOPTIONS_OPTIMIZEMODE._serialized_start=3686 + _FILEOPTIONS_OPTIMIZEMODE._serialized_end=3744 + _MESSAGEOPTIONS._serialized_start=3764 + _MESSAGEOPTIONS._serialized_end=4024 + _FIELDOPTIONS._serialized_start=4027 + _FIELDOPTIONS._serialized_end=4473 + _FIELDOPTIONS_CTYPE._serialized_start=4354 + _FIELDOPTIONS_CTYPE._serialized_end=4401 + _FIELDOPTIONS_JSTYPE._serialized_start=4403 + _FIELDOPTIONS_JSTYPE._serialized_end=4456 + _ONEOFOPTIONS._serialized_start=4475 + _ONEOFOPTIONS._serialized_end=4569 + _ENUMOPTIONS._serialized_start=4572 + _ENUMOPTIONS._serialized_end=4719 + _ENUMVALUEOPTIONS._serialized_start=4721 + _ENUMVALUEOPTIONS._serialized_end=4846 + _SERVICEOPTIONS._serialized_start=4848 + _SERVICEOPTIONS._serialized_end=4971 + 
_METHODOPTIONS._serialized_start=4974 + _METHODOPTIONS._serialized_end=5275 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_start=5184 + _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_end=5264 + _UNINTERPRETEDOPTION._serialized_start=5278 + _UNINTERPRETEDOPTION._serialized_end=5564 + _UNINTERPRETEDOPTION_NAMEPART._serialized_start=5513 + _UNINTERPRETEDOPTION_NAMEPART._serialized_end=5564 + _SOURCECODEINFO._serialized_start=5567 + _SOURCECODEINFO._serialized_end=5780 + _SOURCECODEINFO_LOCATION._serialized_start=5646 + _SOURCECODEINFO_LOCATION._serialized_end=5780 + _GENERATEDCODEINFO._serialized_start=5783 + _GENERATEDCODEINFO._serialized_end=5950 + _GENERATEDCODEINFO_ANNOTATION._serialized_start=5871 + _GENERATEDCODEINFO_ANNOTATION._serialized_end=5950 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/descriptor_pool.py b/MLPY/Lib/site-packages/google/protobuf/descriptor_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..096ed8eea9ad70b3b87625a2ed2d507537350ad0 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/descriptor_pool.py @@ -0,0 +1,1295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides DescriptorPool to use as a container for proto2 descriptors. + +The DescriptorPool is used in conjection with a DescriptorDatabase to maintain +a collection of protocol buffer descriptors for use when dynamically creating +message types at runtime. + +For most applications protocol buffers should be used via modules generated by +the protocol buffer compiler tool. This should only be used when the type of +protocol buffers used in an application or library cannot be predetermined. + +Below is a straightforward example on how to use this class:: + + pool = DescriptorPool() + file_descriptor_protos = [ ... 
] + for file_descriptor_proto in file_descriptor_protos: + pool.Add(file_descriptor_proto) + my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') + +The message descriptor can be used in conjunction with the message_factory +module in order to create a protocol buffer class that can be encoded and +decoded. + +If you want to get a Python class for the specified proto, use the +helper functions inside google.protobuf.message_factory +directly instead of this class. +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import collections +import warnings + +from google.protobuf import descriptor +from google.protobuf import descriptor_database +from google.protobuf import text_encoding + + +_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access + + +def _Deprecated(func): + """Mark functions as deprecated.""" + + def NewFunc(*args, **kwargs): + warnings.warn( + 'Call to deprecated function %s(). Note: Do add unlinked descriptors ' + 'to descriptor_pool is wrong. Use Add() or AddSerializedFile() ' + 'instead.' % func.__name__, + category=DeprecationWarning) + return func(*args, **kwargs) + NewFunc.__name__ = func.__name__ + NewFunc.__doc__ = func.__doc__ + NewFunc.__dict__.update(func.__dict__) + return NewFunc + + +def _NormalizeFullyQualifiedName(name): + """Remove leading period from fully-qualified type name. + + Due to b/13860351 in descriptor_database.py, types in the root namespace are + generated with a leading period. This function removes that prefix. + + Args: + name (str): The fully-qualified symbol name. + + Returns: + str: The normalized fully-qualified symbol name. + """ + return name.lstrip('.') + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) + + +class DescriptorPool(object): + """A collection of protobufs dynamically constructed by descriptor protos.""" + + if _USE_C_DESCRIPTORS: + + def __new__(cls, descriptor_db=None): + # pylint: disable=protected-access + return descriptor._message.DescriptorPool(descriptor_db) + + def __init__(self, descriptor_db=None): + """Initializes a Pool of proto buffs. + + The descriptor_db argument to the constructor is provided to allow + specialized file descriptor proto lookup code to be triggered on demand. An + example would be an implementation which will read and compile a file + specified in a call to FindFileByName() and not require the call to Add() + at all. Results from this database will be cached internally here as well. + + Args: + descriptor_db: A secondary source of file descriptors. + """ + + self._internal_db = descriptor_database.DescriptorDatabase() + self._descriptor_db = descriptor_db + self._descriptors = {} + self._enum_descriptors = {} + self._service_descriptors = {} + self._file_descriptors = {} + self._toplevel_extensions = {} + # TODO(jieluo): Remove _file_desc_by_toplevel_extension after + # maybe year 2020 for compatibility issue (with 3.4.1 only). 
+ self._file_desc_by_toplevel_extension = {} + self._top_enum_values = {} + # We store extensions in two two-level mappings: The first key is the + # descriptor of the message being extended, the second key is the extension + # full name or its tag number. + self._extensions_by_name = collections.defaultdict(dict) + self._extensions_by_number = collections.defaultdict(dict) + + def _CheckConflictRegister(self, desc, desc_name, file_name): + """Check if the descriptor name conflicts with another of the same name. + + Args: + desc: Descriptor of a message, enum, service, extension or enum value. + desc_name (str): the full name of desc. + file_name (str): The file name of descriptor. + """ + for register, descriptor_type in [ + (self._descriptors, descriptor.Descriptor), + (self._enum_descriptors, descriptor.EnumDescriptor), + (self._service_descriptors, descriptor.ServiceDescriptor), + (self._toplevel_extensions, descriptor.FieldDescriptor), + (self._top_enum_values, descriptor.EnumValueDescriptor)]: + if desc_name in register: + old_desc = register[desc_name] + if isinstance(old_desc, descriptor.EnumValueDescriptor): + old_file = old_desc.type.file.name + else: + old_file = old_desc.file.name + + if not isinstance(desc, descriptor_type) or ( + old_file != file_name): + error_msg = ('Conflict register for file "' + file_name + + '": ' + desc_name + + ' is already defined in file "' + + old_file + '". Please fix the conflict by adding ' + 'package name on the proto file, or use different ' + 'name for the duplication.') + if isinstance(desc, descriptor.EnumValueDescriptor): + error_msg += ('\nNote: enum values appear as ' + 'siblings of the enum type instead of ' + 'children of it.') + + raise TypeError(error_msg) + + return + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + file_desc_proto (FileDescriptorProto): The file descriptor to add. + """ + + self._internal_db.Add(file_desc_proto) + + def AddSerializedFile(self, serialized_file_desc_proto): + """Adds the FileDescriptorProto and its types to this pool. + + Args: + serialized_file_desc_proto (bytes): A bytes string, serialization of the + :class:`FileDescriptorProto` to add. + + Returns: + FileDescriptor: Descriptor for the added file. + """ + + # pylint: disable=g-import-not-at-top + from google.protobuf import descriptor_pb2 + file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( + serialized_file_desc_proto) + file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto) + file_desc.serialized_pb = serialized_file_desc_proto + return file_desc + + # Add Descriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddDescriptor(self, desc): + self._AddDescriptor(desc) + + # Never call this method. It is for internal usage only. + def _AddDescriptor(self, desc): + """Adds a Descriptor to the pool, non-recursively. + + If the Descriptor contains nested messages or enums, the caller must + explicitly register them. This method also registers the FileDescriptor + associated with the message. + + Args: + desc: A Descriptor. + """ + if not isinstance(desc, descriptor.Descriptor): + raise TypeError('Expected instance of descriptor.Descriptor.') + + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + + self._descriptors[desc.full_name] = desc + self._AddFileDescriptor(desc.file) + + # Add EnumDescriptor to descriptor pool is dreprecated. 
Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddEnumDescriptor(self, enum_desc): + self._AddEnumDescriptor(enum_desc) + + # Never call this method. It is for internal usage only. + def _AddEnumDescriptor(self, enum_desc): + """Adds an EnumDescriptor to the pool. + + This method also registers the FileDescriptor associated with the enum. + + Args: + enum_desc: An EnumDescriptor. + """ + + if not isinstance(enum_desc, descriptor.EnumDescriptor): + raise TypeError('Expected instance of descriptor.EnumDescriptor.') + + file_name = enum_desc.file.name + self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) + self._enum_descriptors[enum_desc.full_name] = enum_desc + + # Top enum values need to be indexed. + # Count the number of dots to see whether the enum is toplevel or nested + # in a message. We cannot use enum_desc.containing_type at this stage. + if enum_desc.file.package: + top_level = (enum_desc.full_name.count('.') + - enum_desc.file.package.count('.') == 1) + else: + top_level = enum_desc.full_name.count('.') == 0 + if top_level: + file_name = enum_desc.file.name + package = enum_desc.file.package + for enum_value in enum_desc.values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, enum_value.name))) + self._CheckConflictRegister(enum_value, full_name, file_name) + self._top_enum_values[full_name] = enum_value + self._AddFileDescriptor(enum_desc.file) + + # Add ServiceDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddServiceDescriptor(self, service_desc): + self._AddServiceDescriptor(service_desc) + + # Never call this method. It is for internal usage only. + def _AddServiceDescriptor(self, service_desc): + """Adds a ServiceDescriptor to the pool. + + Args: + service_desc: A ServiceDescriptor. + """ + + if not isinstance(service_desc, descriptor.ServiceDescriptor): + raise TypeError('Expected instance of descriptor.ServiceDescriptor.') + + self._CheckConflictRegister(service_desc, service_desc.full_name, + service_desc.file.name) + self._service_descriptors[service_desc.full_name] = service_desc + + # Add ExtensionDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddExtensionDescriptor(self, extension): + self._AddExtensionDescriptor(extension) + + # Never call this method. It is for internal usage only. + def _AddExtensionDescriptor(self, extension): + """Adds a FieldDescriptor describing an extension to the pool. + + Args: + extension: A FieldDescriptor. + + Raises: + AssertionError: when another extension with the same number extends the + same message. + TypeError: when the specified extension is not a + descriptor.FieldDescriptor. + """ + if not (isinstance(extension, descriptor.FieldDescriptor) and + extension.is_extension): + raise TypeError('Expected an extension descriptor.') + + if extension.extension_scope is None: + self._toplevel_extensions[extension.full_name] = extension + + try: + existing_desc = self._extensions_by_number[ + extension.containing_type][extension.number] + except KeyError: + pass + else: + if extension is not existing_desc: + raise AssertionError( + 'Extensions "%s" and "%s" both try to extend message type "%s" ' + 'with field number %d.' 
% + (extension.full_name, existing_desc.full_name, + extension.containing_type.full_name, extension.number)) + + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension + + # Also register MessageSet extensions with the type name. + if _IsMessageSetExtension(extension): + self._extensions_by_name[extension.containing_type][ + extension.message_type.full_name] = extension + + @_Deprecated + def AddFileDescriptor(self, file_desc): + self._InternalAddFileDescriptor(file_desc) + + # Never call this method. It is for internal usage only. + def _InternalAddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + self._AddFileDescriptor(file_desc) + # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. + # FieldDescriptor.file is added in code gen. Remove this solution after + # maybe 2020 for compatibility reason (with 3.4.1 only). + for extension in file_desc.extensions_by_name.values(): + self._file_desc_by_toplevel_extension[ + extension.full_name] = file_desc + + def _AddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + if not isinstance(file_desc, descriptor.FileDescriptor): + raise TypeError('Expected instance of descriptor.FileDescriptor.') + self._file_descriptors[file_desc.name] = file_desc + + def FindFileByName(self, file_name): + """Gets a FileDescriptor by file name. + + Args: + file_name (str): The path to the file to get a descriptor for. + + Returns: + FileDescriptor: The descriptor for the named file. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + try: + return self._file_descriptors[file_name] + except KeyError: + pass + + try: + file_proto = self._internal_db.FindFileByName(file_name) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file named %s' % file_name) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def FindFileContainingSymbol(self, symbol): + """Gets the FileDescriptor for the file containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + symbol = _NormalizeFullyQualifiedName(symbol) + try: + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + pass + + try: + # Try fallback database. Build and find again if possible. + self._FindFileContainingSymbolInDb(symbol) + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + raise KeyError('Cannot find a file containing %s' % symbol) + + def _InternalFindFileContainingSymbol(self, symbol): + """Gets the already built FileDescriptor containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. 
+ """ + try: + return self._descriptors[symbol].file + except KeyError: + pass + + try: + return self._enum_descriptors[symbol].file + except KeyError: + pass + + try: + return self._service_descriptors[symbol].file + except KeyError: + pass + + try: + return self._top_enum_values[symbol].type.file + except KeyError: + pass + + try: + return self._file_desc_by_toplevel_extension[symbol] + except KeyError: + pass + + # Try fields, enum values and nested extensions inside a message. + top_name, _, sub_name = symbol.rpartition('.') + try: + message = self.FindMessageTypeByName(top_name) + assert (sub_name in message.extensions_by_name or + sub_name in message.fields_by_name or + sub_name in message.enum_values_by_name) + return message.file + except (KeyError, AssertionError): + raise KeyError('Cannot find a file containing %s' % symbol) + + def FindMessageTypeByName(self, full_name): + """Loads the named descriptor from the pool. + + Args: + full_name (str): The full name of the descriptor to load. + + Returns: + Descriptor: The descriptor for the named type. + + Raises: + KeyError: if the message cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._descriptors[full_name] + + def FindEnumTypeByName(self, full_name): + """Loads the named enum descriptor from the pool. + + Args: + full_name (str): The full name of the enum descriptor to load. + + Returns: + EnumDescriptor: The enum descriptor for the named type. + + Raises: + KeyError: if the enum cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._enum_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._enum_descriptors[full_name] + + def FindFieldByName(self, full_name): + """Loads the named field descriptor from the pool. + + Args: + full_name (str): The full name of the field descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named field. + + Raises: + KeyError: if the field cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, field_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.fields_by_name[field_name] + + def FindOneofByName(self, full_name): + """Loads the named oneof descriptor from the pool. + + Args: + full_name (str): The full name of the oneof descriptor to load. + + Returns: + OneofDescriptor: The oneof descriptor for the named oneof. + + Raises: + KeyError: if the oneof cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, oneof_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.oneofs_by_name[oneof_name] + + def FindExtensionByName(self, full_name): + """Loads the named extension descriptor from the pool. + + Args: + full_name (str): The full name of the extension descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named extension. + + Raises: + KeyError: if the extension cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + try: + # The proto compiler does not give any link between the FileDescriptor + # and top-level extensions unless the FileDescriptorProto is added to + # the DescriptorDatabase, but this can impact memory usage. 
+ # So we registered these extensions by name explicitly. + return self._toplevel_extensions[full_name] + except KeyError: + pass + message_name, _, extension_name = full_name.rpartition('.') + try: + # Most extensions are nested inside a message. + scope = self.FindMessageTypeByName(message_name) + except KeyError: + # Some extensions are defined at file scope. + scope = self._FindFileContainingSymbolInDb(full_name) + return scope.extensions_by_name[extension_name] + + def FindExtensionByNumber(self, message_descriptor, number): + """Gets the extension of the specified message with the specified number. + + Extensions have to be registered to this pool by calling :func:`Add` or + :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): descriptor of the extended message. + number (int): Number of the extension field. + + Returns: + FieldDescriptor: The descriptor for the extension. + + Raises: + KeyError: when no extension with the given number is known for the + specified message. + """ + try: + return self._extensions_by_number[message_descriptor][number] + except KeyError: + self._TryLoadExtensionFromDB(message_descriptor, number) + return self._extensions_by_number[message_descriptor][number] + + def FindAllExtensions(self, message_descriptor): + """Gets all the known extensions of a given message. + + Extensions have to be registered to this pool by build related + :func:`Add` or :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): Descriptor of the extended message. + + Returns: + list[FieldDescriptor]: Field descriptors describing the extensions. + """ + # Fallback to descriptor db if FindAllExtensionNumbers is provided. + if self._descriptor_db and hasattr( + self._descriptor_db, 'FindAllExtensionNumbers'): + full_name = message_descriptor.full_name + all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) + for number in all_numbers: + if number in self._extensions_by_number[message_descriptor]: + continue + self._TryLoadExtensionFromDB(message_descriptor, number) + + return list(self._extensions_by_number[message_descriptor].values()) + + def _TryLoadExtensionFromDB(self, message_descriptor, number): + """Try to Load extensions from descriptor db. + + Args: + message_descriptor: descriptor of the extended message. + number: the extension number that needs to be loaded. + """ + if not self._descriptor_db: + return + # Only supported when FindFileContainingExtension is provided. + if not hasattr( + self._descriptor_db, 'FindFileContainingExtension'): + return + + full_name = message_descriptor.full_name + file_proto = self._descriptor_db.FindFileContainingExtension( + full_name, number) + + if file_proto is None: + return + + try: + self._ConvertFileProtoToFileDescriptor(file_proto) + except: + warn_msg = ('Unable to load proto file %s for extension number %d.' % + (file_proto.name, number)) + warnings.warn(warn_msg, RuntimeWarning) + + def FindServiceByName(self, full_name): + """Loads the named service descriptor from the pool. + + Args: + full_name (str): The full name of the service descriptor to load. + + Returns: + ServiceDescriptor: The service descriptor for the named service. + + Raises: + KeyError: if the service cannot be found in the pool. 
+ """ + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._service_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._service_descriptors[full_name] + + def FindMethodByName(self, full_name): + """Loads the named service method descriptor from the pool. + + Args: + full_name (str): The full name of the method descriptor to load. + + Returns: + MethodDescriptor: The method descriptor for the service method. + + Raises: + KeyError: if the method cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + service_name, _, method_name = full_name.rpartition('.') + service_descriptor = self.FindServiceByName(service_name) + return service_descriptor.methods_by_name[method_name] + + def _FindFileContainingSymbolInDb(self, symbol): + """Finds the file in descriptor DB containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: The file that contains the specified symbol. + + Raises: + KeyError: if the file cannot be found in the descriptor database. + """ + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file containing %s' % symbol) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def _ConvertFileProtoToFileDescriptor(self, file_proto): + """Creates a FileDescriptor from a proto or returns a cached copy. + + This method also has the side effect of loading all the symbols found in + the file into the appropriate dictionaries in the pool. + + Args: + file_proto: The proto to convert. + + Returns: + A FileDescriptor matching the passed in proto. + """ + if file_proto.name not in self._file_descriptors: + built_deps = list(self._GetDeps(file_proto.dependency)) + direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] + public_deps = [direct_deps[i] for i in file_proto.public_dependency] + + file_descriptor = descriptor.FileDescriptor( + pool=self, + name=file_proto.name, + package=file_proto.package, + syntax=file_proto.syntax, + options=_OptionsOrNone(file_proto), + serialized_pb=file_proto.SerializeToString(), + dependencies=direct_deps, + public_dependencies=public_deps, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope = {} + + # This loop extracts all the message and enum types from all the + # dependencies of the file_proto. This is necessary to create the + # scope of available message types when defining the passed in + # file proto. 
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( + dependency.message_types_by_name.values())) + scope.update((_PrefixWithDot(enum.full_name), enum) + for enum in dependency.enum_types_by_name.values()) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( + message_type, file_proto.package, file_descriptor, scope, + file_proto.syntax) + file_descriptor.message_types_by_name[message_desc.name] = ( + message_desc) + + for enum_type in file_proto.enum_type: + file_descriptor.enum_types_by_name[enum_type.name] = ( + self._ConvertEnumDescriptor(enum_type, file_proto.package, + file_descriptor, None, scope, True)) + + for index, extension_proto in enumerate(file_proto.extension): + extension_desc = self._MakeFieldDescriptor( + extension_proto, file_proto.package, index, file_descriptor, + is_extension=True) + extension_desc.containing_type = self._GetTypeFromScope( + file_descriptor.package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, + file_descriptor.package, scope) + file_descriptor.extensions_by_name[extension_desc.name] = ( + extension_desc) + self._file_desc_by_toplevel_extension[extension_desc.full_name] = ( + file_descriptor) + + for desc_proto in file_proto.message_type: + self._SetAllFieldTypes(file_proto.package, desc_proto, scope) + + if file_proto.package: + desc_proto_prefix = _PrefixWithDot(file_proto.package) + else: + desc_proto_prefix = '' + + for desc_proto in file_proto.message_type: + desc = self._GetTypeFromScope( + desc_proto_prefix, desc_proto.name, scope) + file_descriptor.message_types_by_name[desc_proto.name] = desc + + for index, service_proto in enumerate(file_proto.service): + file_descriptor.services_by_name[service_proto.name] = ( + self._MakeServiceDescriptor(service_proto, index, scope, + file_proto.package, file_descriptor)) + + self._file_descriptors[file_proto.name] = file_descriptor + + # Add extensions to the pool + file_desc = self._file_descriptors[file_proto.name] + for extension in file_desc.extensions_by_name.values(): + self._AddExtensionDescriptor(extension) + for message_type in file_desc.message_types_by_name.values(): + for extension in message_type.extensions: + self._AddExtensionDescriptor(extension) + + return file_desc + + def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, + scope=None, syntax=None): + """Adds the proto to the pool in the specified package. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: The package the proto should be located in. + file_desc: The file containing this message. + scope: Dict mapping short and full symbols to message and enum types. + syntax: string indicating syntax of the file ("proto2" or "proto3") + + Returns: + The added descriptor. 
+ """ + + if package: + desc_name = '.'.join((package, desc_proto.name)) + else: + desc_name = desc_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + if scope is None: + scope = {} + + nested = [ + self._ConvertMessageDescriptor( + nested, desc_name, file_desc, scope, syntax) + for nested in desc_proto.nested_type] + enums = [ + self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, + scope, False) + for enum in desc_proto.enum_type] + fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) + for index, field in enumerate(desc_proto.field)] + extensions = [ + self._MakeFieldDescriptor(extension, desc_name, index, file_desc, + is_extension=True) + for index, extension in enumerate(desc_proto.extension)] + oneofs = [ + # pylint: disable=g-complex-comprehension + descriptor.OneofDescriptor( + desc.name, + '.'.join((desc_name, desc.name)), + index, + None, + [], + _OptionsOrNone(desc), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for index, desc in enumerate(desc_proto.oneof_decl) + ] + extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] + if extension_ranges: + is_extendable = True + else: + is_extendable = False + desc = descriptor.Descriptor( + name=desc_proto.name, + full_name=desc_name, + filename=file_name, + containing_type=None, + fields=fields, + oneofs=oneofs, + nested_types=nested, + enum_types=enums, + extensions=extensions, + options=_OptionsOrNone(desc_proto), + is_extendable=is_extendable, + extension_ranges=extension_ranges, + file=file_desc, + serialized_start=None, + serialized_end=None, + syntax=syntax, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for nested in desc.nested_types: + nested.containing_type = desc + for enum in desc.enum_types: + enum.containing_type = desc + for field_index, field_desc in enumerate(desc_proto.field): + if field_desc.HasField('oneof_index'): + oneof_index = field_desc.oneof_index + oneofs[oneof_index].fields.append(fields[field_index]) + fields[field_index].containing_oneof = oneofs[oneof_index] + + scope[_PrefixWithDot(desc_name)] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._descriptors[desc_name] = desc + return desc + + def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, + containing_type=None, scope=None, top_level=False): + """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. + + Args: + enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the enum descriptor. + containing_type: The type containing this enum. + scope: Scope containing available types. + top_level: If True, the enum is a top level symbol. If False, the enum + is defined inside a message. 
+ + Returns: + The added descriptor + """ + + if package: + enum_name = '.'.join((package, enum_proto.name)) + else: + enum_name = enum_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + values = [self._MakeEnumValueDescriptor(value, index) + for index, value in enumerate(enum_proto.value)] + desc = descriptor.EnumDescriptor(name=enum_proto.name, + full_name=enum_name, + filename=file_name, + file=file_desc, + values=values, + containing_type=containing_type, + options=_OptionsOrNone(enum_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope['.%s' % enum_name] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._enum_descriptors[enum_name] = desc + + # Add top level enum values. + if top_level: + for value in values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, value.name))) + self._CheckConflictRegister(value, full_name, file_name) + self._top_enum_values[full_name] = value + + return desc + + def _MakeFieldDescriptor(self, field_proto, message_name, index, + file_desc, is_extension=False): + """Creates a field descriptor from a FieldDescriptorProto. + + For message and enum type fields, this method will do a look up + in the pool for the appropriate descriptor for that type. If it + is unavailable, it will fall back to the _source function to + create it. If this type is still unavailable, construction will + fail. + + Args: + field_proto: The proto describing the field. + message_name: The name of the containing message. + index: Index of the field + file_desc: The file containing the field descriptor. + is_extension: Indication that this field is for an extension. + + Returns: + An initialized FieldDescriptor object + """ + + if message_name: + full_name = '.'.join((message_name, field_proto.name)) + else: + full_name = field_proto.name + + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + + return descriptor.FieldDescriptor( + name=field_proto.name, + full_name=full_name, + index=index, + number=field_proto.number, + type=field_proto.type, + cpp_type=None, + message_type=None, + enum_type=None, + containing_type=None, + label=field_proto.label, + has_default_value=False, + default_value=None, + is_extension=is_extension, + extension_scope=None, + options=_OptionsOrNone(field_proto), + json_name=json_name, + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _SetAllFieldTypes(self, package, desc_proto, scope): + """Sets all the descriptor's fields's types. + + This method also sets the containing types on any extensions. + + Args: + package: The current package of desc_proto. + desc_proto: The message descriptor to update. + scope: Enclosing scope of available types. 
+ """ + + package = _PrefixWithDot(package) + + main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) + + if package == '.': + nested_package = _PrefixWithDot(desc_proto.name) + else: + nested_package = '.'.join([package, desc_proto.name]) + + for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( + zip(desc_proto.extension, main_desc.extensions)): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) + + for nested_type in desc_proto.nested_type: + self._SetAllFieldTypes(nested_package, nested_type, scope) + + def _SetFieldType(self, field_proto, field_desc, package, scope): + """Sets the field's type, cpp_type, message_type and enum_type. + + Args: + field_proto: Data about the field in proto format. + field_desc: The descriptor to modify. + package: The package the field's container is in. + scope: Enclosing scope of available types. + """ + if field_proto.type_name: + desc = self._GetTypeFromScope(package, field_proto.type_name, scope) + else: + desc = None + + if not field_proto.HasField('type'): + if isinstance(desc, descriptor.Descriptor): + field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE + else: + field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM + + field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( + field_proto.type) + + if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE + or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): + field_desc.message_type = desc + + if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.enum_type = desc + + if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: + field_desc.has_default_value = False + field_desc.default_value = [] + elif field_proto.HasField('default_value'): + field_desc.has_default_value = True + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = float(field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = field_proto.default_value + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = field_proto.default_value.lower() == 'true' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values_by_name[ + field_proto.default_value].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = text_encoding.CUnescape( + field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + else: + # All other types are of the "int" type. 
+ field_desc.default_value = int(field_proto.default_value) + else: + field_desc.has_default_value = False + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = u'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values[0].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = b'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP: + field_desc.default_value = None + else: + # All other types are of the "int" type. + field_desc.default_value = 0 + + field_desc.type = field_proto.type + + def _MakeEnumValueDescriptor(self, value_proto, index): + """Creates a enum value descriptor object from a enum value proto. + + Args: + value_proto: The proto describing the enum value. + index: The index of the enum value. + + Returns: + An initialized EnumValueDescriptor object. + """ + + return descriptor.EnumValueDescriptor( + name=value_proto.name, + index=index, + number=value_proto.number, + options=_OptionsOrNone(value_proto), + type=None, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _MakeServiceDescriptor(self, service_proto, service_index, scope, + package, file_desc): + """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. + + Args: + service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. + service_index: The index of the service in the File. + scope: Dict mapping short and full symbols to message and enum types. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the service descriptor. + + Returns: + The added descriptor. + """ + + if package: + service_name = '.'.join((package, service_proto.name)) + else: + service_name = service_proto.name + + methods = [self._MakeMethodDescriptor(method_proto, service_name, package, + scope, index) + for index, method_proto in enumerate(service_proto.method)] + desc = descriptor.ServiceDescriptor( + name=service_proto.name, + full_name=service_name, + index=service_index, + methods=methods, + options=_OptionsOrNone(service_proto), + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._service_descriptors[service_name] = desc + return desc + + def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, + index): + """Creates a method descriptor from a MethodDescriptorProto. + + Args: + method_proto: The proto describing the method. + service_name: The name of the containing service. + package: Optional package name to look up for types. + scope: Scope containing available types. + index: Index of the method in the service. + + Returns: + An initialized MethodDescriptor object. 
+ """ + full_name = '.'.join((service_name, method_proto.name)) + input_type = self._GetTypeFromScope( + package, method_proto.input_type, scope) + output_type = self._GetTypeFromScope( + package, method_proto.output_type, scope) + return descriptor.MethodDescriptor( + name=method_proto.name, + full_name=full_name, + index=index, + containing_service=None, + input_type=input_type, + output_type=output_type, + client_streaming=method_proto.client_streaming, + server_streaming=method_proto.server_streaming, + options=_OptionsOrNone(method_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _ExtractSymbols(self, descriptors): + """Pulls out all the symbols from descriptor protos. + + Args: + descriptors: The messages to extract descriptors from. + Yields: + A two element tuple of the type name and descriptor object. + """ + + for desc in descriptors: + yield (_PrefixWithDot(desc.full_name), desc) + for symbol in self._ExtractSymbols(desc.nested_types): + yield symbol + for enum in desc.enum_types: + yield (_PrefixWithDot(enum.full_name), enum) + + def _GetDeps(self, dependencies, visited=None): + """Recursively finds dependencies for file protos. + + Args: + dependencies: The names of the files being depended on. + visited: The names of files already found. + + Yields: + Each direct and indirect dependency. + """ + + visited = visited or set() + for dependency in dependencies: + if dependency not in visited: + visited.add(dependency) + dep_desc = self.FindFileByName(dependency) + yield dep_desc + public_files = [d.name for d in dep_desc.public_dependencies] + yield from self._GetDeps(public_files, visited) + + def _GetTypeFromScope(self, package, type_name, scope): + """Finds a given type name in the current scope. + + Args: + package: The package the proto should be located in. + type_name: The name of the type to be found in the scope. + scope: Dict mapping short and full symbols to message and enum types. + + Returns: + The descriptor for the requested type. + """ + if type_name not in scope: + components = _PrefixWithDot(package).split('.') + while components: + possible_match = '.'.join(components + [type_name]) + if possible_match in scope: + type_name = possible_match + break + else: + components.pop(-1) + return scope[type_name] + + +def _PrefixWithDot(name): + return name if name.startswith('.') else '.%s' % name + + +if _USE_C_DESCRIPTORS: + # TODO(amauryfa): This pool could be constructed from Python code, when we + # support a flag like 'use_cpp_generated_pool=True'. + # pylint: disable=protected-access + _DEFAULT = descriptor._message.default_pool +else: + _DEFAULT = DescriptorPool() + + +def Default(): + return _DEFAULT diff --git a/MLPY/Lib/site-packages/google/protobuf/duration_pb2.py b/MLPY/Lib/site-packages/google/protobuf/duration_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ecc07bdf79992915c3c6dab5e9d8302b5ffb8d --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/duration_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/duration.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DURATION._serialized_start=51 + _DURATION._serialized_end=93 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/empty_pb2.py b/MLPY/Lib/site-packages/google/protobuf/empty_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..0b4d554db317b55bfdaf23f8f6359d836d52337a --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/empty_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/empty.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _EMPTY._serialized_start=48 + _EMPTY._serialized_end=55 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/field_mask_pb2.py b/MLPY/Lib/site-packages/google/protobuf/field_mask_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..80a4e96e598f1b88a4359dbaf7589c1490fed0be --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/field_mask_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/field_mask.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"\x1a\n\tFieldMask\x12\r\n\x05paths\x18\x01 \x03(\tB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _FIELDMASK._serialized_start=53 + _FIELDMASK._serialized_end=79 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__init__.py b/MLPY/Lib/site-packages/google/protobuf/internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9daa58b45eccc5c01f76ac068cbd95353975809 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/_parameterized.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/_parameterized.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c48e9f742e72599557f0e1160b37644039a162ca Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/_parameterized.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/api_implementation.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/api_implementation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e129f41e83e07dc035e0dd3293c014069a477b6 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/api_implementation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/builder.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9db4f9f266fb24f43141ecb7ca4eb6ed5763e6f7 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/builder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/containers.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/containers.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5dac354ceecdea60cad3e5a5550341e5900d64bc Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/containers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/decoder.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/decoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52d36f8fe10ebf9e806d199087738183257eb9ce Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/decoder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/encoder.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/encoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e69245b8caa0e35b7ecfbb09069932e206a2fdf7 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/encoder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/enum_type_wrapper.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/enum_type_wrapper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b3d94d01158737a788f07e4ead831bc8ddf4bad Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/enum_type_wrapper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/extension_dict.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/extension_dict.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b57d0d67dfa9a022f0260b3b41422869d149af84 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/extension_dict.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_listener.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_listener.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eea08a66952fd79b1e85db1a90b90049fa05490b Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_listener.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_set_extensions_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_set_extensions_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eebeffe90c6b63c57642cb62aa605318c222b731 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/message_set_extensions_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/missing_enum_values_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/missing_enum_values_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bf237deaa538c4595698ece1348c3d82c340882 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/missing_enum_values_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_dynamic_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_dynamic_pb2.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4cf704cca7dc6697904970ec7673e8139787d1b6 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_dynamic_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e52a646abc2a2cc1359e9916b86ff872cb86880 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_extensions_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_messages_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_messages_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31d945da2735d425649f03921528b5162cea081f Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/more_messages_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/no_package_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/no_package_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e22e73d0b530aa2907e8ac17de1a542414eef194 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/no_package_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/python_message.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/python_message.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6cd925acae2033e37e6884839892b31c7f5f8d5 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/python_message.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/type_checkers.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/type_checkers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d01dd34bc8d854cf1ce70ae30605d81343f1a4c Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/type_checkers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/well_known_types.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/well_known_types.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d3c49171ef4746c2f6fc6e174a8cfb48e34d026 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/well_known_types.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/wire_format.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/wire_format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d7d24acc272a70cc3efd20ad98dd6c9dd60a0c8 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/__pycache__/wire_format.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/_api_implementation.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/google/protobuf/internal/_api_implementation.cp39-win_amd64.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..8afe8fce88888a8a6257843dc13794c4d85f74ee Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/internal/_api_implementation.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/_parameterized.py b/MLPY/Lib/site-packages/google/protobuf/internal/_parameterized.py new file mode 100644 index 0000000000000000000000000000000000000000..647d2d99cec50074e19e134cf55832ad6a26a315 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/_parameterized.py @@ -0,0 +1,443 @@ +#! /usr/bin/env python +# +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters): + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name (which is modified internally) and the arguments +for the specific invocation, which are part of the string returned by +the shortDescription() method on test cases. 
+ +The id method of the test, used internally by the unittest framework, +is also modified to show the arguments. To make sure that test names +stay the same across several invocations, object representations like + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into '<__main__.Foo>'. For even more descriptive names, +especially in test logs, you can use the named_parameters decorator. In +this case, only tuples are supported, and the first parameters has to +be a string (or an object that returns an apt name when converted via +str()): + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + ('Normal', 'aa', 'aaa', True), + ('EmptyPrefix', '', 'abc', True), + ('BothEmpty', '', '', True)) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, strings.startswith(prefix)) + +Named tests also have the benefit that they can be run individually +from the command line: + + $ testmodule.py NamedExample.testStartsWithNormal + . + -------------------------------------------------------------------- + Ran 1 test in 0.000s + + OK + +Parameterized Classes +===================== +If invocation arguments are shared across test methods in a single +TestCase class, instead of decorating all test methods +individually, the class itself can be decorated: + + @parameterized.parameters( + (1, 2, 3) + (4, 5, 9)) + class ArithmeticTest(parameterized.TestCase): + def testAdd(self, arg1, arg2, result): + self.assertEqual(arg1 + arg2, result) + + def testSubtract(self, arg2, arg2, result): + self.assertEqual(result - arg1, arg2) + +Inputs from Iterables +===================== +If parameters should be shared across several test cases, or are dynamically +created from other sources, a single non-tuple iterable can be passed into +the decorator. This iterable will be used to obtain the test cases: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + c.op1, c.op2, c.result for c in testcases + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Single-Argument Test Methods +============================ +If a test method takes only one argument, the single argument does not need to +be wrapped into a tuple: + + class NegativeNumberExample(parameterized.TestCase): + @parameterized.parameters( + -1, -3, -4, -5 + ) + def testIsNegative(self, arg): + self.assertTrue(IsNegative(arg)) +""" + +__author__ = 'tmarek@google.com (Torsten Marek)' + +import functools +import re +import types +import unittest +import uuid + +try: + # Since python 3 + import collections.abc as collections_abc +except ImportError: + # Won't work after python 3.8 + import collections as collections_abc + +ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>') +_SEPARATOR = uuid.uuid1().hex +_FIRST_ARG = object() +_ARGUMENT_REPR = object() + + +def _CleanRepr(obj): + return ADDR_RE.sub(r'<\1>', repr(obj)) + + +# Helper function formerly from the unittest module, removed from it in +# Python 2.7. 
+def _StrClass(cls): + return '%s.%s' % (cls.__module__, cls.__name__) + + +def _NonStringIterable(obj): + return (isinstance(obj, collections_abc.Iterable) and + not isinstance(obj, str)) + + +def _FormatParameterList(testcase_params): + if isinstance(testcase_params, collections_abc.Mapping): + return ', '.join('%s=%s' % (argname, _CleanRepr(value)) + for argname, value in testcase_params.items()) + elif _NonStringIterable(testcase_params): + return ', '.join(map(_CleanRepr, testcase_params)) + else: + return _FormatParameterList((testcase_params,)) + + +class _ParameterizedTestIter(object): + """Callable and iterable class for producing new test cases.""" + + def __init__(self, test_method, testcases, naming_type): + """Returns concrete test functions for a test and a list of parameters. + + The naming_type is used to determine the name of the concrete + functions as reported by the unittest framework. If naming_type is + _FIRST_ARG, the testcases must be tuples, and the first element must + have a string representation that is a valid Python identifier. + + Args: + test_method: The decorated test method. + testcases: (list of tuple/dict) A list of parameter + tuples/dicts for individual test invocations. + naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def MakeBoundParamTest(testcase_params): + @functools.wraps(test_method) + def BoundParamTest(self): + if isinstance(testcase_params, collections_abc.Mapping): + test_method(self, **testcase_params) + elif _NonStringIterable(testcase_params): + test_method(self, *testcase_params) + else: + test_method(self, testcase_params) + + if naming_type is _FIRST_ARG: + # Signal the metaclass that the name of the test function is unique + # and descriptive. + BoundParamTest.__x_use_name__ = True + BoundParamTest.__name__ += str(testcase_params[0]) + testcase_params = testcase_params[1:] + elif naming_type is _ARGUMENT_REPR: + # __x_extra_id__ is used to pass naming information to the __new__ + # method of TestGeneratorMetaclass. + # The metaclass will make sure to create a unique, but nondescriptive + # name for this test. + BoundParamTest.__x_extra_id__ = '(%s)' % ( + _FormatParameterList(testcase_params),) + else: + raise RuntimeError('%s is not a valid naming type.' % (naming_type,)) + + BoundParamTest.__doc__ = '%s(%s)' % ( + BoundParamTest.__name__, _FormatParameterList(testcase_params)) + if test_method.__doc__: + BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,) + return BoundParamTest + return (MakeBoundParamTest(c) for c in self.testcases) + + +def _IsSingletonList(testcases): + """True iff testcases contains only a single non-tuple element.""" + return len(testcases) == 1 and not isinstance(testcases[0], tuple) + + +def _ModifyClass(class_object, testcases, naming_type): + assert not getattr(class_object, '_id_suffix', None), ( + 'Cannot add parameters to %s,' + ' which already has parameterized methods.' 
% (class_object,)) + class_object._id_suffix = id_suffix = {} + # We change the size of __dict__ while we iterate over it, + # which Python 3.x will complain about, so use copy(). + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _UpdateClassDictForParamTestCase( + methods, id_suffix, name, + _ParameterizedTestIter(obj, testcases, naming_type)) + for name, meth in methods.items(): + setattr(class_object, name, meth) + + +def _ParameterDecorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Returns: + A function for modifying the decorated object. + """ + def _Apply(obj): + if isinstance(obj, type): + _ModifyClass( + obj, + list(testcases) if not isinstance(testcases, collections_abc.Sequence) + else testcases, + naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if _IsSingletonList(testcases): + assert _NonStringIterable(testcases[0]), ( + 'Single parameter argument must be a non-string iterable') + testcases = testcases[0] + + return _Apply + + +def parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests + with only one argument). + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): # pylint: disable=invalid-name + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. The first element of + each parameter tuple should be a string and will be appended to the + name of the test method. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _ParameterDecorator(_FIRST_ARG, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for test cases with test generators. + + A test generator is an iterable in a testcase that produces callables. These + callables must be single-argument methods. These methods are injected into + the class namespace and the original iterable is removed. If the name of the + iterable conforms to the test pattern, the injected methods will be picked + up as tests by the unittest framework. + + In general, it is supposed to be used in conjunction with the + parameters decorator. + """ + + def __new__(mcs, class_name, bases, dct): + dct['_id_suffix'] = id_suffix = {} + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _NonStringIterable(obj)): + iterator = iter(obj) + dct.pop(name) + _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator) + + return type.__new__(mcs, class_name, bases, dct) + + +def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + dct: The target dictionary. + id_suffix: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. 
+ """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if getattr(func, '__x_use_name__', False): + new_name = func.__name__ + else: + new_name = '%s%s%d' % (name, _SEPARATOR, idx) + assert new_name not in dct, ( + 'Name of parameterized test case "%s" not unique' % (new_name,)) + dct[new_name] = func + id_suffix[new_name] = getattr(func, '__x_extra_id__', '') + + +class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + def _OriginalName(self): + return self._testMethodName.split(_SEPARATOR)[0] + + def __str__(self): + return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__)) + + def id(self): # pylint: disable=invalid-name + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + return '%s.%s%s' % (_StrClass(self.__class__), + self._OriginalName(), + self._id_suffix.get(self._testMethodName, '')) + + +def CoopTestCase(other_base_class): + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + mox.MoxTestBase. + + Only works with metaclasses that do not override type.__new__. + + Example: + + import google3 + import mox + + from google3.testing.pybase import parameterized + + class ExampleTest(parameterized.CoopTestCase(mox.MoxTestBase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + metaclass = type( + 'CoopMetaclass', + (other_base_class.__metaclass__, + TestGeneratorMetaclass), {}) + return metaclass( + 'CoopTestCase', + (other_base_class, TestCase), {}) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/api_implementation.py b/MLPY/Lib/site-packages/google/protobuf/internal/api_implementation.py new file mode 100644 index 0000000000000000000000000000000000000000..401951eac97dcfd86d3ffb073520484932e5c2bb --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/api_implementation.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Determine which implementation of the protobuf API is used in this process. +""" + +import os +import sys +import warnings + +try: + # pylint: disable=g-import-not-at-top + from google.protobuf.internal import _api_implementation + # The compile-time constants in the _api_implementation module can be used to + # switch to a certain implementation of the Python API at build time. + _api_version = _api_implementation.api_version +except ImportError: + _api_version = -1 # Unspecified by compiler flags. + +if _api_version == 1: + raise ValueError('api_version=1 is no longer supported.') + + +_default_implementation_type = ('cpp' if _api_version > 0 else 'python') + + +# This environment variable can be used to switch to a certain implementation +# of the Python API, overriding the compile-time constants in the +# _api_implementation module. Right now only 'python' and 'cpp' are valid +# values. Any other value will be ignored. +_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', + _default_implementation_type) + +if _implementation_type != 'python': + _implementation_type = 'cpp' + +if 'PyPy' in sys.version and _implementation_type == 'cpp': + warnings.warn('PyPy does not work yet with cpp protocol buffers. ' + 'Falling back to the python implementation.') + _implementation_type = 'python' + + +# Detect if serialization should be deterministic by default +try: + # The presence of this module in a build allows the proto implementation to + # be upgraded merely via build deps. + # + # NOTE: Merely importing this automatically enables deterministic proto + # serialization for C++ code, but we still need to export it as a boolean so + # that we can do the same for `_implementation_type == 'python'`. + # + # NOTE2: It is possible for C++ code to enable deterministic serialization by + # default _without_ affecting Python code, if the C++ implementation is not in + # use by this module. That is intended behavior, so we don't actually expose + # this boolean outside of this module. + # + # pylint: disable=g-import-not-at-top,unused-import + from google.protobuf import enable_deterministic_proto_serialization + _python_deterministic_proto_serialization = True +except ImportError: + _python_deterministic_proto_serialization = False + + +# Usage of this function is discouraged. Clients shouldn't care which +# implementation of the API is in use. Note that there is no guarantee +# that differences between APIs will be maintained. +# Please don't use this function if possible. +def Type(): + return _implementation_type + + +def _SetType(implementation_type): + """Never use! Only for protobuf benchmark.""" + global _implementation_type + _implementation_type = implementation_type + + +# See comment on 'Type' above. 
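A minimal sketch of how the selection logic above is exercised from application code. It assumes a standard protobuf installation and that google.protobuf has not been imported earlier in the process, since the environment variable is read at import time:

import os
# Must be set before google.protobuf is first imported to have any effect.
os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python')

from google.protobuf.internal import api_implementation
# Discouraged by the comment above, but handy for debugging which backend is active.
print(api_implementation.Type())  # 'python' or 'cpp'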
+def Version(): + return 2 + + +# For internal use only +def IsPythonDefaultSerializationDeterministic(): + return _python_deterministic_proto_serialization diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/builder.py b/MLPY/Lib/site-packages/google/protobuf/internal/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f7fb537a7f76155ceb522cbf0842403cd3baa77e --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/builder.py @@ -0,0 +1,130 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Builds descriptors, message classes and services for generated _pb2.py. + +This file is only called in python generated _pb2.py files. It builds +descriptors, message classes and services that users can directly use +in generated code. +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +_sym_db = _symbol_database.Default() + + +def BuildMessageAndEnumDescriptors(file_des, module): + """Builds message and enum descriptors. + + Args: + file_des: FileDescriptor of the .proto file + module: Generated _pb2 module + """ + + def BuildNestedDescriptors(msg_des, prefix): + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + module_name = prefix + name.upper() + module[module_name] = nested_msg + BuildNestedDescriptors(nested_msg, module_name + '_') + for enum_des in msg_des.enum_types: + module[prefix + enum_des.name.upper()] = enum_des + + for (name, msg_des) in file_des.message_types_by_name.items(): + module_name = '_' + name.upper() + module[module_name] = msg_des + BuildNestedDescriptors(msg_des, module_name + '_') + + +def BuildTopDescriptorsAndMessages(file_des, module_name, module): + """Builds top level descriptors and message classes. 
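These helpers are only called from generated _pb2.py modules, but the naming scheme applied by BuildMessageAndEnumDescriptors above can be observed on any generated module that ships with protobuf. A small sketch using descriptor_pb2 (the exact set of injected names can vary between protobuf releases):

from google.protobuf import descriptor_pb2

# Message classes are injected under their proto names ...
print(descriptor_pb2.FileDescriptorProto)
# ... and their Descriptor objects under '_' + NAME.upper().
print(descriptor_pb2._FILEDESCRIPTORPROTO.full_name)  # google.protobuf.FileDescriptorProto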
+ + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + + def BuildMessage(msg_des): + create_dict = {} + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + create_dict[name] = BuildMessage(nested_msg) + create_dict['DESCRIPTOR'] = msg_des + create_dict['__module__'] = module_name + message_class = _reflection.GeneratedProtocolMessageType( + msg_des.name, (_message.Message,), create_dict) + _sym_db.RegisterMessage(message_class) + return message_class + + # top level enums + for (name, enum_des) in file_des.enum_types_by_name.items(): + module['_' + name.upper()] = enum_des + module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des) + for enum_value in enum_des.values: + module[enum_value.name] = enum_value.number + + # top level extensions + for (name, extension_des) in file_des.extensions_by_name.items(): + module[name.upper() + '_FIELD_NUMBER'] = extension_des.number + module[name] = extension_des + + # services + for (name, service) in file_des.services_by_name.items(): + module['_' + name.upper()] = service + + # Build messages. + for (name, msg_des) in file_des.message_types_by_name.items(): + module[name] = BuildMessage(msg_des) + + +def BuildServices(file_des, module_name, module): + """Builds services classes and services stub class. + + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import service as _service + from google.protobuf import service_reflection + # pylint: enable=g-import-not-at-top + for (name, service) in file_des.services_by_name.items(): + module[name] = service_reflection.GeneratedServiceType( + name, (_service.Service,), + dict(DESCRIPTOR=service, __module__=module_name)) + stub_name = name + '_Stub' + module[stub_name] = service_reflection.GeneratedServiceStubType( + stub_name, (module[name],), + dict(DESCRIPTOR=service, __module__=module_name)) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/containers.py b/MLPY/Lib/site-packages/google/protobuf/internal/containers.py new file mode 100644 index 0000000000000000000000000000000000000000..99d0d6fb994bfb8d1e27528444bf0a3e97a7aecf --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/containers.py @@ -0,0 +1,710 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains container classes to represent different protocol buffer types. + +This file defines container classes which represent categories of protocol +buffer field types which need extra maintenance. Currently these categories +are: + +- Repeated scalar fields - These are all repeated fields which aren't + composite (e.g. they are of simple types like int32, string, etc). +- Repeated composite fields - Repeated fields which are composite. This + includes groups and nested messages. +""" + +import collections.abc +import copy +import pickle +from typing import ( + Any, + Iterable, + Iterator, + List, + MutableMapping, + MutableSequence, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + overload, +) + + +_T = TypeVar('_T') +_K = TypeVar('_K') +_V = TypeVar('_V') + + +class BaseContainer(Sequence[_T]): + """Base container class.""" + + # Minimizes memory usage and disallows assignment to other attributes. + __slots__ = ['_message_listener', '_values'] + + def __init__(self, message_listener: Any) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The RepeatedScalarFieldContainer will call this object's + Modified() method when it is modified. + """ + self._message_listener = message_listener + self._values = [] + + @overload + def __getitem__(self, key: int) -> _T: + ... + + @overload + def __getitem__(self, key: slice) -> List[_T]: + ... + + def __getitem__(self, key): + """Retrieves item by the specified key.""" + return self._values[key] + + def __len__(self) -> int: + """Returns the number of elements in the container.""" + return len(self._values) + + def __ne__(self, other: Any) -> bool: + """Checks if another instance isn't equal to this one.""" + # The concrete classes should define __eq__. + return not self == other + + __hash__ = None + + def __repr__(self) -> str: + return repr(self._values) + + def sort(self, *args, **kwargs) -> None: + # Continue to support the old sort_function keyword argument. + # This is expected to be a rare occurrence, so use LBYL to avoid + # the overhead of actually catching KeyError. + if 'sort_function' in kwargs: + kwargs['cmp'] = kwargs.pop('sort_function') + self._values.sort(*args, **kwargs) + + def reverse(self) -> None: + self._values.reverse() + + +# TODO(slebedev): Remove this. BaseContainer does *not* conform to +# MutableSequence, only its subclasses do. +collections.abc.MutableSequence.register(BaseContainer) + + +class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, type-checked, list-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_type_checker'] + + def __init__( + self, + message_listener: Any, + type_checker: Any, + ) -> None: + """Args: + + message_listener: A MessageListener implementation. The + RepeatedScalarFieldContainer will call this object's Modified() method + when it is modified. 
+ type_checker: A type_checkers.ValueChecker instance to run on elements + inserted into this container. + """ + super().__init__(message_listener) + self._type_checker = type_checker + + def append(self, value: _T) -> None: + """Appends an item to the list. Similar to list.append().""" + self._values.append(self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position. Similar to list.insert().""" + self._values.insert(key, self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given iterable. Similar to list.extend().""" + if elem_seq is None: + return + try: + elem_seq_iter = iter(elem_seq) + except TypeError: + if not elem_seq: + # silently ignore falsy inputs :-/. + # TODO(ptucker): Deprecate this behavior. b/18413862 + return + raise + + new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter] + if new_values: + self._values.extend(new_values) + self._message_listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one. We do not check the types of the individual fields. + """ + self._values.extend(other) + self._message_listener.Modified() + + def remove(self, elem: _T): + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value) -> None: + """Sets the item on the specified position.""" + if isinstance(key, slice): + if key.step is not None: + raise ValueError('Extended slices not supported') + self._values[key] = map(self._type_checker.CheckValue, value) + self._message_listener.Modified() + else: + self._values[key] = self._type_checker.CheckValue(value) + self._message_listener.Modified() + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + # Special case for the same type which should be common and fast. + if isinstance(other, self.__class__): + return other._values == self._values + # We are presumably comparing against some other sequence type. + return other == self._values + + def __deepcopy__( + self, + unused_memo: Any = None, + ) -> 'RepeatedScalarFieldContainer[_T]': + clone = RepeatedScalarFieldContainer( + copy.deepcopy(self._message_listener), self._type_checker) + clone.MergeFrom(self) + return clone + + def __reduce__(self, **kwargs) -> NoReturn: + raise pickle.PickleError( + "Can't pickle repeated scalar fields, convert to list first") + + +# TODO(slebedev): Constrain T to be a subtype of Message. 
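Behavior of the repeated-scalar container above, sketched with a message that has a repeated string field (FileDescriptorProto.dependency). The container class itself is an implementation detail; the type-checking semantics are what user code observes:

from google.protobuf import descriptor_pb2

fd = descriptor_pb2.FileDescriptorProto()
fd.dependency.append('a.proto')                 # type-checked append
fd.dependency.extend(['b.proto', 'c.proto'])    # type-checked extend
fd.dependency[0] = 'renamed.proto'              # index assignment is allowed for scalars
print(list(fd.dependency))                      # ['renamed.proto', 'b.proto', 'c.proto']
try:
  fd.dependency.append(123)                     # wrong type is rejected by the ValueChecker
except TypeError as err:
  print('rejected:', err)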
+class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, list-like container for holding repeated composite fields.""" + + # Disallows assignment to other attributes. + __slots__ = ['_message_descriptor'] + + def __init__(self, message_listener: Any, message_descriptor: Any) -> None: + """ + Note that we pass in a descriptor instead of the generated directly, + since at the time we construct a _RepeatedCompositeFieldContainer we + haven't yet necessarily initialized the type that will be contained in the + container. + + Args: + message_listener: A MessageListener implementation. + The RepeatedCompositeFieldContainer will call this object's + Modified() method when it is modified. + message_descriptor: A Descriptor instance describing the protocol type + that should be present in this container. We'll use the + _concrete_class field of this descriptor when the client calls add(). + """ + super().__init__(message_listener) + self._message_descriptor = message_descriptor + + def add(self, **kwargs: Any) -> _T: + """Adds a new element at the end of the list and returns it. Keyword + arguments may be used to initialize the element. + """ + new_element = self._message_descriptor._concrete_class(**kwargs) + new_element._SetListener(self._message_listener) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + return new_element + + def append(self, value: _T) -> None: + """Appends one element by copying the message.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position by copying.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.insert(key, new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given sequence of elements of the same type + + as this one, copying each individual message. + """ + message_class = self._message_descriptor._concrete_class + listener = self._message_listener + values = self._values + for message in elem_seq: + new_element = message_class() + new_element._SetListener(listener) + new_element.MergeFrom(message) + values.append(new_element) + listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one, copying each individual message. + """ + self.extend(other) + + def remove(self, elem: _T) -> None: + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... 
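The composite container above copies on append()/extend() and constructs in place on add(); with the pure-Python implementation in this file, direct item assignment is rejected (see the __setitem__ implementation just below). A short sketch using FileDescriptorProto.message_type, a repeated message field:

from google.protobuf import descriptor_pb2

fd = descriptor_pb2.FileDescriptorProto()
msg = fd.message_type.add(name='Foo')                               # constructed in place
fd.message_type.append(descriptor_pb2.DescriptorProto(name='Bar'))  # copied in
print([m.name for m in fd.message_type])                            # ['Foo', 'Bar']
try:
  fd.message_type[0] = descriptor_pb2.DescriptorProto()
except TypeError as err:
  print('rejected:', err)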
+ + def __setitem__(self, key, value): + # This method is implemented to make RepeatedCompositeFieldContainer + # structurally compatible with typing.MutableSequence. It is + # otherwise unsupported and will always raise an error. + raise TypeError( + f'{self.__class__.__name__} object does not support item assignment') + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + if not isinstance(other, self.__class__): + raise TypeError('Can only compare repeated composite fields against ' + 'other repeated composite fields.') + return self._values == other._values + + +class ScalarMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener', + '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + key_checker: Any, + value_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. + key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._key_checker = key_checker + self._value_checker = value_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + try: + return self._values[key] + except KeyError: + key = self._key_checker.CheckValue(key) + val = self._value_checker.DefaultValue() + self._values[key] = val + return val + + def __contains__(self, item: _K) -> bool: + # We check the key's type to match the strong-typing flavor of the API. + # Also this makes it easier to match the behavior of the C++ implementation. + self._key_checker.CheckValue(item) + return item in self._values + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. 
+ def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __setitem__(self, key: _K, value: _V) -> _T: + checked_key = self._key_checker.CheckValue(key) + checked_value = self._value_checker.CheckValue(value) + self._values[checked_key] = checked_value + self._message_listener.Modified() + + def __delitem__(self, key: _K) -> None: + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None: + self._values.update(other._values) + self._message_listener.Modified() + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class MessageMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for with submessage values.""" + + # Disallows assignment to other attributes. + __slots__ = ['_key_checker', '_values', '_message_listener', + '_message_descriptor', '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + message_descriptor: Any, + key_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The ScalarMap will call this object's Modified() method when it + is modified. + key_checker: A type_checkers.ValueChecker instance to run on keys + inserted into this container. + value_checker: A type_checkers.ValueChecker instance to run on values + inserted into this container. + entry_descriptor: The MessageDescriptor of a map entry: key and value. + """ + self._message_listener = message_listener + self._message_descriptor = message_descriptor + self._key_checker = key_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + key = self._key_checker.CheckValue(key) + try: + return self._values[key] + except KeyError: + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + self._values[key] = new_element + self._message_listener.Modified() + return new_element + + def get_or_create(self, key: _K) -> _V: + """get_or_create() is an alias for getitem (ie. map[key]). + + Args: + key: The key to get or create in the map. + + This is useful in cases where you want to be explicit that the call is + mutating the map. This can avoid lint errors for statements like this + that otherwise would appear to be pointless statements: + + msg.my_map[key] + """ + return self[key] + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. 
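The map containers above are defaultdict-like on item access but not on get(); the get definition that follows returns a default without inserting. A sketch using Struct.fields, a map<string, Value> field from the well-known types:

from google.protobuf import struct_pb2

s = struct_pb2.Struct()
s.fields['answer'].number_value = 42      # reading a missing key creates the entry
print('answer' in s.fields)               # True
print(s.fields.get('missing'))            # None -- get() does not insert
print('missing' in s.fields)              # False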
+ def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __contains__(self, item: _K) -> bool: + item = self._key_checker.CheckValue(item) + return item in self._values + + def __setitem__(self, key: _K, value: _V) -> NoReturn: + raise ValueError('May not set values directly, call my_map[key].foo = 5') + + def __delitem__(self, key: _K) -> None: + key = self._key_checker.CheckValue(key) + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None: + # pylint: disable=protected-access + for key in other._values: + # According to documentation: "When parsing from the wire or when merging, + # if there are duplicate map keys the last key seen is used". + if key in self: + del self[key] + self[key].CopyFrom(other[key]) + # self._message_listener.Modified() not required here, because + # mutations to submessages already propagate. + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class _UnknownField: + """A parsed unknown field.""" + + # Disallows assignment to other attributes. + __slots__ = ['_field_number', '_wire_type', '_data'] + + def __init__(self, field_number, wire_type, data): + self._field_number = field_number + self._wire_type = wire_type + self._data = data + return + + def __lt__(self, other): + # pylint: disable=protected-access + return self._field_number < other._field_number + + def __eq__(self, other): + if self is other: + return True + # pylint: disable=protected-access + return (self._field_number == other._field_number and + self._wire_type == other._wire_type and + self._data == other._data) + + +class UnknownFieldRef: # pylint: disable=missing-class-docstring + + def __init__(self, parent, index): + self._parent = parent + self._index = index + + def _check_valid(self): + if not self._parent: + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + if self._index >= len(self._parent): + raise ValueError('UnknownField does not exist. ' + 'The parent message might be cleared.') + + @property + def field_number(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._field_number + + @property + def wire_type(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._wire_type + + @property + def data(self): + self._check_valid() + # pylint: disable=protected-access + return self._parent._internal_get(self._index)._data + + +class UnknownFieldSet: + """UnknownField container""" + + # Disallows assignment to other attributes. + __slots__ = ['_values'] + + def __init__(self): + self._values = [] + + def __getitem__(self, index): + if self._values is None: + raise ValueError('UnknownFields does not exist. 
' + 'The parent message might be cleared.') + size = len(self._values) + if index < 0: + index += size + if index < 0 or index >= size: + raise IndexError('index %d out of range'.index) + + return UnknownFieldRef(self, index) + + def _internal_get(self, index): + return self._values[index] + + def __len__(self): + if self._values is None: + raise ValueError('UnknownFields does not exist. ' + 'The parent message might be cleared.') + return len(self._values) + + def _add(self, field_number, wire_type, data): + unknown_field = _UnknownField(field_number, wire_type, data) + self._values.append(unknown_field) + return unknown_field + + def __iter__(self): + for i in range(len(self)): + yield UnknownFieldRef(self, i) + + def _extend(self, other): + if other is None: + return + # pylint: disable=protected-access + self._values.extend(other._values) + + def __eq__(self, other): + if self is other: + return True + # Sort unknown fields because their order shouldn't + # affect equality test. + values = list(self._values) + if other is None: + return not values + values.sort() + # pylint: disable=protected-access + other_values = sorted(other._values) + return values == other_values + + def _clear(self): + for value in self._values: + # pylint: disable=protected-access + if isinstance(value._data, UnknownFieldSet): + value._data._clear() # pylint: disable=protected-access + self._values = None diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/decoder.py b/MLPY/Lib/site-packages/google/protobuf/internal/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..c0cb6ef2b2659e7ae1df20e535c4e50ef902d84e --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/decoder.py @@ -0,0 +1,1029 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Code for decoding protocol buffer primitives. + +This code is very similar to encoder.py -- read the docs for that module first. 
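A minimal sketch exercising the UnknownFieldSet container defined above directly through its internal _add() hook (in normal use these sets are populated by the decoder in this module and read back through UnknownFieldRef views):

from google.protobuf.internal import containers
from google.protobuf.internal import wire_format

ufs = containers.UnknownFieldSet()
ufs._add(7, wire_format.WIRETYPE_VARINT, 42)        # internal API, shown for illustration
for ref in ufs:                                     # iteration yields UnknownFieldRef views
  print(ref.field_number, ref.wire_type, ref.data)  # 7 0 42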
+ +A "decoder" is a function with the signature: + Decode(buffer, pos, end, message, field_dict) +The arguments are: + buffer: The string containing the encoded message. + pos: The current position in the string. + end: The position in the string where the current message ends. May be + less than len(buffer) if we're reading a sub-message. + message: The message object into which we're parsing. + field_dict: message._fields (avoids a hashtable lookup). +The decoder reads the field and stores it into field_dict, returning the new +buffer position. A decoder for a repeated field may proactively decode all of +the elements of that field, if they appear consecutively. + +Note that decoders may throw any of the following: + IndexError: Indicates a truncated message. + struct.error: Unpacking of a fixed-width field failed. + message.DecodeError: Other errors. + +Decoders are expected to raise an exception if they are called with pos > end. +This allows callers to be lax about bounds checking: it's fineto read past +"end" as long as you are sure that someone else will notice and throw an +exception later on. + +Something up the call stack is expected to catch IndexError and struct.error +and convert them to message.DecodeError. + +Decoders are constructed using decoder constructors with the signature: + MakeDecoder(field_number, is_repeated, is_packed, key, new_default) +The arguments are: + field_number: The field number of the field we want to decode. + is_repeated: Is the field a repeated field? (bool) + is_packed: Is the field a packed field? (bool) + key: The key to use when looking up the field within field_dict. + (This is actually the FieldDescriptor but nothing in this + file should depend on that.) + new_default: A function which takes a message object as a parameter and + returns a new instance of the default value for this field. + (This is called for repeated fields and sub-messages, when an + instance does not already exist.) + +As with encoders, we define a decoder constructor for every type of field. +Then, for every field of every message class we construct an actual decoder. +That decoder goes into a dict indexed by tag, so when we decode a message +we repeatedly read a tag, look up the corresponding decoder, and invoke it. +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +import math +import struct + +from google.protobuf.internal import containers +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import message + + +# This is not for optimization, but rather to avoid conflicts with local +# variables named "message". +_DecodeError = message.DecodeError + + +def _VarintDecoder(mask, result_type): + """Return an encoder for a basic varint value (does not include tag). + + Decoded values will be bitwise-anded with the given mask before being + returned, e.g. to limit them to 32 bits. The returned decoder does not + take the usual "end" parameter -- the caller is expected to do bounds checking + after the fact (often the caller can defer such checking until later). The + decoder returns a (value, new_pos) pair. 
+ """ + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + + +def _SignedVarintDecoder(bits, result_type): + """Like _VarintDecoder() but decodes signed values.""" + + signbit = 1 << (bits - 1) + mask = (1 << bits) - 1 + + def DecodeVarint(buffer, pos): + result = 0 + shift = 0 + while 1: + b = buffer[pos] + result |= ((b & 0x7f) << shift) + pos += 1 + if not (b & 0x80): + result &= mask + result = (result ^ signbit) - signbit + result = result_type(result) + return (result, pos) + shift += 7 + if shift >= 64: + raise _DecodeError('Too many bytes when decoding varint.') + return DecodeVarint + +# All 32-bit and 64-bit values are represented as int. +_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) +_DecodeSignedVarint = _SignedVarintDecoder(64, int) + +# Use these versions for values which must be limited to 32 bits. +_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +_DecodeSignedVarint32 = _SignedVarintDecoder(32, int) + + +def ReadTag(buffer, pos): + """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple. + + We return the raw bytes of the tag rather than decoding them. The raw + bytes can then be used to look up the proper decoder. This effectively allows + us to trade some work that would be done in pure-python (decoding a varint) + for work that is done in C (searching for a byte string in a hash table). + In a low-level language it would be much cheaper to decode the varint and + use that, but not in Python. + + Args: + buffer: memoryview object of the encoded bytes + pos: int of the current position to start from + + Returns: + Tuple[bytes, int] of the tag data and new position. + """ + start = pos + while buffer[pos] & 0x80: + pos += 1 + pos += 1 + + tag_bytes = buffer[start:pos].tobytes() + return tag_bytes, pos + + +# -------------------------------------------------------------------- + + +def _SimpleDecoder(wire_type, decode_value): + """Return a constructor for a decoder for fields of a particular type. + + Args: + wire_type: The field's wire type. + decode_value: A function which decodes an individual value, e.g. + _DecodeVarint() + """ + + def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + (element, pos) = decode_value(buffer, pos) + value.append(element) + if pos > endpoint: + del value[-1] # Discard corrupt value. 
+ raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_type) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = decode_value(buffer, pos) + value.append(element) + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (new_value, pos) = decode_value(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not new_value: + field_dict.pop(key, None) + else: + field_dict[key] = new_value + return pos + return DecodeField + + return SpecificDecoder + + +def _ModifiedDecoder(wire_type, decode_value, modify_value): + """Like SimpleDecoder but additionally invokes modify_value on every value + before storing it. Usually modify_value is ZigZagDecode. + """ + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + def InnerDecode(buffer, pos): + (result, new_pos) = decode_value(buffer, pos) + return (modify_value(result), new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _StructPackDecoder(wire_type, format): + """Return a constructor for a decoder for a fixed-width field. + + Args: + wire_type: The field's wire type. + format: The format string to pass to struct.unpack(). + """ + + value_size = struct.calcsize(format) + local_unpack = struct.unpack + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + + def InnerDecode(buffer, pos): + new_pos = pos + value_size + result = local_unpack(format, buffer[pos:new_pos])[0] + return (result, new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _FloatDecoder(): + """Returns a decoder for a float field. + + This code works around a bug in struct.unpack for non-finite 32-bit + floating-point values. + """ + + local_unpack = struct.unpack + + def InnerDecode(buffer, pos): + """Decode serialized float to a float and new position. + + Args: + buffer: memoryview of the serialized bytes + pos: int, position in the memory view to start at. + + Returns: + Tuple[float, int] of the deserialized float value and new position + in the serialized data. + """ + # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign + # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. + new_pos = pos + 4 + float_bytes = buffer[pos:new_pos].tobytes() + + # If this value has all its exponent bits set, then it's non-finite. + # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. + # To avoid that, we parse it specially. + if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'): + # If at least one significand bit is set... 
+ if float_bytes[0:3] != b'\x00\x00\x80': + return (math.nan, new_pos) + # If sign bit is set... + if float_bytes[3:4] == b'\xFF': + return (-math.inf, new_pos) + return (math.inf, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack('= b'\xF0') + and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')): + return (math.nan, new_pos) + + # Note that we expect someone up-stack to catch struct.error and convert + # it to _DecodeError -- this way we don't have to set up exception- + # handling blocks every time we parse one value. + result = local_unpack(' end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + value_start_pos = pos + (element, pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + if pos > endpoint: + if element in enum_type.values_by_number: + del value[-1] # Discard corrupt value. + else: + del message._unknown_fields[-1] + # pylint: disable=protected-access + del message._unknown_field_set._values[-1] + # pylint: enable=protected-access + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (tag_bytes, buffer[pos:new_pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. 
+ + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value_start_pos = pos + (enum_value, pos) = _DecodeSignedVarint32(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not enum_value: + field_dict.pop(key, None) + return pos + # pylint: disable=protected-access + if enum_value in enum_type.values_by_number: + field_dict[key] = enum_value + else: + if not message._unknown_fields: + message._unknown_fields = [] + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_VARINT) + message._unknown_fields.append( + (tag_bytes, buffer[value_start_pos:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, enum_value) + # pylint: enable=protected-access + return pos + return DecodeField + + +# -------------------------------------------------------------------- + + +Int32Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32) + +Int64Decoder = _SimpleDecoder( + wire_format.WIRETYPE_VARINT, _DecodeSignedVarint) + +UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32) +UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint) + +SInt32Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode) +SInt64Decoder = _ModifiedDecoder( + wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, ' end: + raise _DecodeError('Truncated string.') + value.append(_ConvertToUnicode(buffer[pos:new_pos])) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. 
+ return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) + return new_pos + return DecodeField + + +def BytesDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + """Returns a decoder for a bytes field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + value.append(buffer[pos:new_pos].tobytes()) + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = buffer[pos:new_pos].tobytes() + return new_pos + return DecodeField + + +def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a group field.""" + + end_tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_END_GROUP) + end_tag_len = len(end_tag_bytes) + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_START_GROUP) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value.add()._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value._InternalParse(buffer, pos, end) + # Read end tag. 
+ new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + return new_pos + return DecodeField + + +def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a message field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + return new_pos + return DecodeField + + +# -------------------------------------------------------------------- + +MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) + +def MessageSetItemDecoder(descriptor): + """Returns a decoder for a MessageSet item. + + The parameter is the message Descriptor. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + local_ReadTag = ReadTag + local_DecodeVarint = _DecodeVarint + local_SkipField = SkipField + + def DecodeItem(buffer, pos, end, message, field_dict): + """Decode serialized message set to its value and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + message_set_item_start = pos + type_id = -1 + message_start = -1 + message_end = -1 + + # Technically, type_id and message can appear in any order, so we need + # a little loop here. 
+ while 1: + (tag_bytes, pos) = local_ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = local_DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = local_DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + extension = message.Extensions._FindExtensionByNumber(type_id) + # pylint: disable=protected-access + if extension is not None: + value = field_dict.get(extension) + if value is None: + message_type = extension.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + message._FACTORY.GetPrototype(message_type) + value = field_dict.setdefault( + extension, message_type._concrete_class()) + if value._InternalParse(buffer, message_start,message_end) != message_end: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + type_id, + wire_format.WIRETYPE_LENGTH_DELIMITED, + buffer[message_start:message_end].tobytes()) + # pylint: enable=protected-access + + return pos + + return DecodeItem + +# -------------------------------------------------------------------- + +def MapDecoder(field_descriptor, new_default, is_message_map): + """Returns a decoder for a map field.""" + + key = field_descriptor + tag_bytes = encoder.TagBytes(field_descriptor.number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + local_DecodeVarint = _DecodeVarint + # Can't read _concrete_class yet; might not be initialized. + message_type = field_descriptor.message_type + + def DecodeMap(buffer, pos, end, message, field_dict): + submsg = message_type._concrete_class() + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + submsg.Clear() + if submsg._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + + if is_message_map: + value[submsg.key].CopyFrom(submsg.value) + else: + value[submsg.key] = submsg.value + + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + + return DecodeMap + +# -------------------------------------------------------------------- +# Optimization is not as heavy here because calls to SkipField() are rare, +# except for handling end-group tags. + +def _SkipVarint(buffer, pos, end): + """Skip a varint value. 
Returns the new position."""
+  # Previously ord(buffer[pos]) raised IndexError when pos is out of range.
+  # With this code, ord(b'') raises TypeError.  Both are handled in
+  # python_message.py to generate a 'Truncated message' error.
+  while ord(buffer[pos:pos+1].tobytes()) & 0x80:
+    pos += 1
+  pos += 1
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+def _SkipFixed64(buffer, pos, end):
+  """Skip a fixed64 value.  Returns the new position."""
+
+  pos += 8
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _DecodeFixed64(buffer, pos):
+  """Decode a fixed64."""
+  new_pos = pos + 8
+  return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
+
+
+def _SkipLengthDelimited(buffer, pos, end):
+  """Skip a length-delimited value.  Returns the new position."""
+
+  (size, pos) = _DecodeVarint(buffer, pos)
+  pos += size
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _SkipGroup(buffer, pos, end):
+  """Skip sub-group.  Returns the new position."""
+
+  while 1:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    new_pos = SkipField(buffer, pos, end, tag_bytes)
+    if new_pos == -1:
+      return pos
+    pos = new_pos
+
+
+def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
+  """Decode UnknownFieldSet.  Returns the UnknownFieldSet and new position."""
+
+  unknown_field_set = containers.UnknownFieldSet()
+  while end_pos is None or pos < end_pos:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    (tag, _) = _DecodeVarint(tag_bytes, 0)
+    field_number, wire_type = wire_format.UnpackTag(tag)
+    if wire_type == wire_format.WIRETYPE_END_GROUP:
+      break
+    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
+    # pylint: disable=protected-access
+    unknown_field_set._add(field_number, wire_type, data)
+
+  return (unknown_field_set, pos)
+
+
+def _DecodeUnknownField(buffer, pos, wire_type):
+  """Decode a unknown field.  Returns the UnknownField and new position."""
+
+  if wire_type == wire_format.WIRETYPE_VARINT:
+    (data, pos) = _DecodeVarint(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED64:
+    (data, pos) = _DecodeFixed64(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED32:
+    (data, pos) = _DecodeFixed32(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
+    (size, pos) = _DecodeVarint(buffer, pos)
+    data = buffer[pos:pos+size].tobytes()
+    pos += size
+  elif wire_type == wire_format.WIRETYPE_START_GROUP:
+    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_END_GROUP:
+    return (0, -1)
+  else:
+    raise _DecodeError('Wrong wire type in tag.')
+
+  return (data, pos)
+
+
+def _EndGroup(buffer, pos, end):
+  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
+
+  return -1
+
+
+def _SkipFixed32(buffer, pos, end):
+  """Skip a fixed32 value.
Returns the new position.""" + + pos += 4 + if pos > end: + raise _DecodeError('Truncated message.') + return pos + + +def _DecodeFixed32(buffer, pos): + """Decode a fixed32.""" + + new_pos = pos + 4 + return (struct.unpack('B').pack + + def EncodeVarint(write, value, unused_deterministic=None): + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeVarint + + +def _SignedVarintEncoder(): + """Return an encoder for a basic signed varint value (does not include + tag).""" + + local_int2byte = struct.Struct('>B').pack + + def EncodeSignedVarint(write, value, unused_deterministic=None): + if value < 0: + value += (1 << 64) + bits = value & 0x7f + value >>= 7 + while value: + write(local_int2byte(0x80|bits)) + bits = value & 0x7f + value >>= 7 + return write(local_int2byte(bits)) + + return EncodeSignedVarint + + +_EncodeVarint = _VarintEncoder() +_EncodeSignedVarint = _SignedVarintEncoder() + + +def _VarintBytes(value): + """Encode the given integer as a varint and return the bytes. This is only + called at startup time so it doesn't need to be fast.""" + + pieces = [] + _EncodeVarint(pieces.append, value, True) + return b"".join(pieces) + + +def TagBytes(field_number, wire_type): + """Encode the given tag and return the bytes. Only called at startup.""" + + return bytes(_VarintBytes(wire_format.PackTag(field_number, wire_type))) + +# -------------------------------------------------------------------- +# As with sizers (see above), we have a number of common encoder +# implementations. + + +def _SimpleEncoder(wire_type, encode_value, compute_value_size): + """Return a constructor for an encoder for fields of a particular type. + + Args: + wire_type: The field's wire type, for encoding tags. + encode_value: A function which encodes an individual value, e.g. + _EncodeVarint(). + compute_value_size: A function which computes the size of an individual + value, e.g. _VarintSize(). + """ + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(element) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, element, deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, element, deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, value, deterministic) + return EncodeField + + return SpecificEncoder + + +def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value): + """Like SimpleEncoder but additionally invokes modify_value on every value + before passing it to encode_value. 
Usually modify_value is ZigZagEncode.""" + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(modify_value(element)) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, modify_value(element), deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, modify_value(element), deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, modify_value(value), deterministic) + return EncodeField + + return SpecificEncoder + + +def _StructPackEncoder(wire_type, format): + """Return a constructor for an encoder for a fixed-width field. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + write(local_struct_pack(format, element)) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + write(local_struct_pack(format, element)) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + return write(local_struct_pack(format, value)) + return EncodeField + + return SpecificEncoder + + +def _FloatingPointEncoder(wire_type, format): + """Return a constructor for an encoder for float fields. + + This is like StructPackEncoder, but catches errors that may be due to + passing non-finite floating-point values to struct.pack, and makes a + second attempt to encode those values. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. 
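+      # Illustrative reference (assuming a typical CPython / IEEE-754 build,
+      # not asserted by the upstream file): where struct.pack accepts
+      # non-finite floats, the literals below are exactly what it returns,
+      #   struct.pack('<f', float('inf'))  == b'\x00\x00\x80\x7f'
+      #   struct.pack('<f', float('-inf')) == b'\x00\x00\x80\xff'
+      #   struct.pack('<f', float('nan'))  == b'\x00\x00\xc0\x7f'  (typically)
+      # so this helper only changes behavior on interpreters whose pack()
+      # raises for non-finite input.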
+ if value == _POS_INF: + write(b'\x00\x00\x80\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x80\xFF') + elif value != value: # NaN + write(b'\x00\x00\xC0\x7F') + else: + raise + elif value_size == 8: + def EncodeNonFiniteOrRaise(write, value): + if value == _POS_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') + elif value != value: # NaN + write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') + else: + raise + else: + raise ValueError('Can\'t encode floating-point values that are ' + '%d bytes long (only 4 or 8)' % value_size) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + # This try/except block is going to be faster than any code that + # we could write to check whether element is finite. + try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + try: + write(local_struct_pack(format, element)) + except SystemError: + EncodeNonFiniteOrRaise(write, element) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + try: + write(local_struct_pack(format, value)) + except SystemError: + EncodeNonFiniteOrRaise(write, value) + return EncodeField + + return SpecificEncoder + + +# ==================================================================== +# Here we declare an encoder constructor for each field type. These work +# very similarly to sizer constructors, described earlier. + + +Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize) + +UInt32Encoder = UInt64Encoder = _SimpleEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize) + +SInt32Encoder = SInt64Encoder = _ModifiedEncoder( + wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize, + wire_format.ZigZagEncode) + +# Note that Python conveniently guarantees that when using the '<' prefix on +# formats, they will also have the same size across all platforms (as opposed +# to without the prefix, where their sizes depend on the C compiler's basic +# type sizes). +Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, ' str + ValueType = int + + def __init__(self, enum_type): + """Inits EnumTypeWrapper with an EnumDescriptor.""" + self._enum_type = enum_type + self.DESCRIPTOR = enum_type # pylint: disable=invalid-name + + def Name(self, number): # pylint: disable=invalid-name + """Returns a string containing the name of an enum value.""" + try: + return self._enum_type.values_by_number[number].name + except KeyError: + pass # fall out to break exception chaining + + if not isinstance(number, int): + raise TypeError( + 'Enum value for {} must be an int, but got {} {!r}.'.format( + self._enum_type.name, type(number), number)) + else: + # repr here to handle the odd case when you pass in a boolean. 
+ raise ValueError('Enum {} has no name defined for value {!r}'.format( + self._enum_type.name, number)) + + def Value(self, name): # pylint: disable=invalid-name + """Returns the value corresponding to the given enum name.""" + try: + return self._enum_type.values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise ValueError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) + + def keys(self): + """Return a list of the string names in the enum. + + Returns: + A list of strs, in the order they were defined in the .proto file. + """ + + return [value_descriptor.name + for value_descriptor in self._enum_type.values] + + def values(self): + """Return a list of the integer values in the enum. + + Returns: + A list of ints, in the order they were defined in the .proto file. + """ + + return [value_descriptor.number + for value_descriptor in self._enum_type.values] + + def items(self): + """Return a list of the (name, value) pairs of the enum. + + Returns: + A list of (str, int) pairs, in the order they were defined + in the .proto file. + """ + return [(value_descriptor.name, value_descriptor.number) + for value_descriptor in self._enum_type.values] + + def __getattr__(self, name): + """Returns the value corresponding to the given enum name.""" + try: + return super( + EnumTypeWrapper, + self).__getattribute__('_enum_type').values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise AttributeError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/extension_dict.py b/MLPY/Lib/site-packages/google/protobuf/internal/extension_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..ada6e25c1fd6a8b444c05772e17f8b021c5d76aa --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/extension_dict.py @@ -0,0 +1,213 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains _ExtensionDict class to represent extensions. +""" + +from google.protobuf.internal import type_checkers +from google.protobuf.descriptor import FieldDescriptor + + +def _VerifyExtensionHandle(message, extension_handle): + """Verify that the given extension handle is valid.""" + + if not isinstance(extension_handle, FieldDescriptor): + raise KeyError('HasExtension() expects an extension handle, got: %s' % + extension_handle) + + if not extension_handle.is_extension: + raise KeyError('"%s" is not an extension.' % extension_handle.full_name) + + if not extension_handle.containing_type: + raise KeyError('"%s" is missing a containing_type.' + % extension_handle.full_name) + + if extension_handle.containing_type is not message.DESCRIPTOR: + raise KeyError('Extension "%s" extends message type "%s", but this ' + 'message is of type "%s".' % + (extension_handle.full_name, + extension_handle.containing_type.full_name, + message.DESCRIPTOR.full_name)) + + +# TODO(robinson): Unify error handling of "unknown extension" crap. +# TODO(robinson): Support iteritems()-style iteration over all +# extensions with the "has" bits turned on? +class _ExtensionDict(object): + + """Dict-like container for Extension fields on proto instances. + + Note that in all cases we expect extension handles to be + FieldDescriptors. + """ + + def __init__(self, extended_message): + """ + Args: + extended_message: Message instance for which we are the Extensions dict. + """ + self._extended_message = extended_message + + def __getitem__(self, extension_handle): + """Returns the current value of the given extension handle.""" + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + result = self._extended_message._fields.get(extension_handle) + if result is not None: + return result + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + result = extension_handle._default_constructor(self._extended_message) + elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + message_type = extension_handle.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + self._extended_message._FACTORY.GetPrototype(message_type) + assert getattr(extension_handle.message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (extension_handle.full_name, + extension_handle.message_type.full_name)) + result = extension_handle.message_type._concrete_class() + try: + result._SetListener(self._extended_message._listener_for_children) + except ReferenceError: + pass + else: + # Singular scalar -- just return the default without inserting into the + # dict. + return extension_handle.default_value + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. 
This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + result = self._extended_message._fields.setdefault( + extension_handle, result) + + return result + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + + my_fields = self._extended_message.ListFields() + other_fields = other._extended_message.ListFields() + + # Get rid of non-extension fields. + my_fields = [field for field in my_fields if field.is_extension] + other_fields = [field for field in other_fields if field.is_extension] + + return my_fields == other_fields + + def __ne__(self, other): + return not self == other + + def __len__(self): + fields = self._extended_message.ListFields() + # Get rid of non-extension fields. + extension_fields = [field for field in fields if field[0].is_extension] + return len(extension_fields) + + def __hash__(self): + raise TypeError('unhashable object') + + # Note that this is only meaningful for non-repeated, scalar extension + # fields. Note also that we may have to call _Modified() when we do + # successfully set a field this way, to set any necessary "has" bits in the + # ancestors of the extended message. + def __setitem__(self, extension_handle, value): + """If extension_handle specifies a non-repeated, scalar extension + field, sets the value of that field. + """ + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or + extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE): + raise TypeError( + 'Cannot assign to extension "%s" because it is a repeated or ' + 'composite type.' % extension_handle.full_name) + + # It's slightly wasteful to lookup the type checker each time, + # but we expect this to be a vanishingly uncommon case anyway. + type_checker = type_checkers.GetTypeChecker(extension_handle) + # pylint: disable=protected-access + self._extended_message._fields[extension_handle] = ( + type_checker.CheckValue(value)) + self._extended_message._Modified() + + def __delitem__(self, extension_handle): + self._extended_message.ClearExtension(extension_handle) + + def _FindExtensionByName(self, name): + """Tries to find a known extension with the specified name. + + Args: + name: Extension full name. + + Returns: + Extension field descriptor. + """ + return self._extended_message._extensions_by_name.get(name, None) + + def _FindExtensionByNumber(self, number): + """Tries to find a known extension with the field number. + + Args: + number: Extension field number. + + Returns: + Extension field descriptor. 
+ """ + return self._extended_message._extensions_by_number.get(number, None) + + def __iter__(self): + # Return a generator over the populated extension fields + return (f[0] for f in self._extended_message.ListFields() + if f[0].is_extension) + + def __contains__(self, extension_handle): + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if extension_handle not in self._extended_message._fields: + return False + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + return bool(self._extended_message._fields.get(extension_handle)) + + if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + value = self._extended_message._fields.get(extension_handle) + # pylint: disable=protected-access + return value is not None and value._is_present_in_parent + + return True diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/message_listener.py b/MLPY/Lib/site-packages/google/protobuf/internal/message_listener.py new file mode 100644 index 0000000000000000000000000000000000000000..769c71e8b5da8ccc70ca5c05c00c52590cf0b6a6 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/message_listener.py @@ -0,0 +1,78 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines a listener interface for observing certain +state transitions on Message objects. + +Also defines a null implementation of this interface. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +class MessageListener(object): + + """Listens for modifications made to a message. Meant to be registered via + Message._SetListener(). + + Attributes: + dirty: If True, then calling Modified() would be a no-op. This can be + used to avoid these calls entirely in the common case. + """ + + def Modified(self): + """Called every time the message is modified in such a way that the parent + message may need to be updated. 
This currently means either: + (a) The message was modified for the first time, so the parent message + should henceforth mark the message as present. + (b) The message's cached byte size became dirty -- i.e. the message was + modified for the first time after a previous call to ByteSize(). + Therefore the parent should also mark its byte size as dirty. + Note that (a) implies (b), since new objects start out with a client cached + size (zero). However, we document (a) explicitly because it is important. + + Modified() will *only* be called in response to one of these two events -- + not every time the sub-message is modified. + + Note that if the listener's |dirty| attribute is true, then calling + Modified at the moment would be a no-op, so it can be skipped. Performance- + sensitive callers should check this attribute directly before calling since + it will be true most of the time. + """ + + raise NotImplementedError + + +class NullMessageListener(object): + + """No-op MessageListener implementation.""" + + def Modified(self): + pass diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/message_set_extensions_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/message_set_extensions_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..63651a3f19bdfc054ce73d6df42b664c50324da0 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/message_set_extensions_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/message_set_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. 
\x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.message_set_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageSet.RegisterExtension(message_set_extension3) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension']) + TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension']) + + DESCRIPTOR._options = None + _TESTMESSAGESET._options = None + _TESTMESSAGESET._serialized_options = b'\010\001' + _TESTMESSAGESET._serialized_start=83 + _TESTMESSAGESET._serialized_end=113 + _TESTMESSAGESETEXTENSION1._serialized_start=116 + _TESTMESSAGESETEXTENSION1._serialized_end=281 + _TESTMESSAGESETEXTENSION2._serialized_start=284 + _TESTMESSAGESETEXTENSION2._serialized_end=451 + _TESTMESSAGESETEXTENSION3._serialized_start=453 + _TESTMESSAGESETEXTENSION3._serialized_end=493 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/missing_enum_values_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/missing_enum_values_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..5497083197f76e818ed39cebd44637e9ffed37ad --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/missing_enum_values_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/missing_enum_values.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n2google/protobuf/internal/missing_enum_values.proto\x12\x1fgoogle.protobuf.python.internal\"\xc1\x02\n\x0eTestEnumValues\x12X\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12X\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12Z\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnumB\x02\x10\x01\"\x1f\n\nNestedEnum\x12\x08\n\x04ZERO\x10\x00\x12\x07\n\x03ONE\x10\x01\"\xd3\x02\n\x15TestMissingEnumValues\x12_\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12_\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12\x61\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnumB\x02\x10\x01\"\x15\n\nNestedEnum\x12\x07\n\x03TWO\x10\x02\"\x1b\n\nJustString\x12\r\n\x05\x64ummy\x18\x01 \x02(\t') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.missing_enum_values_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + 
_TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._options = None + _TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001' + _TESTENUMVALUES._serialized_start=88 + _TESTENUMVALUES._serialized_end=409 + _TESTENUMVALUES_NESTEDENUM._serialized_start=378 + _TESTENUMVALUES_NESTEDENUM._serialized_end=409 + _TESTMISSINGENUMVALUES._serialized_start=412 + _TESTMISSINGENUMVALUES._serialized_end=751 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_start=730 + _TESTMISSINGENUMVALUES_NESTEDENUM._serialized_end=751 + _JUSTSTRING._serialized_start=753 + _JUSTSTRING._serialized_end=780 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_dynamic_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_dynamic_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..0953706baca1769345da54ae0ecd94d2315e9735 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_dynamic_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/more_extensions_dynamic.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf.internal import more_extensions_pb2 as google_dot_protobuf_dot_internal_dot_more__extensions__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6google/protobuf/internal/more_extensions_dynamic.proto\x12\x18google.protobuf.internal\x1a.google/protobuf/internal/more_extensions.proto\"\x1f\n\x12\x44ynamicMessageType\x12\t\n\x01\x61\x18\x01 \x01(\x05:J\n\x17\x64ynamic_int32_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x64 \x01(\x05:z\n\x19\x64ynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x65 \x01(\x0b\x32,.google.protobuf.internal.DynamicMessageType:\x83\x01\n\"repeated_dynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x66 \x03(\x0b\x32,.google.protobuf.internal.DynamicMessageType') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_dynamic_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_int32_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_message_extension) + google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(repeated_dynamic_message_extension) + + DESCRIPTOR._options = None + _DYNAMICMESSAGETYPE._serialized_start=132 + _DYNAMICMESSAGETYPE._serialized_end=163 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..1cfa1b7c8b978dea17551b58de30e97c30054f34 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/more_extensions_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# 
Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/internal/more_extensions.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"\x99\x01\n\x0fTopLevelMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\x12\x43\n\x0enested_message\x18\x02 \x01(\x0b\x32\'.google.protobuf.internal.NestedMessageB\x02(\x01\"R\n\rNestedMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\"K\n\x0f\x45xtendedMessage\x12\x17\n\x0eoptional_int32\x18\xe9\x07 \x01(\x05\x12\x18\n\x0frepeated_string\x18\xea\x07 \x03(\t*\x05\x08\x01\x10\xe8\x07\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + ExtendedMessage.RegisterExtension(optional_int_extension) + ExtendedMessage.RegisterExtension(optional_message_extension) + ExtendedMessage.RegisterExtension(repeated_int_extension) + ExtendedMessage.RegisterExtension(repeated_message_extension) + + DESCRIPTOR._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._options = None + _TOPLEVELMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE.fields_by_name['nested_message']._options = None + _TOPLEVELMESSAGE.fields_by_name['nested_message']._serialized_options = b'(\001' + _NESTEDMESSAGE.fields_by_name['submessage']._options = None + _NESTEDMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001' + _TOPLEVELMESSAGE._serialized_start=77 + _TOPLEVELMESSAGE._serialized_end=230 + _NESTEDMESSAGE._serialized_start=232 + _NESTEDMESSAGE._serialized_end=314 + _EXTENDEDMESSAGE._serialized_start=316 + _EXTENDEDMESSAGE._serialized_end=391 + _FOREIGNMESSAGE._serialized_start=393 + _FOREIGNMESSAGE._serialized_end=438 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/more_messages_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/more_messages_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f71156094d9dec2fe6623d80869c26a31597af --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/more_messages_pb2.py @@ -0,0 +1,556 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/more_messages.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,google/protobuf/internal/more_messages.proto\x12\x18google.protobuf.internal\"h\n\x10OutOfOrderFields\x12\x17\n\x0foptional_sint32\x18\x05 \x01(\x11\x12\x17\n\x0foptional_uint32\x18\x03 \x01(\r\x12\x16\n\x0eoptional_int32\x18\x01 \x01(\x05*\x04\x08\x04\x10\x05*\x04\x08\x02\x10\x03\"\xcd\x02\n\x05\x63lass\x12\x1b\n\tint_field\x18\x01 \x01(\x05R\x08json_int\x12\n\n\x02if\x18\x02 \x01(\x05\x12(\n\x02\x61s\x18\x03 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12\x30\n\nenum_field\x18\x04 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12>\n\x11nested_enum_field\x18\x05 \x01(\x0e\x32#.google.protobuf.internal.class.for\x12;\n\x0enested_message\x18\x06 \x01(\x0b\x32#.google.protobuf.internal.class.try\x1a\x1c\n\x03try\x12\r\n\x05\x66ield\x18\x01 \x01(\x05*\x06\x08\xe7\x07\x10\x90N\"\x1c\n\x03\x66or\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04True\x10\x01*\x06\x08\xe7\x07\x10\x90N\"?\n\x0b\x45xtendClass20\n\x06return\x12\x1f.google.protobuf.internal.class\x18\xea\x07 \x01(\x05\"~\n\x0fTestFullKeyword\x12:\n\x06\x66ield1\x18\x01 \x01(\x0b\x32*.google.protobuf.internal.OutOfOrderFields\x12/\n\x06\x66ield2\x18\x02 \x01(\x0b\x32\x1f.google.protobuf.internal.class\"\xa5\x0f\n\x11LotsNestedMessage\x1a\x04\n\x02\x42\x30\x1a\x04\n\x02\x42\x31\x1a\x04\n\x02\x42\x32\x1a\x04\n\x02\x42\x33\x1a\x04\n\x02\x42\x34\x1a\x04\n\x02\x42\x35\x1a\x04\n\x02\x42\x36\x1a\x04\n\x02\x42\x37\x1a\x04\n\x02\x42\x38\x1a\x04\n\x02\x42\x39\x1a\x05\n\x03\x42\x31\x30\x1a\x05\n\x03\x42\x31\x31\x1a\x05\n\x03\x42\x31\x32\x1a\x05\n\x03\x42\x31\x33\x1a\x05\n\x03\x42\x31\x34\x1a\x05\n\x03\x42\x31\x35\x1a\x05\n\x03\x42\x31\x36\x1a\x05\n\x03\x42\x31\x37\x1a\x05\n\x03\x42\x31\x38\x1a\x05\n\x03\x42\x31\x39\x1a\x05\n\x03\x42\x32\x30\x1a\x05\n\x03\x42\x32\x31\x1a\x05\n\x03\x42\x32\x32\x1a\x05\n\x03\x42\x32\x33\x1a\x05\n\x03\x42\x32\x34\x1a\x05\n\x03\x42\x32\x35\x1a\x05\n\x03\x42\x32\x36\x1a\x05\n\x03\x42\x32\x37\x1a\x05\n\x03\x42\x32\x38\x1a\x05\n\x03\x42\x32\x39\x1a\x05\n\x03\x42\x33\x30\x1a\x05\n\x03\x42\x33\x31\x1a\x05\n\x03\x42\x33\x32\x1a\x05\n\x03\x42\x33\x33\x1a\x05\n\x03\x42\x33\x34\x1a\x05\n\x03\x42\x33\x35\x1a\x05\n\x03\x42\x33\x36\x1a\x05\n\x03\x42\x33\x37\x1a\x05\n\x03\x42\x33\x38\x1a\x05\n\x03\x42\x33\x39\x1a\x05\n\x03\x42\x34\x30\x1a\x05\n\x03\x42\x34\x31\x1a\x05\n\x03\x42\x34\x32\x1a\x05\n\x03\x42\x34\x33\x1a\x05\n\x03\x42\x34\x34\x1a\x05\n\x03\x42\x34\x35\x1a\x05\n\x03\x42\x34\x36\x1a\x05\n\x03\x42\x34\x37\x1a\x05\n\x03\x42\x34\x38\x1a\x05\n\x03\x42\x34\x39\x1a\x05\n\x03\x42\x35\x30\x1a\x05\n\x03\x42\x35\x31\x1a\x05\n\x03\x42\x35\x32\x1a\x05\n\x03\x42\x35\x33\x1a\x05\n\x03\x42\x35\x34\x1a\x05\n\x03\x42\x35\x35\x1a\x05\n\x03\x42\x35\x36\x1a\x05\n\x03\x42\x35\x37\x1a\x05\n\x03\x42\x35\x38\x1a\x05\n\x03\x42\x35\x39\x1a\x05\n\x03\x42\x36\x30\x1a\x05\n\x03\x42\x36\x31\x1a\x05\n\x03\x42\x36\x32\x1a\x05\n\x03\x42\x36\x33\x1a\x05\n\x03\x42\x36\x34\x1a\x05\n\x03\x42\x36\x35\x1a\x05\n\x03\x42\x36\x36\x1a\x05\n\x03\x42\x36\x37\x1a\x05\n\x03\x42\x36\x38\x1a\x05\n\x03\x42\x36\x39\x1a\x05\n\x03\x42\x37\x30\x1a\x05\n\x03\x42\x37\x31\x1a\x05\n\x03\x42\x37\
x32\x1a\x05\n\x03\x42\x37\x33\x1a\x05\n\x03\x42\x37\x34\x1a\x05\n\x03\x42\x37\x35\x1a\x05\n\x03\x42\x37\x36\x1a\x05\n\x03\x42\x37\x37\x1a\x05\n\x03\x42\x37\x38\x1a\x05\n\x03\x42\x37\x39\x1a\x05\n\x03\x42\x38\x30\x1a\x05\n\x03\x42\x38\x31\x1a\x05\n\x03\x42\x38\x32\x1a\x05\n\x03\x42\x38\x33\x1a\x05\n\x03\x42\x38\x34\x1a\x05\n\x03\x42\x38\x35\x1a\x05\n\x03\x42\x38\x36\x1a\x05\n\x03\x42\x38\x37\x1a\x05\n\x03\x42\x38\x38\x1a\x05\n\x03\x42\x38\x39\x1a\x05\n\x03\x42\x39\x30\x1a\x05\n\x03\x42\x39\x31\x1a\x05\n\x03\x42\x39\x32\x1a\x05\n\x03\x42\x39\x33\x1a\x05\n\x03\x42\x39\x34\x1a\x05\n\x03\x42\x39\x35\x1a\x05\n\x03\x42\x39\x36\x1a\x05\n\x03\x42\x39\x37\x1a\x05\n\x03\x42\x39\x38\x1a\x05\n\x03\x42\x39\x39\x1a\x06\n\x04\x42\x31\x30\x30\x1a\x06\n\x04\x42\x31\x30\x31\x1a\x06\n\x04\x42\x31\x30\x32\x1a\x06\n\x04\x42\x31\x30\x33\x1a\x06\n\x04\x42\x31\x30\x34\x1a\x06\n\x04\x42\x31\x30\x35\x1a\x06\n\x04\x42\x31\x30\x36\x1a\x06\n\x04\x42\x31\x30\x37\x1a\x06\n\x04\x42\x31\x30\x38\x1a\x06\n\x04\x42\x31\x30\x39\x1a\x06\n\x04\x42\x31\x31\x30\x1a\x06\n\x04\x42\x31\x31\x31\x1a\x06\n\x04\x42\x31\x31\x32\x1a\x06\n\x04\x42\x31\x31\x33\x1a\x06\n\x04\x42\x31\x31\x34\x1a\x06\n\x04\x42\x31\x31\x35\x1a\x06\n\x04\x42\x31\x31\x36\x1a\x06\n\x04\x42\x31\x31\x37\x1a\x06\n\x04\x42\x31\x31\x38\x1a\x06\n\x04\x42\x31\x31\x39\x1a\x06\n\x04\x42\x31\x32\x30\x1a\x06\n\x04\x42\x31\x32\x31\x1a\x06\n\x04\x42\x31\x32\x32\x1a\x06\n\x04\x42\x31\x32\x33\x1a\x06\n\x04\x42\x31\x32\x34\x1a\x06\n\x04\x42\x31\x32\x35\x1a\x06\n\x04\x42\x31\x32\x36\x1a\x06\n\x04\x42\x31\x32\x37\x1a\x06\n\x04\x42\x31\x32\x38\x1a\x06\n\x04\x42\x31\x32\x39\x1a\x06\n\x04\x42\x31\x33\x30\x1a\x06\n\x04\x42\x31\x33\x31\x1a\x06\n\x04\x42\x31\x33\x32\x1a\x06\n\x04\x42\x31\x33\x33\x1a\x06\n\x04\x42\x31\x33\x34\x1a\x06\n\x04\x42\x31\x33\x35\x1a\x06\n\x04\x42\x31\x33\x36\x1a\x06\n\x04\x42\x31\x33\x37\x1a\x06\n\x04\x42\x31\x33\x38\x1a\x06\n\x04\x42\x31\x33\x39\x1a\x06\n\x04\x42\x31\x34\x30\x1a\x06\n\x04\x42\x31\x34\x31\x1a\x06\n\x04\x42\x31\x34\x32\x1a\x06\n\x04\x42\x31\x34\x33\x1a\x06\n\x04\x42\x31\x34\x34\x1a\x06\n\x04\x42\x31\x34\x35\x1a\x06\n\x04\x42\x31\x34\x36\x1a\x06\n\x04\x42\x31\x34\x37\x1a\x06\n\x04\x42\x31\x34\x38\x1a\x06\n\x04\x42\x31\x34\x39\x1a\x06\n\x04\x42\x31\x35\x30\x1a\x06\n\x04\x42\x31\x35\x31\x1a\x06\n\x04\x42\x31\x35\x32\x1a\x06\n\x04\x42\x31\x35\x33\x1a\x06\n\x04\x42\x31\x35\x34\x1a\x06\n\x04\x42\x31\x35\x35\x1a\x06\n\x04\x42\x31\x35\x36\x1a\x06\n\x04\x42\x31\x35\x37\x1a\x06\n\x04\x42\x31\x35\x38\x1a\x06\n\x04\x42\x31\x35\x39\x1a\x06\n\x04\x42\x31\x36\x30\x1a\x06\n\x04\x42\x31\x36\x31\x1a\x06\n\x04\x42\x31\x36\x32\x1a\x06\n\x04\x42\x31\x36\x33\x1a\x06\n\x04\x42\x31\x36\x34\x1a\x06\n\x04\x42\x31\x36\x35\x1a\x06\n\x04\x42\x31\x36\x36\x1a\x06\n\x04\x42\x31\x36\x37\x1a\x06\n\x04\x42\x31\x36\x38\x1a\x06\n\x04\x42\x31\x36\x39\x1a\x06\n\x04\x42\x31\x37\x30\x1a\x06\n\x04\x42\x31\x37\x31\x1a\x06\n\x04\x42\x31\x37\x32\x1a\x06\n\x04\x42\x31\x37\x33\x1a\x06\n\x04\x42\x31\x37\x34\x1a\x06\n\x04\x42\x31\x37\x35\x1a\x06\n\x04\x42\x31\x37\x36\x1a\x06\n\x04\x42\x31\x37\x37\x1a\x06\n\x04\x42\x31\x37\x38\x1a\x06\n\x04\x42\x31\x37\x39\x1a\x06\n\x04\x42\x31\x38\x30\x1a\x06\n\x04\x42\x31\x38\x31\x1a\x06\n\x04\x42\x31\x38\x32\x1a\x06\n\x04\x42\x31\x38\x33\x1a\x06\n\x04\x42\x31\x38\x34\x1a\x06\n\x04\x42\x31\x38\x35\x1a\x06\n\x04\x42\x31\x38\x36\x1a\x06\n\x04\x42\x31\x38\x37\x1a\x06\n\x04\x42\x31\x38\x38\x1a\x06\n\x04\x42\x31\x38\x39\x1a\x06\n\x04\x42\x31\x39\x30\x1a\x06\n\x04\x42\x31\x39\x31\x1a\x06\n\x04\x42\x31\x39\x32\x1a\x06\n\x04\x42\x31\x39\x33\x1a\x06\n\x04\x42\x31\x39\x34
\x1a\x06\n\x04\x42\x31\x39\x35\x1a\x06\n\x04\x42\x31\x39\x36\x1a\x06\n\x04\x42\x31\x39\x37\x1a\x06\n\x04\x42\x31\x39\x38\x1a\x06\n\x04\x42\x31\x39\x39\x1a\x06\n\x04\x42\x32\x30\x30\x1a\x06\n\x04\x42\x32\x30\x31\x1a\x06\n\x04\x42\x32\x30\x32\x1a\x06\n\x04\x42\x32\x30\x33\x1a\x06\n\x04\x42\x32\x30\x34\x1a\x06\n\x04\x42\x32\x30\x35\x1a\x06\n\x04\x42\x32\x30\x36\x1a\x06\n\x04\x42\x32\x30\x37\x1a\x06\n\x04\x42\x32\x30\x38\x1a\x06\n\x04\x42\x32\x30\x39\x1a\x06\n\x04\x42\x32\x31\x30\x1a\x06\n\x04\x42\x32\x31\x31\x1a\x06\n\x04\x42\x32\x31\x32\x1a\x06\n\x04\x42\x32\x31\x33\x1a\x06\n\x04\x42\x32\x31\x34\x1a\x06\n\x04\x42\x32\x31\x35\x1a\x06\n\x04\x42\x32\x31\x36\x1a\x06\n\x04\x42\x32\x31\x37\x1a\x06\n\x04\x42\x32\x31\x38\x1a\x06\n\x04\x42\x32\x31\x39\x1a\x06\n\x04\x42\x32\x32\x30\x1a\x06\n\x04\x42\x32\x32\x31\x1a\x06\n\x04\x42\x32\x32\x32\x1a\x06\n\x04\x42\x32\x32\x33\x1a\x06\n\x04\x42\x32\x32\x34\x1a\x06\n\x04\x42\x32\x32\x35\x1a\x06\n\x04\x42\x32\x32\x36\x1a\x06\n\x04\x42\x32\x32\x37\x1a\x06\n\x04\x42\x32\x32\x38\x1a\x06\n\x04\x42\x32\x32\x39\x1a\x06\n\x04\x42\x32\x33\x30\x1a\x06\n\x04\x42\x32\x33\x31\x1a\x06\n\x04\x42\x32\x33\x32\x1a\x06\n\x04\x42\x32\x33\x33\x1a\x06\n\x04\x42\x32\x33\x34\x1a\x06\n\x04\x42\x32\x33\x35\x1a\x06\n\x04\x42\x32\x33\x36\x1a\x06\n\x04\x42\x32\x33\x37\x1a\x06\n\x04\x42\x32\x33\x38\x1a\x06\n\x04\x42\x32\x33\x39\x1a\x06\n\x04\x42\x32\x34\x30\x1a\x06\n\x04\x42\x32\x34\x31\x1a\x06\n\x04\x42\x32\x34\x32\x1a\x06\n\x04\x42\x32\x34\x33\x1a\x06\n\x04\x42\x32\x34\x34\x1a\x06\n\x04\x42\x32\x34\x35\x1a\x06\n\x04\x42\x32\x34\x36\x1a\x06\n\x04\x42\x32\x34\x37\x1a\x06\n\x04\x42\x32\x34\x38\x1a\x06\n\x04\x42\x32\x34\x39\x1a\x06\n\x04\x42\x32\x35\x30\x1a\x06\n\x04\x42\x32\x35\x31\x1a\x06\n\x04\x42\x32\x35\x32\x1a\x06\n\x04\x42\x32\x35\x33\x1a\x06\n\x04\x42\x32\x35\x34\x1a\x06\n\x04\x42\x32\x35\x35*\x1b\n\x02is\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04\x65lse\x10\x01:C\n\x0foptional_uint64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x04 \x01(\x04:B\n\x0eoptional_int64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x02 \x01(\x03:2\n\x08\x63ontinue\x12\x1f.google.protobuf.internal.class\x18\xe9\x07 \x01(\x05:2\n\x04with\x12#.google.protobuf.internal.class.try\x18\xe9\x07 \x01(\x05') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_messages_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + OutOfOrderFields.RegisterExtension(optional_uint64) + OutOfOrderFields.RegisterExtension(optional_int64) + globals()['class'].RegisterExtension(globals()['continue']) + getattr(globals()['class'], 'try').RegisterExtension(globals()['with']) + globals()['class'].RegisterExtension(_EXTENDCLASS.extensions_by_name['return']) + + DESCRIPTOR._options = None + _IS._serialized_start=2669 + _IS._serialized_end=2696 + _OUTOFORDERFIELDS._serialized_start=74 + _OUTOFORDERFIELDS._serialized_end=178 + _CLASS._serialized_start=181 + _CLASS._serialized_end=514 + _CLASS_TRY._serialized_start=448 + _CLASS_TRY._serialized_end=476 + _CLASS_FOR._serialized_start=478 + _CLASS_FOR._serialized_end=506 + _EXTENDCLASS._serialized_start=516 + _EXTENDCLASS._serialized_end=579 + _TESTFULLKEYWORD._serialized_start=581 + _TESTFULLKEYWORD._serialized_end=707 + _LOTSNESTEDMESSAGE._serialized_start=710 + _LOTSNESTEDMESSAGE._serialized_end=2667 + _LOTSNESTEDMESSAGE_B0._serialized_start=731 + _LOTSNESTEDMESSAGE_B0._serialized_end=735 + _LOTSNESTEDMESSAGE_B1._serialized_start=737 + 
_LOTSNESTEDMESSAGE_B1._serialized_end=741 + _LOTSNESTEDMESSAGE_B2._serialized_start=743 + _LOTSNESTEDMESSAGE_B2._serialized_end=747 + _LOTSNESTEDMESSAGE_B3._serialized_start=749 + _LOTSNESTEDMESSAGE_B3._serialized_end=753 + _LOTSNESTEDMESSAGE_B4._serialized_start=755 + _LOTSNESTEDMESSAGE_B4._serialized_end=759 + _LOTSNESTEDMESSAGE_B5._serialized_start=761 + _LOTSNESTEDMESSAGE_B5._serialized_end=765 + _LOTSNESTEDMESSAGE_B6._serialized_start=767 + _LOTSNESTEDMESSAGE_B6._serialized_end=771 + _LOTSNESTEDMESSAGE_B7._serialized_start=773 + _LOTSNESTEDMESSAGE_B7._serialized_end=777 + _LOTSNESTEDMESSAGE_B8._serialized_start=779 + _LOTSNESTEDMESSAGE_B8._serialized_end=783 + _LOTSNESTEDMESSAGE_B9._serialized_start=785 + _LOTSNESTEDMESSAGE_B9._serialized_end=789 + _LOTSNESTEDMESSAGE_B10._serialized_start=791 + _LOTSNESTEDMESSAGE_B10._serialized_end=796 + _LOTSNESTEDMESSAGE_B11._serialized_start=798 + _LOTSNESTEDMESSAGE_B11._serialized_end=803 + _LOTSNESTEDMESSAGE_B12._serialized_start=805 + _LOTSNESTEDMESSAGE_B12._serialized_end=810 + _LOTSNESTEDMESSAGE_B13._serialized_start=812 + _LOTSNESTEDMESSAGE_B13._serialized_end=817 + _LOTSNESTEDMESSAGE_B14._serialized_start=819 + _LOTSNESTEDMESSAGE_B14._serialized_end=824 + _LOTSNESTEDMESSAGE_B15._serialized_start=826 + _LOTSNESTEDMESSAGE_B15._serialized_end=831 + _LOTSNESTEDMESSAGE_B16._serialized_start=833 + _LOTSNESTEDMESSAGE_B16._serialized_end=838 + _LOTSNESTEDMESSAGE_B17._serialized_start=840 + _LOTSNESTEDMESSAGE_B17._serialized_end=845 + _LOTSNESTEDMESSAGE_B18._serialized_start=847 + _LOTSNESTEDMESSAGE_B18._serialized_end=852 + _LOTSNESTEDMESSAGE_B19._serialized_start=854 + _LOTSNESTEDMESSAGE_B19._serialized_end=859 + _LOTSNESTEDMESSAGE_B20._serialized_start=861 + _LOTSNESTEDMESSAGE_B20._serialized_end=866 + _LOTSNESTEDMESSAGE_B21._serialized_start=868 + _LOTSNESTEDMESSAGE_B21._serialized_end=873 + _LOTSNESTEDMESSAGE_B22._serialized_start=875 + _LOTSNESTEDMESSAGE_B22._serialized_end=880 + _LOTSNESTEDMESSAGE_B23._serialized_start=882 + _LOTSNESTEDMESSAGE_B23._serialized_end=887 + _LOTSNESTEDMESSAGE_B24._serialized_start=889 + _LOTSNESTEDMESSAGE_B24._serialized_end=894 + _LOTSNESTEDMESSAGE_B25._serialized_start=896 + _LOTSNESTEDMESSAGE_B25._serialized_end=901 + _LOTSNESTEDMESSAGE_B26._serialized_start=903 + _LOTSNESTEDMESSAGE_B26._serialized_end=908 + _LOTSNESTEDMESSAGE_B27._serialized_start=910 + _LOTSNESTEDMESSAGE_B27._serialized_end=915 + _LOTSNESTEDMESSAGE_B28._serialized_start=917 + _LOTSNESTEDMESSAGE_B28._serialized_end=922 + _LOTSNESTEDMESSAGE_B29._serialized_start=924 + _LOTSNESTEDMESSAGE_B29._serialized_end=929 + _LOTSNESTEDMESSAGE_B30._serialized_start=931 + _LOTSNESTEDMESSAGE_B30._serialized_end=936 + _LOTSNESTEDMESSAGE_B31._serialized_start=938 + _LOTSNESTEDMESSAGE_B31._serialized_end=943 + _LOTSNESTEDMESSAGE_B32._serialized_start=945 + _LOTSNESTEDMESSAGE_B32._serialized_end=950 + _LOTSNESTEDMESSAGE_B33._serialized_start=952 + _LOTSNESTEDMESSAGE_B33._serialized_end=957 + _LOTSNESTEDMESSAGE_B34._serialized_start=959 + _LOTSNESTEDMESSAGE_B34._serialized_end=964 + _LOTSNESTEDMESSAGE_B35._serialized_start=966 + _LOTSNESTEDMESSAGE_B35._serialized_end=971 + _LOTSNESTEDMESSAGE_B36._serialized_start=973 + _LOTSNESTEDMESSAGE_B36._serialized_end=978 + _LOTSNESTEDMESSAGE_B37._serialized_start=980 + _LOTSNESTEDMESSAGE_B37._serialized_end=985 + _LOTSNESTEDMESSAGE_B38._serialized_start=987 + _LOTSNESTEDMESSAGE_B38._serialized_end=992 + _LOTSNESTEDMESSAGE_B39._serialized_start=994 + _LOTSNESTEDMESSAGE_B39._serialized_end=999 + 
_LOTSNESTEDMESSAGE_B40._serialized_start=1001 + _LOTSNESTEDMESSAGE_B40._serialized_end=1006 + _LOTSNESTEDMESSAGE_B41._serialized_start=1008 + _LOTSNESTEDMESSAGE_B41._serialized_end=1013 + _LOTSNESTEDMESSAGE_B42._serialized_start=1015 + _LOTSNESTEDMESSAGE_B42._serialized_end=1020 + _LOTSNESTEDMESSAGE_B43._serialized_start=1022 + _LOTSNESTEDMESSAGE_B43._serialized_end=1027 + _LOTSNESTEDMESSAGE_B44._serialized_start=1029 + _LOTSNESTEDMESSAGE_B44._serialized_end=1034 + _LOTSNESTEDMESSAGE_B45._serialized_start=1036 + _LOTSNESTEDMESSAGE_B45._serialized_end=1041 + _LOTSNESTEDMESSAGE_B46._serialized_start=1043 + _LOTSNESTEDMESSAGE_B46._serialized_end=1048 + _LOTSNESTEDMESSAGE_B47._serialized_start=1050 + _LOTSNESTEDMESSAGE_B47._serialized_end=1055 + _LOTSNESTEDMESSAGE_B48._serialized_start=1057 + _LOTSNESTEDMESSAGE_B48._serialized_end=1062 + _LOTSNESTEDMESSAGE_B49._serialized_start=1064 + _LOTSNESTEDMESSAGE_B49._serialized_end=1069 + _LOTSNESTEDMESSAGE_B50._serialized_start=1071 + _LOTSNESTEDMESSAGE_B50._serialized_end=1076 + _LOTSNESTEDMESSAGE_B51._serialized_start=1078 + _LOTSNESTEDMESSAGE_B51._serialized_end=1083 + _LOTSNESTEDMESSAGE_B52._serialized_start=1085 + _LOTSNESTEDMESSAGE_B52._serialized_end=1090 + _LOTSNESTEDMESSAGE_B53._serialized_start=1092 + _LOTSNESTEDMESSAGE_B53._serialized_end=1097 + _LOTSNESTEDMESSAGE_B54._serialized_start=1099 + _LOTSNESTEDMESSAGE_B54._serialized_end=1104 + _LOTSNESTEDMESSAGE_B55._serialized_start=1106 + _LOTSNESTEDMESSAGE_B55._serialized_end=1111 + _LOTSNESTEDMESSAGE_B56._serialized_start=1113 + _LOTSNESTEDMESSAGE_B56._serialized_end=1118 + _LOTSNESTEDMESSAGE_B57._serialized_start=1120 + _LOTSNESTEDMESSAGE_B57._serialized_end=1125 + _LOTSNESTEDMESSAGE_B58._serialized_start=1127 + _LOTSNESTEDMESSAGE_B58._serialized_end=1132 + _LOTSNESTEDMESSAGE_B59._serialized_start=1134 + _LOTSNESTEDMESSAGE_B59._serialized_end=1139 + _LOTSNESTEDMESSAGE_B60._serialized_start=1141 + _LOTSNESTEDMESSAGE_B60._serialized_end=1146 + _LOTSNESTEDMESSAGE_B61._serialized_start=1148 + _LOTSNESTEDMESSAGE_B61._serialized_end=1153 + _LOTSNESTEDMESSAGE_B62._serialized_start=1155 + _LOTSNESTEDMESSAGE_B62._serialized_end=1160 + _LOTSNESTEDMESSAGE_B63._serialized_start=1162 + _LOTSNESTEDMESSAGE_B63._serialized_end=1167 + _LOTSNESTEDMESSAGE_B64._serialized_start=1169 + _LOTSNESTEDMESSAGE_B64._serialized_end=1174 + _LOTSNESTEDMESSAGE_B65._serialized_start=1176 + _LOTSNESTEDMESSAGE_B65._serialized_end=1181 + _LOTSNESTEDMESSAGE_B66._serialized_start=1183 + _LOTSNESTEDMESSAGE_B66._serialized_end=1188 + _LOTSNESTEDMESSAGE_B67._serialized_start=1190 + _LOTSNESTEDMESSAGE_B67._serialized_end=1195 + _LOTSNESTEDMESSAGE_B68._serialized_start=1197 + _LOTSNESTEDMESSAGE_B68._serialized_end=1202 + _LOTSNESTEDMESSAGE_B69._serialized_start=1204 + _LOTSNESTEDMESSAGE_B69._serialized_end=1209 + _LOTSNESTEDMESSAGE_B70._serialized_start=1211 + _LOTSNESTEDMESSAGE_B70._serialized_end=1216 + _LOTSNESTEDMESSAGE_B71._serialized_start=1218 + _LOTSNESTEDMESSAGE_B71._serialized_end=1223 + _LOTSNESTEDMESSAGE_B72._serialized_start=1225 + _LOTSNESTEDMESSAGE_B72._serialized_end=1230 + _LOTSNESTEDMESSAGE_B73._serialized_start=1232 + _LOTSNESTEDMESSAGE_B73._serialized_end=1237 + _LOTSNESTEDMESSAGE_B74._serialized_start=1239 + _LOTSNESTEDMESSAGE_B74._serialized_end=1244 + _LOTSNESTEDMESSAGE_B75._serialized_start=1246 + _LOTSNESTEDMESSAGE_B75._serialized_end=1251 + _LOTSNESTEDMESSAGE_B76._serialized_start=1253 + _LOTSNESTEDMESSAGE_B76._serialized_end=1258 + _LOTSNESTEDMESSAGE_B77._serialized_start=1260 + 
_LOTSNESTEDMESSAGE_B77._serialized_end=1265 + _LOTSNESTEDMESSAGE_B78._serialized_start=1267 + _LOTSNESTEDMESSAGE_B78._serialized_end=1272 + _LOTSNESTEDMESSAGE_B79._serialized_start=1274 + _LOTSNESTEDMESSAGE_B79._serialized_end=1279 + _LOTSNESTEDMESSAGE_B80._serialized_start=1281 + _LOTSNESTEDMESSAGE_B80._serialized_end=1286 + _LOTSNESTEDMESSAGE_B81._serialized_start=1288 + _LOTSNESTEDMESSAGE_B81._serialized_end=1293 + _LOTSNESTEDMESSAGE_B82._serialized_start=1295 + _LOTSNESTEDMESSAGE_B82._serialized_end=1300 + _LOTSNESTEDMESSAGE_B83._serialized_start=1302 + _LOTSNESTEDMESSAGE_B83._serialized_end=1307 + _LOTSNESTEDMESSAGE_B84._serialized_start=1309 + _LOTSNESTEDMESSAGE_B84._serialized_end=1314 + _LOTSNESTEDMESSAGE_B85._serialized_start=1316 + _LOTSNESTEDMESSAGE_B85._serialized_end=1321 + _LOTSNESTEDMESSAGE_B86._serialized_start=1323 + _LOTSNESTEDMESSAGE_B86._serialized_end=1328 + _LOTSNESTEDMESSAGE_B87._serialized_start=1330 + _LOTSNESTEDMESSAGE_B87._serialized_end=1335 + _LOTSNESTEDMESSAGE_B88._serialized_start=1337 + _LOTSNESTEDMESSAGE_B88._serialized_end=1342 + _LOTSNESTEDMESSAGE_B89._serialized_start=1344 + _LOTSNESTEDMESSAGE_B89._serialized_end=1349 + _LOTSNESTEDMESSAGE_B90._serialized_start=1351 + _LOTSNESTEDMESSAGE_B90._serialized_end=1356 + _LOTSNESTEDMESSAGE_B91._serialized_start=1358 + _LOTSNESTEDMESSAGE_B91._serialized_end=1363 + _LOTSNESTEDMESSAGE_B92._serialized_start=1365 + _LOTSNESTEDMESSAGE_B92._serialized_end=1370 + _LOTSNESTEDMESSAGE_B93._serialized_start=1372 + _LOTSNESTEDMESSAGE_B93._serialized_end=1377 + _LOTSNESTEDMESSAGE_B94._serialized_start=1379 + _LOTSNESTEDMESSAGE_B94._serialized_end=1384 + _LOTSNESTEDMESSAGE_B95._serialized_start=1386 + _LOTSNESTEDMESSAGE_B95._serialized_end=1391 + _LOTSNESTEDMESSAGE_B96._serialized_start=1393 + _LOTSNESTEDMESSAGE_B96._serialized_end=1398 + _LOTSNESTEDMESSAGE_B97._serialized_start=1400 + _LOTSNESTEDMESSAGE_B97._serialized_end=1405 + _LOTSNESTEDMESSAGE_B98._serialized_start=1407 + _LOTSNESTEDMESSAGE_B98._serialized_end=1412 + _LOTSNESTEDMESSAGE_B99._serialized_start=1414 + _LOTSNESTEDMESSAGE_B99._serialized_end=1419 + _LOTSNESTEDMESSAGE_B100._serialized_start=1421 + _LOTSNESTEDMESSAGE_B100._serialized_end=1427 + _LOTSNESTEDMESSAGE_B101._serialized_start=1429 + _LOTSNESTEDMESSAGE_B101._serialized_end=1435 + _LOTSNESTEDMESSAGE_B102._serialized_start=1437 + _LOTSNESTEDMESSAGE_B102._serialized_end=1443 + _LOTSNESTEDMESSAGE_B103._serialized_start=1445 + _LOTSNESTEDMESSAGE_B103._serialized_end=1451 + _LOTSNESTEDMESSAGE_B104._serialized_start=1453 + _LOTSNESTEDMESSAGE_B104._serialized_end=1459 + _LOTSNESTEDMESSAGE_B105._serialized_start=1461 + _LOTSNESTEDMESSAGE_B105._serialized_end=1467 + _LOTSNESTEDMESSAGE_B106._serialized_start=1469 + _LOTSNESTEDMESSAGE_B106._serialized_end=1475 + _LOTSNESTEDMESSAGE_B107._serialized_start=1477 + _LOTSNESTEDMESSAGE_B107._serialized_end=1483 + _LOTSNESTEDMESSAGE_B108._serialized_start=1485 + _LOTSNESTEDMESSAGE_B108._serialized_end=1491 + _LOTSNESTEDMESSAGE_B109._serialized_start=1493 + _LOTSNESTEDMESSAGE_B109._serialized_end=1499 + _LOTSNESTEDMESSAGE_B110._serialized_start=1501 + _LOTSNESTEDMESSAGE_B110._serialized_end=1507 + _LOTSNESTEDMESSAGE_B111._serialized_start=1509 + _LOTSNESTEDMESSAGE_B111._serialized_end=1515 + _LOTSNESTEDMESSAGE_B112._serialized_start=1517 + _LOTSNESTEDMESSAGE_B112._serialized_end=1523 + _LOTSNESTEDMESSAGE_B113._serialized_start=1525 + _LOTSNESTEDMESSAGE_B113._serialized_end=1531 + _LOTSNESTEDMESSAGE_B114._serialized_start=1533 + _LOTSNESTEDMESSAGE_B114._serialized_end=1539 + 
_LOTSNESTEDMESSAGE_B115._serialized_start=1541 + _LOTSNESTEDMESSAGE_B115._serialized_end=1547 + _LOTSNESTEDMESSAGE_B116._serialized_start=1549 + _LOTSNESTEDMESSAGE_B116._serialized_end=1555 + _LOTSNESTEDMESSAGE_B117._serialized_start=1557 + _LOTSNESTEDMESSAGE_B117._serialized_end=1563 + _LOTSNESTEDMESSAGE_B118._serialized_start=1565 + _LOTSNESTEDMESSAGE_B118._serialized_end=1571 + _LOTSNESTEDMESSAGE_B119._serialized_start=1573 + _LOTSNESTEDMESSAGE_B119._serialized_end=1579 + _LOTSNESTEDMESSAGE_B120._serialized_start=1581 + _LOTSNESTEDMESSAGE_B120._serialized_end=1587 + _LOTSNESTEDMESSAGE_B121._serialized_start=1589 + _LOTSNESTEDMESSAGE_B121._serialized_end=1595 + _LOTSNESTEDMESSAGE_B122._serialized_start=1597 + _LOTSNESTEDMESSAGE_B122._serialized_end=1603 + _LOTSNESTEDMESSAGE_B123._serialized_start=1605 + _LOTSNESTEDMESSAGE_B123._serialized_end=1611 + _LOTSNESTEDMESSAGE_B124._serialized_start=1613 + _LOTSNESTEDMESSAGE_B124._serialized_end=1619 + _LOTSNESTEDMESSAGE_B125._serialized_start=1621 + _LOTSNESTEDMESSAGE_B125._serialized_end=1627 + _LOTSNESTEDMESSAGE_B126._serialized_start=1629 + _LOTSNESTEDMESSAGE_B126._serialized_end=1635 + _LOTSNESTEDMESSAGE_B127._serialized_start=1637 + _LOTSNESTEDMESSAGE_B127._serialized_end=1643 + _LOTSNESTEDMESSAGE_B128._serialized_start=1645 + _LOTSNESTEDMESSAGE_B128._serialized_end=1651 + _LOTSNESTEDMESSAGE_B129._serialized_start=1653 + _LOTSNESTEDMESSAGE_B129._serialized_end=1659 + _LOTSNESTEDMESSAGE_B130._serialized_start=1661 + _LOTSNESTEDMESSAGE_B130._serialized_end=1667 + _LOTSNESTEDMESSAGE_B131._serialized_start=1669 + _LOTSNESTEDMESSAGE_B131._serialized_end=1675 + _LOTSNESTEDMESSAGE_B132._serialized_start=1677 + _LOTSNESTEDMESSAGE_B132._serialized_end=1683 + _LOTSNESTEDMESSAGE_B133._serialized_start=1685 + _LOTSNESTEDMESSAGE_B133._serialized_end=1691 + _LOTSNESTEDMESSAGE_B134._serialized_start=1693 + _LOTSNESTEDMESSAGE_B134._serialized_end=1699 + _LOTSNESTEDMESSAGE_B135._serialized_start=1701 + _LOTSNESTEDMESSAGE_B135._serialized_end=1707 + _LOTSNESTEDMESSAGE_B136._serialized_start=1709 + _LOTSNESTEDMESSAGE_B136._serialized_end=1715 + _LOTSNESTEDMESSAGE_B137._serialized_start=1717 + _LOTSNESTEDMESSAGE_B137._serialized_end=1723 + _LOTSNESTEDMESSAGE_B138._serialized_start=1725 + _LOTSNESTEDMESSAGE_B138._serialized_end=1731 + _LOTSNESTEDMESSAGE_B139._serialized_start=1733 + _LOTSNESTEDMESSAGE_B139._serialized_end=1739 + _LOTSNESTEDMESSAGE_B140._serialized_start=1741 + _LOTSNESTEDMESSAGE_B140._serialized_end=1747 + _LOTSNESTEDMESSAGE_B141._serialized_start=1749 + _LOTSNESTEDMESSAGE_B141._serialized_end=1755 + _LOTSNESTEDMESSAGE_B142._serialized_start=1757 + _LOTSNESTEDMESSAGE_B142._serialized_end=1763 + _LOTSNESTEDMESSAGE_B143._serialized_start=1765 + _LOTSNESTEDMESSAGE_B143._serialized_end=1771 + _LOTSNESTEDMESSAGE_B144._serialized_start=1773 + _LOTSNESTEDMESSAGE_B144._serialized_end=1779 + _LOTSNESTEDMESSAGE_B145._serialized_start=1781 + _LOTSNESTEDMESSAGE_B145._serialized_end=1787 + _LOTSNESTEDMESSAGE_B146._serialized_start=1789 + _LOTSNESTEDMESSAGE_B146._serialized_end=1795 + _LOTSNESTEDMESSAGE_B147._serialized_start=1797 + _LOTSNESTEDMESSAGE_B147._serialized_end=1803 + _LOTSNESTEDMESSAGE_B148._serialized_start=1805 + _LOTSNESTEDMESSAGE_B148._serialized_end=1811 + _LOTSNESTEDMESSAGE_B149._serialized_start=1813 + _LOTSNESTEDMESSAGE_B149._serialized_end=1819 + _LOTSNESTEDMESSAGE_B150._serialized_start=1821 + _LOTSNESTEDMESSAGE_B150._serialized_end=1827 + _LOTSNESTEDMESSAGE_B151._serialized_start=1829 + _LOTSNESTEDMESSAGE_B151._serialized_end=1835 + 
_LOTSNESTEDMESSAGE_B152._serialized_start=1837 + _LOTSNESTEDMESSAGE_B152._serialized_end=1843 + _LOTSNESTEDMESSAGE_B153._serialized_start=1845 + _LOTSNESTEDMESSAGE_B153._serialized_end=1851 + _LOTSNESTEDMESSAGE_B154._serialized_start=1853 + _LOTSNESTEDMESSAGE_B154._serialized_end=1859 + _LOTSNESTEDMESSAGE_B155._serialized_start=1861 + _LOTSNESTEDMESSAGE_B155._serialized_end=1867 + _LOTSNESTEDMESSAGE_B156._serialized_start=1869 + _LOTSNESTEDMESSAGE_B156._serialized_end=1875 + _LOTSNESTEDMESSAGE_B157._serialized_start=1877 + _LOTSNESTEDMESSAGE_B157._serialized_end=1883 + _LOTSNESTEDMESSAGE_B158._serialized_start=1885 + _LOTSNESTEDMESSAGE_B158._serialized_end=1891 + _LOTSNESTEDMESSAGE_B159._serialized_start=1893 + _LOTSNESTEDMESSAGE_B159._serialized_end=1899 + _LOTSNESTEDMESSAGE_B160._serialized_start=1901 + _LOTSNESTEDMESSAGE_B160._serialized_end=1907 + _LOTSNESTEDMESSAGE_B161._serialized_start=1909 + _LOTSNESTEDMESSAGE_B161._serialized_end=1915 + _LOTSNESTEDMESSAGE_B162._serialized_start=1917 + _LOTSNESTEDMESSAGE_B162._serialized_end=1923 + _LOTSNESTEDMESSAGE_B163._serialized_start=1925 + _LOTSNESTEDMESSAGE_B163._serialized_end=1931 + _LOTSNESTEDMESSAGE_B164._serialized_start=1933 + _LOTSNESTEDMESSAGE_B164._serialized_end=1939 + _LOTSNESTEDMESSAGE_B165._serialized_start=1941 + _LOTSNESTEDMESSAGE_B165._serialized_end=1947 + _LOTSNESTEDMESSAGE_B166._serialized_start=1949 + _LOTSNESTEDMESSAGE_B166._serialized_end=1955 + _LOTSNESTEDMESSAGE_B167._serialized_start=1957 + _LOTSNESTEDMESSAGE_B167._serialized_end=1963 + _LOTSNESTEDMESSAGE_B168._serialized_start=1965 + _LOTSNESTEDMESSAGE_B168._serialized_end=1971 + _LOTSNESTEDMESSAGE_B169._serialized_start=1973 + _LOTSNESTEDMESSAGE_B169._serialized_end=1979 + _LOTSNESTEDMESSAGE_B170._serialized_start=1981 + _LOTSNESTEDMESSAGE_B170._serialized_end=1987 + _LOTSNESTEDMESSAGE_B171._serialized_start=1989 + _LOTSNESTEDMESSAGE_B171._serialized_end=1995 + _LOTSNESTEDMESSAGE_B172._serialized_start=1997 + _LOTSNESTEDMESSAGE_B172._serialized_end=2003 + _LOTSNESTEDMESSAGE_B173._serialized_start=2005 + _LOTSNESTEDMESSAGE_B173._serialized_end=2011 + _LOTSNESTEDMESSAGE_B174._serialized_start=2013 + _LOTSNESTEDMESSAGE_B174._serialized_end=2019 + _LOTSNESTEDMESSAGE_B175._serialized_start=2021 + _LOTSNESTEDMESSAGE_B175._serialized_end=2027 + _LOTSNESTEDMESSAGE_B176._serialized_start=2029 + _LOTSNESTEDMESSAGE_B176._serialized_end=2035 + _LOTSNESTEDMESSAGE_B177._serialized_start=2037 + _LOTSNESTEDMESSAGE_B177._serialized_end=2043 + _LOTSNESTEDMESSAGE_B178._serialized_start=2045 + _LOTSNESTEDMESSAGE_B178._serialized_end=2051 + _LOTSNESTEDMESSAGE_B179._serialized_start=2053 + _LOTSNESTEDMESSAGE_B179._serialized_end=2059 + _LOTSNESTEDMESSAGE_B180._serialized_start=2061 + _LOTSNESTEDMESSAGE_B180._serialized_end=2067 + _LOTSNESTEDMESSAGE_B181._serialized_start=2069 + _LOTSNESTEDMESSAGE_B181._serialized_end=2075 + _LOTSNESTEDMESSAGE_B182._serialized_start=2077 + _LOTSNESTEDMESSAGE_B182._serialized_end=2083 + _LOTSNESTEDMESSAGE_B183._serialized_start=2085 + _LOTSNESTEDMESSAGE_B183._serialized_end=2091 + _LOTSNESTEDMESSAGE_B184._serialized_start=2093 + _LOTSNESTEDMESSAGE_B184._serialized_end=2099 + _LOTSNESTEDMESSAGE_B185._serialized_start=2101 + _LOTSNESTEDMESSAGE_B185._serialized_end=2107 + _LOTSNESTEDMESSAGE_B186._serialized_start=2109 + _LOTSNESTEDMESSAGE_B186._serialized_end=2115 + _LOTSNESTEDMESSAGE_B187._serialized_start=2117 + _LOTSNESTEDMESSAGE_B187._serialized_end=2123 + _LOTSNESTEDMESSAGE_B188._serialized_start=2125 + _LOTSNESTEDMESSAGE_B188._serialized_end=2131 + 
_LOTSNESTEDMESSAGE_B189._serialized_start=2133 + _LOTSNESTEDMESSAGE_B189._serialized_end=2139 + _LOTSNESTEDMESSAGE_B190._serialized_start=2141 + _LOTSNESTEDMESSAGE_B190._serialized_end=2147 + _LOTSNESTEDMESSAGE_B191._serialized_start=2149 + _LOTSNESTEDMESSAGE_B191._serialized_end=2155 + _LOTSNESTEDMESSAGE_B192._serialized_start=2157 + _LOTSNESTEDMESSAGE_B192._serialized_end=2163 + _LOTSNESTEDMESSAGE_B193._serialized_start=2165 + _LOTSNESTEDMESSAGE_B193._serialized_end=2171 + _LOTSNESTEDMESSAGE_B194._serialized_start=2173 + _LOTSNESTEDMESSAGE_B194._serialized_end=2179 + _LOTSNESTEDMESSAGE_B195._serialized_start=2181 + _LOTSNESTEDMESSAGE_B195._serialized_end=2187 + _LOTSNESTEDMESSAGE_B196._serialized_start=2189 + _LOTSNESTEDMESSAGE_B196._serialized_end=2195 + _LOTSNESTEDMESSAGE_B197._serialized_start=2197 + _LOTSNESTEDMESSAGE_B197._serialized_end=2203 + _LOTSNESTEDMESSAGE_B198._serialized_start=2205 + _LOTSNESTEDMESSAGE_B198._serialized_end=2211 + _LOTSNESTEDMESSAGE_B199._serialized_start=2213 + _LOTSNESTEDMESSAGE_B199._serialized_end=2219 + _LOTSNESTEDMESSAGE_B200._serialized_start=2221 + _LOTSNESTEDMESSAGE_B200._serialized_end=2227 + _LOTSNESTEDMESSAGE_B201._serialized_start=2229 + _LOTSNESTEDMESSAGE_B201._serialized_end=2235 + _LOTSNESTEDMESSAGE_B202._serialized_start=2237 + _LOTSNESTEDMESSAGE_B202._serialized_end=2243 + _LOTSNESTEDMESSAGE_B203._serialized_start=2245 + _LOTSNESTEDMESSAGE_B203._serialized_end=2251 + _LOTSNESTEDMESSAGE_B204._serialized_start=2253 + _LOTSNESTEDMESSAGE_B204._serialized_end=2259 + _LOTSNESTEDMESSAGE_B205._serialized_start=2261 + _LOTSNESTEDMESSAGE_B205._serialized_end=2267 + _LOTSNESTEDMESSAGE_B206._serialized_start=2269 + _LOTSNESTEDMESSAGE_B206._serialized_end=2275 + _LOTSNESTEDMESSAGE_B207._serialized_start=2277 + _LOTSNESTEDMESSAGE_B207._serialized_end=2283 + _LOTSNESTEDMESSAGE_B208._serialized_start=2285 + _LOTSNESTEDMESSAGE_B208._serialized_end=2291 + _LOTSNESTEDMESSAGE_B209._serialized_start=2293 + _LOTSNESTEDMESSAGE_B209._serialized_end=2299 + _LOTSNESTEDMESSAGE_B210._serialized_start=2301 + _LOTSNESTEDMESSAGE_B210._serialized_end=2307 + _LOTSNESTEDMESSAGE_B211._serialized_start=2309 + _LOTSNESTEDMESSAGE_B211._serialized_end=2315 + _LOTSNESTEDMESSAGE_B212._serialized_start=2317 + _LOTSNESTEDMESSAGE_B212._serialized_end=2323 + _LOTSNESTEDMESSAGE_B213._serialized_start=2325 + _LOTSNESTEDMESSAGE_B213._serialized_end=2331 + _LOTSNESTEDMESSAGE_B214._serialized_start=2333 + _LOTSNESTEDMESSAGE_B214._serialized_end=2339 + _LOTSNESTEDMESSAGE_B215._serialized_start=2341 + _LOTSNESTEDMESSAGE_B215._serialized_end=2347 + _LOTSNESTEDMESSAGE_B216._serialized_start=2349 + _LOTSNESTEDMESSAGE_B216._serialized_end=2355 + _LOTSNESTEDMESSAGE_B217._serialized_start=2357 + _LOTSNESTEDMESSAGE_B217._serialized_end=2363 + _LOTSNESTEDMESSAGE_B218._serialized_start=2365 + _LOTSNESTEDMESSAGE_B218._serialized_end=2371 + _LOTSNESTEDMESSAGE_B219._serialized_start=2373 + _LOTSNESTEDMESSAGE_B219._serialized_end=2379 + _LOTSNESTEDMESSAGE_B220._serialized_start=2381 + _LOTSNESTEDMESSAGE_B220._serialized_end=2387 + _LOTSNESTEDMESSAGE_B221._serialized_start=2389 + _LOTSNESTEDMESSAGE_B221._serialized_end=2395 + _LOTSNESTEDMESSAGE_B222._serialized_start=2397 + _LOTSNESTEDMESSAGE_B222._serialized_end=2403 + _LOTSNESTEDMESSAGE_B223._serialized_start=2405 + _LOTSNESTEDMESSAGE_B223._serialized_end=2411 + _LOTSNESTEDMESSAGE_B224._serialized_start=2413 + _LOTSNESTEDMESSAGE_B224._serialized_end=2419 + _LOTSNESTEDMESSAGE_B225._serialized_start=2421 + _LOTSNESTEDMESSAGE_B225._serialized_end=2427 + 
_LOTSNESTEDMESSAGE_B226._serialized_start=2429 + _LOTSNESTEDMESSAGE_B226._serialized_end=2435 + _LOTSNESTEDMESSAGE_B227._serialized_start=2437 + _LOTSNESTEDMESSAGE_B227._serialized_end=2443 + _LOTSNESTEDMESSAGE_B228._serialized_start=2445 + _LOTSNESTEDMESSAGE_B228._serialized_end=2451 + _LOTSNESTEDMESSAGE_B229._serialized_start=2453 + _LOTSNESTEDMESSAGE_B229._serialized_end=2459 + _LOTSNESTEDMESSAGE_B230._serialized_start=2461 + _LOTSNESTEDMESSAGE_B230._serialized_end=2467 + _LOTSNESTEDMESSAGE_B231._serialized_start=2469 + _LOTSNESTEDMESSAGE_B231._serialized_end=2475 + _LOTSNESTEDMESSAGE_B232._serialized_start=2477 + _LOTSNESTEDMESSAGE_B232._serialized_end=2483 + _LOTSNESTEDMESSAGE_B233._serialized_start=2485 + _LOTSNESTEDMESSAGE_B233._serialized_end=2491 + _LOTSNESTEDMESSAGE_B234._serialized_start=2493 + _LOTSNESTEDMESSAGE_B234._serialized_end=2499 + _LOTSNESTEDMESSAGE_B235._serialized_start=2501 + _LOTSNESTEDMESSAGE_B235._serialized_end=2507 + _LOTSNESTEDMESSAGE_B236._serialized_start=2509 + _LOTSNESTEDMESSAGE_B236._serialized_end=2515 + _LOTSNESTEDMESSAGE_B237._serialized_start=2517 + _LOTSNESTEDMESSAGE_B237._serialized_end=2523 + _LOTSNESTEDMESSAGE_B238._serialized_start=2525 + _LOTSNESTEDMESSAGE_B238._serialized_end=2531 + _LOTSNESTEDMESSAGE_B239._serialized_start=2533 + _LOTSNESTEDMESSAGE_B239._serialized_end=2539 + _LOTSNESTEDMESSAGE_B240._serialized_start=2541 + _LOTSNESTEDMESSAGE_B240._serialized_end=2547 + _LOTSNESTEDMESSAGE_B241._serialized_start=2549 + _LOTSNESTEDMESSAGE_B241._serialized_end=2555 + _LOTSNESTEDMESSAGE_B242._serialized_start=2557 + _LOTSNESTEDMESSAGE_B242._serialized_end=2563 + _LOTSNESTEDMESSAGE_B243._serialized_start=2565 + _LOTSNESTEDMESSAGE_B243._serialized_end=2571 + _LOTSNESTEDMESSAGE_B244._serialized_start=2573 + _LOTSNESTEDMESSAGE_B244._serialized_end=2579 + _LOTSNESTEDMESSAGE_B245._serialized_start=2581 + _LOTSNESTEDMESSAGE_B245._serialized_end=2587 + _LOTSNESTEDMESSAGE_B246._serialized_start=2589 + _LOTSNESTEDMESSAGE_B246._serialized_end=2595 + _LOTSNESTEDMESSAGE_B247._serialized_start=2597 + _LOTSNESTEDMESSAGE_B247._serialized_end=2603 + _LOTSNESTEDMESSAGE_B248._serialized_start=2605 + _LOTSNESTEDMESSAGE_B248._serialized_end=2611 + _LOTSNESTEDMESSAGE_B249._serialized_start=2613 + _LOTSNESTEDMESSAGE_B249._serialized_end=2619 + _LOTSNESTEDMESSAGE_B250._serialized_start=2621 + _LOTSNESTEDMESSAGE_B250._serialized_end=2627 + _LOTSNESTEDMESSAGE_B251._serialized_start=2629 + _LOTSNESTEDMESSAGE_B251._serialized_end=2635 + _LOTSNESTEDMESSAGE_B252._serialized_start=2637 + _LOTSNESTEDMESSAGE_B252._serialized_end=2643 + _LOTSNESTEDMESSAGE_B253._serialized_start=2645 + _LOTSNESTEDMESSAGE_B253._serialized_end=2651 + _LOTSNESTEDMESSAGE_B254._serialized_start=2653 + _LOTSNESTEDMESSAGE_B254._serialized_end=2659 + _LOTSNESTEDMESSAGE_B255._serialized_start=2661 + _LOTSNESTEDMESSAGE_B255._serialized_end=2667 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/no_package_pb2.py b/MLPY/Lib/site-packages/google/protobuf/internal/no_package_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..d46dee080a00d2c3368603375a20092ca2faeba8 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/no_package_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/internal/no_package.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)google/protobuf/internal/no_package.proto\";\n\x10NoPackageMessage\x12\'\n\x0fno_package_enum\x18\x01 \x01(\x0e\x32\x0e.NoPackageEnum*?\n\rNoPackageEnum\x12\x16\n\x12NO_PACKAGE_VALUE_0\x10\x00\x12\x16\n\x12NO_PACKAGE_VALUE_1\x10\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.no_package_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _NOPACKAGEENUM._serialized_start=106 + _NOPACKAGEENUM._serialized_end=169 + _NOPACKAGEMESSAGE._serialized_start=45 + _NOPACKAGEMESSAGE._serialized_end=104 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/python_message.py b/MLPY/Lib/site-packages/google/protobuf/internal/python_message.py new file mode 100644 index 0000000000000000000000000000000000000000..00a1b97201f1acdf54dbd9faee217de91585edea --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/python_message.py @@ -0,0 +1,1539 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. +# +# TODO(robinson): Helpers for verbose, common checks like seeing if a +# descriptor's cpp_type is CPPTYPE_MESSAGE. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. 
+(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +from io import BytesIO +import struct +import sys +import weakref + +# We use "as" to avoid name collisions with variables. +from google.protobuf.internal import api_implementation +from google.protobuf.internal import containers +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import extension_dict +from google.protobuf.internal import message_listener as message_listener_mod +from google.protobuf.internal import type_checkers +from google.protobuf.internal import well_known_types +from google.protobuf.internal import wire_format +from google.protobuf import descriptor as descriptor_mod +from google.protobuf import message as message_mod +from google.protobuf import text_format + +_FieldDescriptor = descriptor_mod.FieldDescriptor +_AnyFullTypeName = 'google.protobuf.Any' +_ExtensionDict = extension_dict._ExtensionDict + +class GeneratedProtocolMessageType(type): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + We add implementations for all methods described in the Message class. We + also create properties to allow getting/setting all fields in the protocol + message. Finally, we create slots to prevent users from accidentally + "setting" nonexistent fields in the protocol message, which then wouldn't get + serialized / deserialized properly. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __new__(cls, name, bases, dictionary): + """Custom allocation for runtime-generated class types. + + We override __new__ because this is apparently the only place + where we can meaningfully set __slots__ on the class we're creating(?). + (The interplay between metaclasses and slots is not very well-documented). + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + + Returns: + Newly-allocated class. + + Raises: + RuntimeError: Generated code only work with python cpp extension. 
+ """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + if isinstance(descriptor, str): + raise RuntimeError('The generated code only work with python cpp ' + 'extension, but it is using pure python runtime.') + + # If a concrete class already exists for this descriptor, don't try to + # create another. Doing so will break any messages that already exist with + # the existing class. + # + # The C++ implementation appears to have its own internal `PyMessageFactory` + # to achieve similar results. + # + # This most commonly happens in `text_format.py` when using descriptors from + # a custom pool; it calls symbol_database.Global().getPrototype() on a + # descriptor which already has an existing concrete class. + new_class = getattr(descriptor, '_concrete_class', None) + if new_class: + return new_class + + if descriptor.full_name in well_known_types.WKTBASES: + bases += (well_known_types.WKTBASES[descriptor.full_name],) + _AddClassAttributesForNestedExtensions(descriptor, dictionary) + _AddSlots(descriptor, dictionary) + + superclass = super(GeneratedProtocolMessageType, cls) + new_class = superclass.__new__(cls, name, bases, dictionary) + return new_class + + def __init__(cls, name, bases, dictionary): + """Here we perform the majority of our work on the class. + We add enum getters, an __init__ method, implementations + of all Message methods, and properties for all fields + in the protocol type. + + Args: + name: Name of the class (ignored, but required by the + metaclass protocol). + bases: Base classes of the class we're constructing. + (Should be message.Message). We ignore this field, but + it's required by the metaclass protocol + dictionary: The class dictionary of the class we're + constructing. dictionary[_DESCRIPTOR_KEY] must contain + a Descriptor object describing this protocol message + type. + """ + descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] + + # If this is an _existing_ class looked up via `_concrete_class` in the + # __new__ method above, then we don't need to re-initialize anything. + existing_class = getattr(descriptor, '_concrete_class', None) + if existing_class: + assert existing_class is cls, ( + 'Duplicate `GeneratedProtocolMessageType` created for descriptor %r' + % (descriptor.full_name)) + return + + cls._decoders_by_tag = {} + if (descriptor.has_options and + descriptor.GetOptions().message_set_wire_format): + cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( + decoder.MessageSetItemDecoder(descriptor), None) + + # Attach stuff to each FieldDescriptor for quick lookup later on. + for field in descriptor.fields: + _AttachFieldHelpers(cls, field) + + descriptor._concrete_class = cls # pylint: disable=protected-access + _AddEnumValues(descriptor, cls) + _AddInitMethod(descriptor, cls) + _AddPropertiesForFields(descriptor, cls) + _AddPropertiesForExtensions(descriptor, cls) + _AddStaticMethods(cls) + _AddMessageMethods(descriptor, cls) + _AddPrivateHelperMethods(descriptor, cls) + + superclass = super(GeneratedProtocolMessageType, cls) + superclass.__init__(name, bases, dictionary) + + +# Stateless helpers for GeneratedProtocolMessageType below. +# Outside clients should not access these directly. +# +# I opted not to make any of these methods on the metaclass, to make it more +# clear that I'm not really using any state there and to keep clients from +# thinking that they have direct access to these construction helpers. 
+ + +def _PropertyName(proto_field_name): + """Returns the name of the public property attribute which + clients can use to get and (in some cases) set the value + of a protocol message field. + + Args: + proto_field_name: The protocol message field name, exactly + as it appears (or would appear) in a .proto file. + """ + # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. + # nnorwitz makes my day by writing: + # """ + # FYI. See the keyword module in the stdlib. This could be as simple as: + # + # if keyword.iskeyword(proto_field_name): + # return proto_field_name + "_" + # return proto_field_name + # """ + # Kenton says: The above is a BAD IDEA. People rely on being able to use + # getattr() and setattr() to reflectively manipulate field values. If we + # rename the properties, then every such user has to also make sure to apply + # the same transformation. Note that currently if you name a field "yield", + # you can still access it just fine using getattr/setattr -- it's not even + # that cumbersome to do so. + # TODO(kenton): Remove this method entirely if/when everyone agrees with my + # position. + return proto_field_name + + +def _AddSlots(message_descriptor, dictionary): + """Adds a __slots__ entry to dictionary, containing the names of all valid + attributes for this message type. + + Args: + message_descriptor: A Descriptor instance describing this message type. + dictionary: Class dictionary to which we'll add a '__slots__' entry. + """ + dictionary['__slots__'] = ['_cached_byte_size', + '_cached_byte_size_dirty', + '_fields', + '_unknown_fields', + '_unknown_field_set', + '_is_present_in_parent', + '_listener', + '_listener_for_children', + '__weakref__', + '_oneofs'] + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == _FieldDescriptor.TYPE_MESSAGE and + field.label == _FieldDescriptor.LABEL_OPTIONAL) + + +def _IsMapField(field): + return (field.type == _FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def _IsMessageMapField(field): + value_type = field.message_type.fields_by_name['value'] + return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE + + +def _AttachFieldHelpers(cls, field_descriptor): + is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) + is_packable = (is_repeated and + wire_format.IsTypePackable(field_descriptor.type)) + is_proto3 = field_descriptor.containing_type.syntax == 'proto3' + if not is_packable: + is_packed = False + elif field_descriptor.containing_type.syntax == 'proto2': + is_packed = (field_descriptor.has_options and + field_descriptor.GetOptions().packed) + else: + has_packed_false = (field_descriptor.has_options and + field_descriptor.GetOptions().HasField('packed') and + field_descriptor.GetOptions().packed == False) + is_packed = not has_packed_false + is_map_entry = _IsMapField(field_descriptor) + + if is_map_entry: + field_encoder = encoder.MapEncoder(field_descriptor) + sizer = encoder.MapSizer(field_descriptor, + _IsMessageMapField(field_descriptor)) + elif _IsMessageSetExtension(field_descriptor): + field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) + sizer = encoder.MessageSetItemSizer(field_descriptor.number) + else: + field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + sizer = 
type_checkers.TYPE_TO_SIZER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + + field_descriptor._encoder = field_encoder + field_descriptor._sizer = sizer + field_descriptor._default_constructor = _DefaultValueConstructorForField( + field_descriptor) + + def AddDecoder(wiretype, is_packed): + tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) + decode_type = field_descriptor.type + if (decode_type == _FieldDescriptor.TYPE_ENUM and + type_checkers.SupportsOpenEnums(field_descriptor)): + decode_type = _FieldDescriptor.TYPE_INT32 + + oneof_descriptor = None + clear_if_default = False + if field_descriptor.containing_oneof is not None: + oneof_descriptor = field_descriptor + elif (is_proto3 and not is_repeated and + field_descriptor.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE): + clear_if_default = True + + if is_map_entry: + is_message_map = _IsMessageMapField(field_descriptor) + + field_decoder = decoder.MapDecoder( + field_descriptor, _GetInitializeDefaultForMap(field_descriptor), + is_message_map) + elif decode_type == _FieldDescriptor.TYPE_STRING: + field_decoder = decoder.StringDecoder( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + elif field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor) + else: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + # pylint: disable=protected-access + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + + cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor) + + AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], + False) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) + + +def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + + +def _AddEnumValues(descriptor, cls): + """Sets class-level attributes for all enum fields defined in this message. + + Also exporting a class-level object that can name enum values. + + Args: + descriptor: Descriptor object for this message type. + cls: Class we're constructing for this message type. 
+ """ + for enum_type in descriptor.enum_types: + setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) + for enum_value in enum_type.values: + setattr(cls, enum_value.name, enum_value.number) + + +def _GetInitializeDefaultForMap(field): + if field.label != _FieldDescriptor.LABEL_REPEATED: + raise ValueError('map_entry set on non-repeated field %s' % ( + field.name)) + fields_by_name = field.message_type.fields_by_name + key_checker = type_checkers.GetTypeChecker(fields_by_name['key']) + + value_field = fields_by_name['value'] + if _IsMessageMapField(field): + def MakeMessageMapDefault(message): + return containers.MessageMap( + message._listener_for_children, value_field.message_type, key_checker, + field.message_type) + return MakeMessageMapDefault + else: + value_checker = type_checkers.GetTypeChecker(value_field) + def MakePrimitiveMapDefault(message): + return containers.ScalarMap( + message._listener_for_children, key_checker, value_checker, + field.message_type) + return MakePrimitiveMapDefault + +def _DefaultValueConstructorForField(field): + """Returns a function which returns a default value for a field. + + Args: + field: FieldDescriptor object for this field. + + The returned function has one argument: + message: Message instance containing this field, or a weakref proxy + of same. + + That function in turn returns a default value for this field. The default + value may refer back to |message| via a weak reference. + """ + + if _IsMapField(field): + return _GetInitializeDefaultForMap(field) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + if field.has_default_value and field.default_value != []: + raise ValueError('Repeated field default value not empty list: %s' % ( + field.default_value)) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # We can't look at _concrete_class yet since it might not have + # been set. (Depends on order in which we initialize the classes). + message_type = field.message_type + def MakeRepeatedMessageDefault(message): + return containers.RepeatedCompositeFieldContainer( + message._listener_for_children, field.message_type) + return MakeRepeatedMessageDefault + else: + type_checker = type_checkers.GetTypeChecker(field) + def MakeRepeatedScalarDefault(message): + return containers.RepeatedScalarFieldContainer( + message._listener_for_children, type_checker) + return MakeRepeatedScalarDefault + + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # _concrete_class may not yet be initialized. + message_type = field.message_type + def MakeSubMessageDefault(message): + assert getattr(message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (field.full_name, message_type.full_name)) + result = message_type._concrete_class() + result._SetListener( + _OneofListener(message, field) + if field.containing_oneof is not None + else message._listener_for_children) + return result + return MakeSubMessageDefault + + def MakeScalarDefault(message): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. 
+ return field.default_value + return MakeScalarDefault + + +def _ReraiseTypeErrorWithFieldName(message_name, field_name): + """Re-raise the currently-handled TypeError with the field name added.""" + exc = sys.exc_info()[1] + if len(exc.args) == 1 and type(exc) is TypeError: + # simple TypeError; add field name to exception message + exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) + + # re-raise possibly-amended exception with original traceback: + raise exc.with_traceback(sys.exc_info()[2]) + + +def _AddInitMethod(message_descriptor, cls): + """Adds an __init__ method to cls.""" + + def _GetIntegerEnumValue(enum_type, value): + """Convert a string or integer enum value to an integer. + + If the value is a string, it is converted to the enum value in + enum_type with the same name. If the value is not a string, it's + returned as-is. (No conversion or bounds-checking is done.) + """ + if isinstance(value, str): + try: + return enum_type.values_by_name[value].number + except KeyError: + raise ValueError('Enum type %s: unknown label "%s"' % ( + enum_type.full_name, value)) + return value + + def init(self, **kwargs): + self._cached_byte_size = 0 + self._cached_byte_size_dirty = len(kwargs) > 0 + self._fields = {} + # Contains a mapping from oneof field descriptors to the descriptor + # of the currently set field in that oneof field. + self._oneofs = {} + + # _unknown_fields is () when empty for efficiency, and will be turned into + # a list if fields are added. + self._unknown_fields = () + # _unknown_field_set is None when empty for efficiency, and will be + # turned into UnknownFieldSet struct if fields are added. + self._unknown_field_set = None # pylint: disable=protected-access + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) + for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % + (message_descriptor.name, field_name)) + if field_value is None: + # field=None is the same as no field at all. 
+ continue + if field.label == _FieldDescriptor.LABEL_REPEATED: + copy = field._default_constructor(self) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite + if _IsMapField(field): + if _IsMessageMapField(field): + for key in field_value: + copy[key].MergeFrom(field_value[key]) + else: + copy.update(field_value) + else: + for val in field_value: + if isinstance(val, dict): + copy.add(**val) + else: + copy.add().MergeFrom(val) + else: # Scalar + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = [_GetIntegerEnumValue(field.enum_type, val) + for val in field_value] + copy.extend(field_value) + self._fields[field] = copy + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + copy = field._default_constructor(self) + new_val = field_value + if isinstance(field_value, dict): + new_val = field.message_type._concrete_class(**field_value) + try: + copy.MergeFrom(new_val) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + self._fields[field] = copy + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = _GetIntegerEnumValue(field.enum_type, field_value) + try: + setattr(self, field_name, field_value) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + + init.__module__ = None + init.__doc__ = None + cls.__init__ = init + + +def _GetFieldByName(message_descriptor, field_name): + """Returns a field descriptor by field name. + + Args: + message_descriptor: A Descriptor describing all fields in message. + field_name: The name of the field to retrieve. + Returns: + The field descriptor associated with the field name. + """ + try: + return message_descriptor.fields_by_name[field_name] + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + +def _AddPropertiesForFields(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + for field in descriptor.fields: + _AddPropertiesForField(field, cls) + + if descriptor.is_extendable: + # _ExtensionDict is just an adaptor with no state so we allocate a new one + # every time it is accessed. + cls.Extensions = property(lambda self: _ExtensionDict(self)) + + +def _AddPropertiesForField(field, cls): + """Adds a public property for a protocol message field. + Clients can use this property to get and (in the case + of non-repeated scalar fields) directly set the value + of a protocol message field. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # Catch it if we add other types that we should + # handle specially here. + assert _FieldDescriptor.MAX_CPPTYPE == 10 + + constant_name = field.name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, field.number) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + _AddPropertiesForRepeatedField(field, cls) + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + _AddPropertiesForNonRepeatedCompositeField(field, cls) + else: + _AddPropertiesForNonRepeatedScalarField(field, cls) + + +class _FieldProperty(property): + __slots__ = ('DESCRIPTOR',) + + def __init__(self, descriptor, getter, setter, doc): + property.__init__(self, getter, setter, doc=doc) + self.DESCRIPTOR = descriptor + + +def _AddPropertiesForRepeatedField(field, cls): + """Adds a public property for a "repeated" protocol message field. 
Clients + can use this property to get the value of the field, which will be either a + RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see + below). + + Note that when clients add values to these containers, we perform + type-checking in the case of repeated scalar fields, and we also set any + necessary "has" bits as a side-effect. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to repeated field ' + '"%s" in protocol message object.' % proto_field_name) + + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedScalarField(field, cls): + """Adds a public property for a nonrepeated, scalar protocol message field. + Clients can use this property to get and directly set the value of the field. + Note that when the client sets the value of a field by using this property, + all necessary "has" bits are set as a side-effect, and we also perform + type-checking. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + type_checker = type_checkers.GetTypeChecker(field) + default_value = field.default_value + is_proto3 = field.containing_type.syntax == 'proto3' + + def getter(self): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return self._fields.get(field, default_value) + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + clear_when_set_to_default = is_proto3 and not field.containing_oneof + + def field_setter(self, new_value): + # pylint: disable=protected-access + # Testing the value for truthiness captures all of the proto3 defaults + # (0, 0.0, enum 0, and False). + try: + new_value = type_checker.CheckValue(new_value) + except TypeError as e: + raise TypeError( + 'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e)) + if clear_when_set_to_default and not new_value: + self._fields.pop(field, None) + else: + self._fields[field] = new_value + # Check _cached_byte_size_dirty inline to improve performance, since scalar + # setters are called frequently. 
+ if not self._cached_byte_size_dirty: + self._Modified() + + if field.containing_oneof: + def setter(self, new_value): + field_setter(self, new_value) + self._UpdateOneofState(field) + else: + setter = field_setter + + setter.__module__ = None + setter.__doc__ = 'Setter for %s.' % proto_field_name + + # Add a property to encapsulate the getter/setter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedCompositeField(field, cls): + """Adds a public property for a nonrepeated, composite protocol message field. + A composite field is a "group" or "message" field. + + Clients can use this property to get the value of the field, but cannot + assign to the property directly. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # TODO(robinson): Remove duplication with similar method + # for non-repeated scalars. + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to composite field ' + '"%s" in protocol message object.' % proto_field_name) + + # Add a property to encapsulate the getter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + + # TODO(amauryfa): Migrate all users of these attributes to functions like + # pool.FindExtensionByNumber(descriptor). + if descriptor.file is not None: + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. + pool = descriptor.file.pool + cls._extensions_by_number = pool._extensions_by_number[descriptor] + cls._extensions_by_name = pool._extensions_by_name[descriptor] + +def _AddStaticMethods(cls): + # TODO(robinson): This probably needs to be thread-safe(?) + def RegisterExtension(extension_handle): + extension_handle.containing_type = cls.DESCRIPTOR + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. 
+ # pylint: disable=protected-access + cls.DESCRIPTOR.file.pool._AddExtensionDescriptor(extension_handle) + _AttachFieldHelpers(cls, extension_handle) + cls.RegisterExtension = staticmethod(RegisterExtension) + + def FromString(s): + message = cls() + message.MergeFromString(s) + return message + cls.FromString = staticmethod(FromString) + + +def _IsPresent(item): + """Given a (FieldDescriptor, value) tuple from _fields, return true if the + value should be included in the list returned by ListFields().""" + + if item[0].label == _FieldDescriptor.LABEL_REPEATED: + return bool(item[1]) + elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + return item[1]._is_present_in_parent + else: + return True + + +def _AddListFieldsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ListFields(self): + all_fields = [item for item in self._fields.items() if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + + cls.ListFields = ListFields + +_PROTO3_ERROR_TEMPLATE = \ + ('Protocol message %s has no non-repeated submessage field "%s" ' + 'nor marked as optional') +_PROTO2_ERROR_TEMPLATE = 'Protocol message %s has no non-repeated field "%s"' + +def _AddHasFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + is_proto3 = (message_descriptor.syntax == "proto3") + error_msg = _PROTO3_ERROR_TEMPLATE if is_proto3 else _PROTO2_ERROR_TEMPLATE + + hassable_fields = {} + for field in message_descriptor.fields: + if field.label == _FieldDescriptor.LABEL_REPEATED: + continue + # For proto3, only submessages and fields inside a oneof have presence. + if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and + not field.containing_oneof): + continue + hassable_fields[field.name] = field + + # Has methods are supported for oneof descriptors. + for oneof in message_descriptor.oneofs: + hassable_fields[oneof.name] = oneof + + def HasField(self, field_name): + try: + field = hassable_fields[field_name] + except KeyError: + raise ValueError(error_msg % (message_descriptor.full_name, field_name)) + + if isinstance(field, descriptor_mod.OneofDescriptor): + try: + return HasField(self, self._oneofs[field].name) + except KeyError: + return False + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(field) + return value is not None and value._is_present_in_parent + else: + return field in self._fields + + cls.HasField = HasField + + +def _AddClearFieldMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def ClearField(self, field_name): + try: + field = message_descriptor.fields_by_name[field_name] + except KeyError: + try: + field = message_descriptor.oneofs_by_name[field_name] + if field in self._oneofs: + field = self._oneofs[field] + else: + return + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + if field in self._fields: + # To match the C++ implementation, we need to invalidate iterators + # for map fields when ClearField() happens. + if hasattr(self._fields[field], 'InvalidateIterators'): + self._fields[field].InvalidateIterators() + + # Note: If the field is a sub-message, its listener will still point + # at us. That's fine, because the worst than can happen is that it + # will call _Modified() and invalidate our byte size. Big deal. 
+ del self._fields[field] + + if self._oneofs.get(field.containing_oneof, None) is field: + del self._oneofs[field.containing_oneof] + + # Always call _Modified() -- even if nothing was changed, this is + # a mutating method, and thus calling it should cause the field to become + # present in the parent message. + self._Modified() + + cls.ClearField = ClearField + + +def _AddClearExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def ClearExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + + # Similar to ClearField(), above. + if extension_handle in self._fields: + del self._fields[extension_handle] + self._Modified() + cls.ClearExtension = ClearExtension + + +def _AddHasExtensionMethod(cls): + """Helper for _AddMessageMethods().""" + def HasExtension(self, extension_handle): + extension_dict._VerifyExtensionHandle(self, extension_handle) + if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: + raise KeyError('"%s" is repeated.' % extension_handle.full_name) + + if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + value = self._fields.get(extension_handle) + return value is not None and value._is_present_in_parent + else: + return extension_handle in self._fields + cls.HasExtension = HasExtension + +def _InternalUnpackAny(msg): + """Unpacks Any message and returns the unpacked message. + + This internal method is different from public Any Unpack method which takes + the target message as argument. _InternalUnpackAny method does not have + target message type and need to find the message type in descriptor pool. + + Args: + msg: An Any message to be unpacked. + + Returns: + The unpacked message. + """ + # TODO(amauryfa): Don't use the factory of generated messages. + # To make Any work with custom factories, use the message factory of the + # parent message. + # pylint: disable=g-import-not-at-top + from google.protobuf import symbol_database + factory = symbol_database.Default() + + type_url = msg.type_url + + if not type_url: + return None + + # TODO(haberman): For now we just strip the hostname. Better logic will be + # required. + type_name = type_url.split('/')[-1] + descriptor = factory.pool.FindMessageTypeByName(type_name) + + if descriptor is None: + return None + + message_class = factory.GetPrototype(descriptor) + message = message_class() + + message.ParseFromString(msg.value) + return message + + +def _AddEqualsMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __eq__(self, other): + if (not isinstance(other, message_mod.Message) or + other.DESCRIPTOR != self.DESCRIPTOR): + return False + + if self is other: + return True + + if self.DESCRIPTOR.full_name == _AnyFullTypeName: + any_a = _InternalUnpackAny(self) + any_b = _InternalUnpackAny(other) + if any_a and any_b: + return any_a == any_b + + if not self.ListFields() == other.ListFields(): + return False + + # TODO(jieluo): Fix UnknownFieldSet to consider MessageSet extensions, + # then use it for the comparison. 
+ unknown_fields = list(self._unknown_fields) + unknown_fields.sort() + other_unknown_fields = list(other._unknown_fields) + other_unknown_fields.sort() + return unknown_fields == other_unknown_fields + + cls.__eq__ = __eq__ + + +def _AddStrMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __str__(self): + return text_format.MessageToString(self) + cls.__str__ = __str__ + + +def _AddReprMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __repr__(self): + return text_format.MessageToString(self) + cls.__repr__ = __repr__ + + +def _AddUnicodeMethod(unused_message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def __unicode__(self): + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + cls.__unicode__ = __unicode__ + + +def _BytesForNonRepeatedElement(value, field_number, field_type): + """Returns the number of bytes needed to serialize a non-repeated element. + The returned byte count includes space for tag information and any + other additional space associated with serializing value. + + Args: + value: Value we're serializing. + field_number: Field number of this value. (Since the field number + is stored as part of a varint-encoded tag, this has an impact + on the total bytes required to serialize the value). + field_type: The type of the field. One of the TYPE_* constants + within FieldDescriptor. + """ + try: + fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] + return fn(field_number, value) + except KeyError: + raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) + + +def _AddByteSizeMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ByteSize(self): + if not self._cached_byte_size_dirty: + return self._cached_byte_size + + size = 0 + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + size = descriptor.fields_by_name['key']._sizer(self.key) + size += descriptor.fields_by_name['value']._sizer(self.value) + else: + for field_descriptor, field_value in self.ListFields(): + size += field_descriptor._sizer(field_value) + for tag_bytes, value_bytes in self._unknown_fields: + size += len(tag_bytes) + len(value_bytes) + + self._cached_byte_size = size + self._cached_byte_size_dirty = False + self._listener_for_children.dirty = False + return size + + cls.ByteSize = ByteSize + + +def _AddSerializeToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializeToString(self, **kwargs): + # Check if the message has all of its required fields set. 
+ if not self.IsInitialized(): + raise message_mod.EncodeError( + 'Message %s is missing required fields: %s' % ( + self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) + return self.SerializePartialToString(**kwargs) + cls.SerializeToString = SerializeToString + + +def _AddSerializePartialToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializePartialToString(self, **kwargs): + out = BytesIO() + self._InternalSerialize(out.write, **kwargs) + return out.getvalue() + cls.SerializePartialToString = SerializePartialToString + + def InternalSerialize(self, write_bytes, deterministic=None): + if deterministic is None: + deterministic = ( + api_implementation.IsPythonDefaultSerializationDeterministic()) + else: + deterministic = bool(deterministic) + + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + descriptor.fields_by_name['key']._encoder( + write_bytes, self.key, deterministic) + descriptor.fields_by_name['value']._encoder( + write_bytes, self.value, deterministic) + else: + for field_descriptor, field_value in self.ListFields(): + field_descriptor._encoder(write_bytes, field_value, deterministic) + for tag_bytes, value_bytes in self._unknown_fields: + write_bytes(tag_bytes) + write_bytes(value_bytes) + cls._InternalSerialize = InternalSerialize + + +def _AddMergeFromStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def MergeFromString(self, serialized): + serialized = memoryview(serialized) + length = len(serialized) + try: + if self._InternalParse(serialized, 0, length) != length: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise message_mod.DecodeError('Unexpected end-group tag.') + except (IndexError, TypeError): + # Now ord(buf[p:p+1]) == ord('') gets TypeError. + raise message_mod.DecodeError('Truncated message.') + except struct.error as e: + raise message_mod.DecodeError(e) + return length # Return this for legacy reasons. + cls.MergeFromString = MergeFromString + + local_ReadTag = decoder.ReadTag + local_SkipField = decoder.SkipField + decoders_by_tag = cls._decoders_by_tag + + def InternalParse(self, buffer, pos, end): + """Create a message from serialized bytes. + + Args: + self: Message, instance of the proto message object. + buffer: memoryview of the serialized data. + pos: int, position to start in the serialized data. + end: int, end position of the serialized data. + + Returns: + Message object. + """ + # Guard against internal misuse, since this function is called internally + # quite extensively, and its easy to accidentally pass bytes. 
+ assert isinstance(buffer, memoryview) + self._Modified() + field_dict = self._fields + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + while pos != end: + (tag_bytes, new_pos) = local_ReadTag(buffer, pos) + field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) + if field_decoder is None: + if not self._unknown_fields: # pylint: disable=protected-access + self._unknown_fields = [] # pylint: disable=protected-access + if unknown_field_set is None: + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + # pylint: disable=protected-access + (tag, _) = decoder._DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if field_number == 0: + raise message_mod.DecodeError('Field number 0 is illegal.') + # TODO(jieluo): remove old_pos. + old_pos = new_pos + (data, new_pos) = decoder._DecodeUnknownField( + buffer, new_pos, wire_type) # pylint: disable=protected-access + if new_pos == -1: + return pos + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + # TODO(jieluo): remove _unknown_fields. + new_pos = local_SkipField(buffer, old_pos, end, tag_bytes) + if new_pos == -1: + return pos + self._unknown_fields.append( + (tag_bytes, buffer[old_pos:new_pos].tobytes())) + pos = new_pos + else: + pos = field_decoder(buffer, new_pos, end, self, field_dict) + if field_desc: + self._UpdateOneofState(field_desc) + return pos + cls._InternalParse = InternalParse + + +def _AddIsInitializedMethod(message_descriptor, cls): + """Adds the IsInitialized and FindInitializationError methods to the + protocol message class.""" + + required_fields = [field for field in message_descriptor.fields + if field.label == _FieldDescriptor.LABEL_REQUIRED] + + def IsInitialized(self, errors=None): + """Checks if all required fields of a message are set. + + Args: + errors: A list which, if provided, will be populated with the field + paths of all missing required fields. + + Returns: + True iff the specified message has all required fields set. + """ + + # Performance is critical so we avoid HasField() and ListFields(). + + for field in required_fields: + if (field not in self._fields or + (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and + not self._fields[field]._is_present_in_parent)): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + for field, value in list(self._fields.items()): # dict can change size! + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.label == _FieldDescriptor.LABEL_REPEATED: + if (field.message_type.has_options and + field.message_type.GetOptions().map_entry): + continue + for element in value: + if not element.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + elif value._is_present_in_parent and not value.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + return True + + cls.IsInitialized = IsInitialized + + def FindInitializationErrors(self): + """Finds required fields which are not initialized. + + Returns: + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
+ """ + + errors = [] # simplify things + + for field in required_fields: + if not self.HasField(field.name): + errors.append(field.name) + + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + name = '(%s)' % field.full_name + else: + name = field.name + + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + element = value[key] + prefix = '%s[%s].' % (name, key) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + # ScalarMaps can't have any initialization errors. + pass + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for i in range(len(value)): + element = value[i] + prefix = '%s[%d].' % (name, i) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + prefix = name + '.' + sub_errors = value.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + + return errors + + cls.FindInitializationErrors = FindInitializationErrors + + +def _FullyQualifiedClassName(klass): + module = klass.__module__ + name = getattr(klass, '__qualname__', klass.__name__) + if module in (None, 'builtins', '__builtin__'): + return name + return module + '.' + name + + +def _AddMergeFromMethod(cls): + LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED + CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE + + def MergeFrom(self, msg): + if not isinstance(msg, cls): + raise TypeError( + 'Parameter to MergeFrom() must be instance of same class: ' + 'expected %s got %s.' % (_FullyQualifiedClassName(cls), + _FullyQualifiedClassName(msg.__class__))) + + assert msg is not self + self._Modified() + + fields = self._fields + + for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + elif field.cpp_type == CPPTYPE_MESSAGE: + if value._is_present_in_parent: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + else: + self._fields[field] = value + if field.containing_oneof: + self._UpdateOneofState(field) + + if msg._unknown_fields: + if not self._unknown_fields: + self._unknown_fields = [] + self._unknown_fields.extend(msg._unknown_fields) + # pylint: disable=protected-access + if self._unknown_field_set is None: + self._unknown_field_set = containers.UnknownFieldSet() + self._unknown_field_set._extend(msg._unknown_field_set) + + cls.MergeFrom = MergeFrom + + +def _AddWhichOneofMethod(message_descriptor, cls): + def WhichOneof(self, oneof_name): + """Returns the name of the currently set field inside a oneof, or None.""" + try: + field = message_descriptor.oneofs_by_name[oneof_name] + except KeyError: + raise ValueError( + 'Protocol message has no oneof "%s" field.' % oneof_name) + + nested_field = self._oneofs.get(field, None) + if nested_field is not None and self.HasField(nested_field.name): + return nested_field.name + else: + return None + + cls.WhichOneof = WhichOneof + + +def _Clear(self): + # Clear fields. 
+ self._fields = {} + self._unknown_fields = () + # pylint: disable=protected-access + if self._unknown_field_set is not None: + self._unknown_field_set._clear() + self._unknown_field_set = None + + self._oneofs = {} + self._Modified() + + +def _UnknownFields(self): + if self._unknown_field_set is None: # pylint: disable=protected-access + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + return self._unknown_field_set # pylint: disable=protected-access + + +def _DiscardUnknownFields(self): + self._unknown_fields = [] + self._unknown_field_set = None # pylint: disable=protected-access + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + value[key].DiscardUnknownFields() + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for sub_message in value: + sub_message.DiscardUnknownFields() + else: + value.DiscardUnknownFields() + + +def _SetListener(self, listener): + if listener is None: + self._listener = message_listener_mod.NullMessageListener() + else: + self._listener = listener + + +def _AddMessageMethods(message_descriptor, cls): + """Adds implementations of all Message methods to cls.""" + _AddListFieldsMethod(message_descriptor, cls) + _AddHasFieldMethod(message_descriptor, cls) + _AddClearFieldMethod(message_descriptor, cls) + if message_descriptor.is_extendable: + _AddClearExtensionMethod(cls) + _AddHasExtensionMethod(cls) + _AddEqualsMethod(message_descriptor, cls) + _AddStrMethod(message_descriptor, cls) + _AddReprMethod(message_descriptor, cls) + _AddUnicodeMethod(message_descriptor, cls) + _AddByteSizeMethod(message_descriptor, cls) + _AddSerializeToStringMethod(message_descriptor, cls) + _AddSerializePartialToStringMethod(message_descriptor, cls) + _AddMergeFromStringMethod(message_descriptor, cls) + _AddIsInitializedMethod(message_descriptor, cls) + _AddMergeFromMethod(cls) + _AddWhichOneofMethod(message_descriptor, cls) + # Adds methods which do not depend on cls. + cls.Clear = _Clear + cls.UnknownFields = _UnknownFields + cls.DiscardUnknownFields = _DiscardUnknownFields + cls._SetListener = _SetListener + + +def _AddPrivateHelperMethods(message_descriptor, cls): + """Adds implementation of private helper methods to cls.""" + + def Modified(self): + """Sets the _cached_byte_size_dirty bit to true, + and propagates this to our listener iff this was a state change. + """ + + # Note: Some callers check _cached_byte_size_dirty before calling + # _Modified() as an extra optimization. So, if this method is ever + # changed such that it does stuff even when _cached_byte_size_dirty is + # already true, the callers need to be updated. + if not self._cached_byte_size_dirty: + self._cached_byte_size_dirty = True + self._listener_for_children.dirty = True + self._is_present_in_parent = True + self._listener.Modified() + + def _UpdateOneofState(self, field): + """Sets field as the active field in its containing oneof. + + Will also delete currently active field in the oneof, if it is different + from the argument. Does not mark the message as modified. 
+ """ + other_field = self._oneofs.setdefault(field.containing_oneof, field) + if other_field is not field: + del self._fields[other_field] + self._oneofs[field.containing_oneof] = field + + cls._Modified = Modified + cls.SetInParent = Modified + cls._UpdateOneofState = _UpdateOneofState + + +class _Listener(object): + + """MessageListener implementation that a parent message registers with its + child message. + + In order to support semantics like: + + foo.bar.baz.qux = 23 + assert foo.HasField('bar') + + ...child objects must have back references to their parents. + This helper class is at the heart of this support. + """ + + def __init__(self, parent_message): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + """ + # This listener establishes a back reference from a child (contained) object + # to its parent (containing) object. We make this a weak reference to avoid + # creating cyclic garbage when the client finishes with the 'parent' object + # in the tree. + if isinstance(parent_message, weakref.ProxyType): + self._parent_message_weakref = parent_message + else: + self._parent_message_weakref = weakref.proxy(parent_message) + + # As an optimization, we also indicate directly on the listener whether + # or not the parent message is dirty. This way we can avoid traversing + # up the tree in the common case. + self.dirty = False + + def Modified(self): + if self.dirty: + return + try: + # Propagate the signal to our parents iff this is the first field set. + self._parent_message_weakref._Modified() + except ReferenceError: + # We can get here if a client has kept a reference to a child object, + # and is now setting a field on it, but the child's parent has been + # garbage-collected. This is not an error. + pass + + +class _OneofListener(_Listener): + """Special listener implementation for setting composite oneof fields.""" + + def __init__(self, parent_message, field): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + field: The descriptor of the field being set in the parent message. + """ + super(_OneofListener, self).__init__(parent_message) + self._field = field + + def Modified(self): + """Also updates the state of the containing oneof in the parent message.""" + try: + self._parent_message_weakref._UpdateOneofState(self._field) + super(_OneofListener, self).Modified() + except ReferenceError: + pass diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/type_checkers.py b/MLPY/Lib/site-packages/google/protobuf/internal/type_checkers.py new file mode 100644 index 0000000000000000000000000000000000000000..5f8e54066a854cd492becab107d2649df0ede77b --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/type_checkers.py @@ -0,0 +1,435 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides type checking routines. + +This module defines type checking utilities in the forms of dictionaries: + +VALUE_CHECKERS: A dictionary of field types and a value validation object. +TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing + function. +TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization + function. +FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their + corresponding wire types. +TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization + function. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import ctypes +import numbers + +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import wire_format +from google.protobuf import descriptor + +_FieldDescriptor = descriptor.FieldDescriptor + + +def TruncateToFourByteFloat(original): + return ctypes.c_float(original).value + + +def ToShortestFloat(original): + """Returns the shortest float that has same value in wire.""" + # All 4 byte floats have between 6 and 9 significant digits, so we + # start with 6 as the lower bound. + # It has to be iterative because use '.9g' directly can not get rid + # of the noises for most values. For example if set a float_field=0.9 + # use '.9g' will print 0.899999976. + precision = 6 + rounded = float('{0:.{1}g}'.format(original, precision)) + while TruncateToFourByteFloat(rounded) != original: + precision += 1 + rounded = float('{0:.{1}g}'.format(original, precision)) + return rounded + + +def SupportsOpenEnums(field_descriptor): + return field_descriptor.containing_type.syntax == 'proto3' + + +def GetTypeChecker(field): + """Returns a type checker for a message field of the specified types. + + Args: + field: FieldDescriptor object for this field. + + Returns: + An instance of TypeChecker which can be used to verify the types + of values assigned to a field of the specified type. + """ + if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and + field.type == _FieldDescriptor.TYPE_STRING): + return UnicodeValueChecker() + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + if SupportsOpenEnums(field): + # When open enums are supported, any int32 can be assigned. + return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32] + else: + return EnumValueChecker(field.enum_type) + return _VALUE_CHECKERS[field.cpp_type] + + +# None of the typecheckers below make any attempt to guard against people +# subclassing builtin types and doing weird things. 
We're not trying to +# protect against malicious clients here, just people accidentally shooting +# themselves in the foot in obvious ways. +class TypeChecker(object): + + """Type checker used to catch type errors as early as possible + when the client is setting scalar fields in protocol messages. + """ + + def __init__(self, *acceptable_types): + self._acceptable_types = acceptable_types + + def CheckValue(self, proposed_value): + """Type check the provided value and return it. + + The returned value might have been normalized to another type. + """ + if not isinstance(proposed_value, self._acceptable_types): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), self._acceptable_types)) + raise TypeError(message) + return proposed_value + + +class TypeCheckerWithDefault(TypeChecker): + + def __init__(self, default_value, *acceptable_types): + TypeChecker.__init__(self, *acceptable_types) + self._default_value = default_value + + def DefaultValue(self): + return self._default_value + + +class BoolValueChecker(object): + """Type checker used for bool fields.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bool, int))) + raise TypeError(message) + return bool(proposed_value) + + def DefaultValue(self): + return False + + +# IntValueChecker and its subclasses perform integer type-checks +# and bounds-checks. +class IntValueChecker(object): + + """Checker used for integer fields. Performs type-check and range check.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + + if not self._MIN <= int(proposed_value) <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) + # We force all values to int to make alternate implementations where the + # distinction is more significant (e.g. the C++ implementation) simpler. + proposed_value = int(proposed_value) + return proposed_value + + def DefaultValue(self): + return 0 + + +class EnumValueChecker(object): + + """Checker used for enum fields. Performs type-check and range check.""" + + def __init__(self, enum_type): + self._enum_type = enum_type + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, numbers.Integral): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + if int(proposed_value) not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) + return proposed_value + + def DefaultValue(self): + return self._enum_type.values[0].number + + +class UnicodeValueChecker(object): + + """Checker used for string fields. + + Always returns a unicode value, even if the input is of type str. + """ + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, (bytes, str)): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bytes, str))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is valid UTF-8 data. 
+ if isinstance(proposed_value, bytes): + try: + proposed_value = proposed_value.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 ' + 'encoding. Non-UTF-8 strings must be converted to ' + 'unicode objects before being added.' % + (proposed_value)) + else: + try: + proposed_value.encode('utf8') + except UnicodeEncodeError: + raise ValueError('%.1024r isn\'t a valid unicode string and ' + 'can\'t be encoded in UTF-8.'% + (proposed_value)) + + return proposed_value + + def DefaultValue(self): + return u"" + + +class Int32ValueChecker(IntValueChecker): + # We're sure to use ints instead of longs here since comparison may be more + # efficient. + _MIN = -2147483648 + _MAX = 2147483647 + + +class Uint32ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 32) - 1 + + +class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 + + +class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 + + +# The max 4 bytes float is about 3.4028234663852886e+38 +_FLOAT_MAX = float.fromhex('0x1.fffffep+127') +_FLOAT_MIN = -_FLOAT_MAX +_INF = float('inf') +_NEG_INF = float('-inf') + + +class DoubleValueChecker(object): + """Checker used for double fields. + + Performs type-check and range check. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + if (not hasattr(proposed_value, '__float__') and + not hasattr(proposed_value, '__index__')) or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: int, float' % + (proposed_value, type(proposed_value))) + raise TypeError(message) + return float(proposed_value) + + def DefaultValue(self): + return 0.0 + + +class FloatValueChecker(DoubleValueChecker): + """Checker used for float fields. + + Performs type-check and range check. + + Values exceeding a 32-bit float will be converted to inf/-inf. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + converted_value = super().CheckValue(proposed_value) + # This inf rounding matches the C++ proto SafeDoubleToFloat logic. + if converted_value > _FLOAT_MAX: + return _INF + if converted_value < _FLOAT_MIN: + return _NEG_INF + + return TruncateToFourByteFloat(converted_value) + +# Type-checkers for all scalar CPPTYPEs. +_VALUE_CHECKERS = { + _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), + _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: DoubleValueChecker(), + _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(), + _FieldDescriptor.CPPTYPE_BOOL: BoolValueChecker(), + _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes), +} + + +# Map from field type to a function F, such that F(field_num, value) +# gives the total byte size for a value of the given type. This +# byte size includes tag information and any other additional space +# associated with serializing "value". 
+TYPE_TO_BYTE_SIZE_FN = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, + _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, + _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, + _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, + _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, + _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, + _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, + _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, + _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, + _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, + _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, + _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, + _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, + _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, + _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, + _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, + _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, + _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize + } + + +# Maps from field types to encoder constructors. +TYPE_TO_ENCODER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, + _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, + _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, + _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, + _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, + _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, + _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, + _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, + } + + +# Maps from field types to sizer constructors. +TYPE_TO_SIZER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, + _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, + _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, + _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, + _FieldDescriptor.TYPE_STRING: encoder.StringSizer, + _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, + _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, + _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, + } + + +# Maps from field type to a decoder constructor. 
+TYPE_TO_DECODER = { + _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, + _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, + _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, + _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, + _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, + _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, + _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, + _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, + _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, + _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, + _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, + _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, + _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, + _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, + _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, + _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, + _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, + _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, + } + +# Maps from field type to expected wiretype. +FIELD_TYPE_TO_WIRE_TYPE = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_STRING: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, + _FieldDescriptor.TYPE_MESSAGE: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_BYTES: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, + } diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/well_known_types.py b/MLPY/Lib/site-packages/google/protobuf/internal/well_known_types.py new file mode 100644 index 0000000000000000000000000000000000000000..4e771b9421d9081881666ec197f4a30a7bec5c97 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/well_known_types.py @@ -0,0 +1,878 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains well known classes. + +This files defines well known classes which need extra maintenance including: + - Any + - Duration + - FieldMask + - Struct + - Timestamp +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +import calendar +import collections.abc +import datetime + +from google.protobuf.descriptor import FieldDescriptor + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_NANOS_PER_SECOND = 1000000000 +_NANOS_PER_MILLISECOND = 1000000 +_NANOS_PER_MICROSECOND = 1000 +_MILLIS_PER_SECOND = 1000 +_MICROS_PER_SECOND = 1000000 +_SECONDS_PER_DAY = 24 * 3600 +_DURATION_SECONDS_MAX = 315576000000 + + +class Any(object): + """Class for Any Message type.""" + + __slots__ = () + + def Pack(self, msg, type_url_prefix='type.googleapis.com/', + deterministic=None): + """Packs the specified message into current Any message.""" + if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': + self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + else: + self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + self.value = msg.SerializeToString(deterministic=deterministic) + + def Unpack(self, msg): + """Unpacks the current Any message into specified message.""" + descriptor = msg.DESCRIPTOR + if not self.Is(descriptor): + return False + msg.ParseFromString(self.value) + return True + + def TypeName(self): + """Returns the protobuf type name of the inner message.""" + # Only last part is to be used: b/25630112 + return self.type_url.split('/')[-1] + + def Is(self, descriptor): + """Checks if this Any represents the given protobuf type.""" + return '/' in self.type_url and self.TypeName() == descriptor.full_name + + +_EPOCH_DATETIME_NAIVE = datetime.datetime.utcfromtimestamp(0) +_EPOCH_DATETIME_AWARE = datetime.datetime.fromtimestamp( + 0, tz=datetime.timezone.utc) + + +class Timestamp(object): + """Class for Timestamp message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. Example of the return format: '1972-01-01T10:00:20.021Z' + """ + nanos = self.nanos % _NANOS_PER_SECOND + total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND + seconds = total_sec % _SECONDS_PER_DAY + days = (total_sec - seconds) // _SECONDS_PER_DAY + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(days, seconds) + + result = dt.isoformat() + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 'Z' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. 
+ return result + '.%03dZ' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06dZ' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09dZ' % nanos + + def FromJsonString(self, value): + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. + Example of accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Timestamp JSON value not a string: {!r}'.format(value)) + timezone_offset = value.find('Z') + if timezone_offset == -1: + timezone_offset = value.find('+') + if timezone_offset == -1: + timezone_offset = value.rfind('-') + if timezone_offset == -1: + raise ValueError( + 'Failed to parse timestamp: missing valid timezone offset.') + time_value = value[0:timezone_offset] + # Parse datetime and nanos. + point_position = time_value.find('.') + if point_position == -1: + second_value = time_value + nano_value = '' + else: + second_value = time_value[:point_position] + nano_value = time_value[point_position + 1:] + if 't' in second_value: + raise ValueError( + 'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', ' + 'lowercase \'t\' is not accepted'.format(second_value)) + date_object = datetime.datetime.strptime(second_value, _TIMESTAMPFOMAT) + td = date_object - datetime.datetime(1970, 1, 1) + seconds = td.seconds + td.days * _SECONDS_PER_DAY + if len(nano_value) > 9: + raise ValueError( + 'Failed to parse Timestamp: nanos {0} more than ' + '9 fractional digits.'.format(nano_value)) + if nano_value: + nanos = round(float('0.' + nano_value) * 1e9) + else: + nanos = 0 + # Parse timezone offsets. 
+ if value[timezone_offset] == 'Z': + if len(value) != timezone_offset + 1: + raise ValueError('Failed to parse timestamp: invalid trailing' + ' data {0}.'.format(value)) + else: + timezone = value[timezone_offset:] + pos = timezone.find(':') + if pos == -1: + raise ValueError( + 'Invalid timezone offset value: {0}.'.format(timezone)) + if timezone[0] == '+': + seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + else: + seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + # Set seconds and nanos + self.seconds = int(seconds) + self.nanos = int(nanos) + + def GetCurrentTime(self): + """Get the current UTC into Timestamp.""" + self.FromDatetime(datetime.datetime.utcnow()) + + def ToNanoseconds(self): + """Converts Timestamp to nanoseconds since epoch.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts Timestamp to microseconds since epoch.""" + return (self.seconds * _MICROS_PER_SECOND + + self.nanos // _NANOS_PER_MICROSECOND) + + def ToMilliseconds(self): + """Converts Timestamp to milliseconds since epoch.""" + return (self.seconds * _MILLIS_PER_SECOND + + self.nanos // _NANOS_PER_MILLISECOND) + + def ToSeconds(self): + """Converts Timestamp to seconds since epoch.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds since epoch to Timestamp.""" + self.seconds = nanos // _NANOS_PER_SECOND + self.nanos = nanos % _NANOS_PER_SECOND + + def FromMicroseconds(self, micros): + """Converts microseconds since epoch to Timestamp.""" + self.seconds = micros // _MICROS_PER_SECOND + self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND + + def FromMilliseconds(self, millis): + """Converts milliseconds since epoch to Timestamp.""" + self.seconds = millis // _MILLIS_PER_SECOND + self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND + + def FromSeconds(self, seconds): + """Converts seconds since epoch to Timestamp.""" + self.seconds = seconds + self.nanos = 0 + + def ToDatetime(self, tzinfo=None): + """Converts Timestamp to a datetime. + + Args: + tzinfo: A datetime.tzinfo subclass; defaults to None. + + Returns: + If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone + information, i.e. not aware that it's UTC). + + Otherwise, returns a timezone-aware datetime in the input timezone. + """ + delta = datetime.timedelta( + seconds=self.seconds, + microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)) + if tzinfo is None: + return _EPOCH_DATETIME_NAIVE + delta + else: + return _EPOCH_DATETIME_AWARE.astimezone(tzinfo) + delta + + def FromDatetime(self, dt): + """Converts datetime to Timestamp. + + Args: + dt: A datetime. If it's timezone-naive, it's assumed to be in UTC. + """ + # Using this guide: http://wiki.python.org/moin/WorkingWithTime + # And this conversion guide: http://docs.python.org/library/time.html + + # Turn the date parameter into a tuple (struct_time) that can then be + # manipulated into a long value of seconds. During the conversion from + # struct_time to long, the source date in UTC, and so it follows that the + # correct transformation is calendar.timegm() + self.seconds = calendar.timegm(dt.utctimetuple()) + self.nanos = dt.microsecond * _NANOS_PER_MICROSECOND + + +class Duration(object): + """Class for Duration message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Duration to string format. + + Returns: + A string converted from self. 
The string format will contains + 3, 6, or 9 fractional digits depending on the precision required to + represent the exact Duration value. For example: "1s", "1.010s", + "1.000000100s", "-3.100s" + """ + _CheckDurationValid(self.seconds, self.nanos) + if self.seconds < 0 or self.nanos < 0: + result = '-' + seconds = - self.seconds + int((0 - self.nanos) // 1e9) + nanos = (0 - self.nanos) % 1e9 + else: + result = '' + seconds = self.seconds + int(self.nanos // 1e9) + nanos = self.nanos % 1e9 + result += '%d' % seconds + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 's' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. + return result + '.%03ds' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06ds' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09ds' % nanos + + def FromJsonString(self, value): + """Converts a string to Duration. + + Args: + value: A string to be converted. The string must end with 's'. Any + fractional digits (or none) are accepted as long as they fit into + precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Duration JSON value not a string: {!r}'.format(value)) + if len(value) < 1 or value[-1] != 's': + raise ValueError( + 'Duration must end with letter "s": {0}.'.format(value)) + try: + pos = value.find('.') + if pos == -1: + seconds = int(value[:-1]) + nanos = 0 + else: + seconds = int(value[:pos]) + if value[0] == '-': + nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) + else: + nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) + _CheckDurationValid(seconds, nanos) + self.seconds = seconds + self.nanos = nanos + except ValueError as e: + raise ValueError( + 'Couldn\'t parse duration: {0} : {1}.'.format(value, e)) + + def ToNanoseconds(self): + """Converts a Duration to nanoseconds.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts a Duration to microseconds.""" + micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND) + return self.seconds * _MICROS_PER_SECOND + micros + + def ToMilliseconds(self): + """Converts a Duration to milliseconds.""" + millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND) + return self.seconds * _MILLIS_PER_SECOND + millis + + def ToSeconds(self): + """Converts a Duration to seconds.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds to Duration.""" + self._NormalizeDuration(nanos // _NANOS_PER_SECOND, + nanos % _NANOS_PER_SECOND) + + def FromMicroseconds(self, micros): + """Converts microseconds to Duration.""" + self._NormalizeDuration( + micros // _MICROS_PER_SECOND, + (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) + + def FromMilliseconds(self, millis): + """Converts milliseconds to Duration.""" + self._NormalizeDuration( + millis // _MILLIS_PER_SECOND, + (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) + + def FromSeconds(self, seconds): + """Converts seconds to Duration.""" + self.seconds = seconds + self.nanos = 0 + + def ToTimedelta(self): + """Converts Duration to timedelta.""" + return datetime.timedelta( + seconds=self.seconds, microseconds=_RoundTowardZero( + self.nanos, _NANOS_PER_MICROSECOND)) + + def FromTimedelta(self, td): + """Converts timedelta to Duration.""" + 
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY, + td.microseconds * _NANOS_PER_MICROSECOND) + + def _NormalizeDuration(self, seconds, nanos): + """Set Duration by seconds and nanos.""" + # Force nanos to be negative if the duration is negative. + if seconds < 0 and nanos > 0: + seconds += 1 + nanos -= _NANOS_PER_SECOND + self.seconds = seconds + self.nanos = nanos + + +def _CheckDurationValid(seconds, nanos): + if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX: + raise ValueError( + 'Duration is not valid: Seconds {0} must be in range ' + '[-315576000000, 315576000000].'.format(seconds)) + if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND: + raise ValueError( + 'Duration is not valid: Nanos {0} must be in range ' + '[-999999999, 999999999].'.format(nanos)) + if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0): + raise ValueError( + 'Duration is not valid: Sign mismatch.') + + +def _RoundTowardZero(value, divider): + """Truncates the remainder part after division.""" + # For some languages, the sign of the remainder is implementation + # dependent if any of the operands is negative. Here we enforce + # "rounded toward zero" semantics. For example, for (-5) / 2 an + # implementation may give -3 as the result with the remainder being + # 1. This function ensures we always return -2 (closer to zero). + result = value // divider + remainder = value % divider + if result < 0 and remainder > 0: + return result + 1 + else: + return result + + +class FieldMask(object): + """Class for FieldMask message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts FieldMask to string according to proto3 JSON spec.""" + camelcase_paths = [] + for path in self.paths: + camelcase_paths.append(_SnakeCaseToCamelCase(path)) + return ','.join(camelcase_paths) + + def FromJsonString(self, value): + """Converts string to FieldMask according to proto3 JSON spec.""" + if not isinstance(value, str): + raise ValueError('FieldMask JSON value not a string: {!r}'.format(value)) + self.Clear() + if value: + for path in value.split(','): + self.paths.append(_CamelCaseToSnakeCase(path)) + + def IsValidForDescriptor(self, message_descriptor): + """Checks whether the FieldMask is valid for Message Descriptor.""" + for path in self.paths: + if not _IsValidPath(message_descriptor, path): + return False + return True + + def AllFieldsFromDescriptor(self, message_descriptor): + """Gets all direct fields of Message Descriptor to FieldMask.""" + self.Clear() + for field in message_descriptor.fields: + self.paths.append(field.name) + + def CanonicalFormFromMask(self, mask): + """Converts a FieldMask to the canonical form. + + Removes paths that are covered by another path. For example, + "foo.bar" is covered by "foo" and will be removed if "foo" + is also in the FieldMask. Then sorts all paths in alphabetical order. + + Args: + mask: The original FieldMask to be converted. 
+ """ + tree = _FieldMaskTree(mask) + tree.ToFieldMask(self) + + def Union(self, mask1, mask2): + """Merges mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + tree.MergeFromFieldMask(mask2) + tree.ToFieldMask(self) + + def Intersect(self, mask1, mask2): + """Intersects mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + intersection = _FieldMaskTree() + for path in mask2.paths: + tree.IntersectPath(path, intersection) + intersection.ToFieldMask(self) + + def MergeMessage( + self, source, destination, + replace_message_field=False, replace_repeated_field=False): + """Merges fields specified in FieldMask from source to destination. + + Args: + source: Source message. + destination: The destination message to be merged into. + replace_message_field: Replace message field if True. Merge message + field if False. + replace_repeated_field: Replace repeated field if True. Append + elements of repeated field if False. + """ + tree = _FieldMaskTree(self) + tree.MergeMessage( + source, destination, replace_message_field, replace_repeated_field) + + +def _IsValidPath(message_descriptor, path): + """Checks whether the path is valid for Message Descriptor.""" + parts = path.split('.') + last = parts.pop() + for name in parts: + field = message_descriptor.fields_by_name.get(name) + if (field is None or + field.label == FieldDescriptor.LABEL_REPEATED or + field.type != FieldDescriptor.TYPE_MESSAGE): + return False + message_descriptor = field.message_type + return last in message_descriptor.fields_by_name + + +def _CheckFieldMaskMessage(message): + """Raises ValueError if message is not a FieldMask.""" + message_descriptor = message.DESCRIPTOR + if (message_descriptor.name != 'FieldMask' or + message_descriptor.file.name != 'google/protobuf/field_mask.proto'): + raise ValueError('Message {0} is not a FieldMask.'.format( + message_descriptor.full_name)) + + +def _SnakeCaseToCamelCase(path_name): + """Converts a path name from snake_case to camelCase.""" + result = [] + after_underscore = False + for c in path_name: + if c.isupper(): + raise ValueError( + 'Fail to print FieldMask to Json string: Path name ' + '{0} must not contain uppercase letters.'.format(path_name)) + if after_underscore: + if c.islower(): + result.append(c.upper()) + after_underscore = False + else: + raise ValueError( + 'Fail to print FieldMask to Json string: The ' + 'character after a "_" must be a lowercase letter ' + 'in path name {0}.'.format(path_name)) + elif c == '_': + after_underscore = True + else: + result += c + + if after_underscore: + raise ValueError('Fail to print FieldMask to Json string: Trailing "_" ' + 'in path name {0}.'.format(path_name)) + return ''.join(result) + + +def _CamelCaseToSnakeCase(path_name): + """Converts a field name from camelCase to snake_case.""" + result = [] + for c in path_name: + if c == '_': + raise ValueError('Fail to parse FieldMask: Path name ' + '{0} must not contain "_"s.'.format(path_name)) + if c.isupper(): + result += '_' + result += c.lower() + else: + result += c + return ''.join(result) + + +class _FieldMaskTree(object): + """Represents a FieldMask in a tree structure. + + For example, given a FieldMask "foo.bar,foo.baz,bar.baz", + the FieldMaskTree will be: + [_root] -+- foo -+- bar + | | + | +- baz + | + +- bar --- baz + In the tree, each leaf node represents a field path. 
+ """ + + __slots__ = ('_root',) + + def __init__(self, field_mask=None): + """Initializes the tree by FieldMask.""" + self._root = {} + if field_mask: + self.MergeFromFieldMask(field_mask) + + def MergeFromFieldMask(self, field_mask): + """Merges a FieldMask to the tree.""" + for path in field_mask.paths: + self.AddPath(path) + + def AddPath(self, path): + """Adds a field path into the tree. + + If the field path to add is a sub-path of an existing field path + in the tree (i.e., a leaf node), it means the tree already matches + the given path so nothing will be added to the tree. If the path + matches an existing non-leaf node in the tree, that non-leaf node + will be turned into a leaf node with all its children removed because + the path matches all the node's children. Otherwise, a new path will + be added. + + Args: + path: The field path to add. + """ + node = self._root + for name in path.split('.'): + if name not in node: + node[name] = {} + elif not node[name]: + # Pre-existing empty node implies we already have this entire tree. + return + node = node[name] + # Remove any sub-trees we might have had. + node.clear() + + def ToFieldMask(self, field_mask): + """Converts the tree to a FieldMask.""" + field_mask.Clear() + _AddFieldPaths(self._root, '', field_mask) + + def IntersectPath(self, path, intersection): + """Calculates the intersection part of a field path with this tree. + + Args: + path: The field path to calculates. + intersection: The out tree to record the intersection part. + """ + node = self._root + for name in path.split('.'): + if name not in node: + return + elif not node[name]: + intersection.AddPath(path) + return + node = node[name] + intersection.AddLeafNodes(path, node) + + def AddLeafNodes(self, prefix, node): + """Adds leaf nodes begin with prefix to this tree.""" + if not node: + self.AddPath(prefix) + for name in node: + child_path = prefix + '.' + name + self.AddLeafNodes(child_path, node[name]) + + def MergeMessage( + self, source, destination, + replace_message, replace_repeated): + """Merge all fields specified by this tree from source to destination.""" + _MergeMessage( + self._root, source, destination, replace_message, replace_repeated) + + +def _StrConvert(value): + """Converts value to str if it is not.""" + # This file is imported by c extension and some methods like ClearField + # requires string for the field name. py2/py3 has different text + # type and may use unicode. + if not isinstance(value, str): + return value.encode('utf-8') + return value + + +def _MergeMessage( + node, source, destination, replace_message, replace_repeated): + """Merge all fields specified by a sub-tree from source to destination.""" + source_descriptor = source.DESCRIPTOR + for name in node: + child = node[name] + field = source_descriptor.fields_by_name[name] + if field is None: + raise ValueError('Error: Can\'t find field {0} in message {1}.'.format( + name, source_descriptor.full_name)) + if child: + # Sub-paths are only allowed for singular message fields. 
+ if (field.label == FieldDescriptor.LABEL_REPEATED or + field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE): + raise ValueError('Error: Field {0} in message {1} is not a singular ' + 'message field and cannot have sub-fields.'.format( + name, source_descriptor.full_name)) + if source.HasField(name): + _MergeMessage( + child, getattr(source, name), getattr(destination, name), + replace_message, replace_repeated) + continue + if field.label == FieldDescriptor.LABEL_REPEATED: + if replace_repeated: + destination.ClearField(_StrConvert(name)) + repeated_source = getattr(source, name) + repeated_destination = getattr(destination, name) + repeated_destination.MergeFrom(repeated_source) + else: + if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + if replace_message: + destination.ClearField(_StrConvert(name)) + if source.HasField(name): + getattr(destination, name).MergeFrom(getattr(source, name)) + else: + setattr(destination, name, getattr(source, name)) + + +def _AddFieldPaths(node, prefix, field_mask): + """Adds the field paths descended from node to field_mask.""" + if not node and prefix: + field_mask.paths.append(prefix) + return + for name in sorted(node): + if prefix: + child_path = prefix + '.' + name + else: + child_path = name + _AddFieldPaths(node[name], child_path, field_mask) + + +def _SetStructValue(struct_value, value): + if value is None: + struct_value.null_value = 0 + elif isinstance(value, bool): + # Note: this check must come before the number check because in Python + # True and False are also considered numbers. + struct_value.bool_value = value + elif isinstance(value, str): + struct_value.string_value = value + elif isinstance(value, (int, float)): + struct_value.number_value = value + elif isinstance(value, (dict, Struct)): + struct_value.struct_value.Clear() + struct_value.struct_value.update(value) + elif isinstance(value, (list, ListValue)): + struct_value.list_value.Clear() + struct_value.list_value.extend(value) + else: + raise ValueError('Unexpected type') + + +def _GetStructValue(struct_value): + which = struct_value.WhichOneof('kind') + if which == 'struct_value': + return struct_value.struct_value + elif which == 'null_value': + return None + elif which == 'number_value': + return struct_value.number_value + elif which == 'string_value': + return struct_value.string_value + elif which == 'bool_value': + return struct_value.bool_value + elif which == 'list_value': + return struct_value.list_value + elif which is None: + raise ValueError('Value not set') + + +class Struct(object): + """Class for Struct message type.""" + + __slots__ = () + + def __getitem__(self, key): + return _GetStructValue(self.fields[key]) + + def __contains__(self, item): + return item in self.fields + + def __setitem__(self, key, value): + _SetStructValue(self.fields[key], value) + + def __delitem__(self, key): + del self.fields[key] + + def __len__(self): + return len(self.fields) + + def __iter__(self): + return iter(self.fields) + + def keys(self): # pylint: disable=invalid-name + return self.fields.keys() + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] + + def items(self): # pylint: disable=invalid-name + return [(key, self[key]) for key in self] + + def get_or_create_list(self, key): + """Returns a list for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('list_value'): + # Clear will mark list_value modified which will indeed create a list. 
+ self.fields[key].list_value.Clear() + return self.fields[key].list_value + + def get_or_create_struct(self, key): + """Returns a struct for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('struct_value'): + # Clear will mark struct_value modified which will indeed create a struct. + self.fields[key].struct_value.Clear() + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name + for key, value in dictionary.items(): + _SetStructValue(self.fields[key], value) + +collections.abc.MutableMapping.register(Struct) + + +class ListValue(object): + """Class for ListValue message type.""" + + __slots__ = () + + def __len__(self): + return len(self.values) + + def append(self, value): + _SetStructValue(self.values.add(), value) + + def extend(self, elem_seq): + for value in elem_seq: + self.append(value) + + def __getitem__(self, index): + """Retrieves item by the specified index.""" + return _GetStructValue(self.values.__getitem__(index)) + + def __setitem__(self, index, value): + _SetStructValue(self.values.__getitem__(index), value) + + def __delitem__(self, key): + del self.values[key] + + def items(self): + for i in range(len(self)): + yield self[i] + + def add_struct(self): + """Appends and returns a struct value as the next value in the list.""" + struct_value = self.values.add().struct_value + # Clear will mark struct_value modified which will indeed create a struct. + struct_value.Clear() + return struct_value + + def add_list(self): + """Appends and returns a list value as the next value in the list.""" + list_value = self.values.add().list_value + # Clear will mark list_value modified which will indeed create a list. + list_value.Clear() + return list_value + +collections.abc.MutableSequence.register(ListValue) + + +WKTBASES = { + 'google.protobuf.Any': Any, + 'google.protobuf.Duration': Duration, + 'google.protobuf.FieldMask': FieldMask, + 'google.protobuf.ListValue': ListValue, + 'google.protobuf.Struct': Struct, + 'google.protobuf.Timestamp': Timestamp, +} diff --git a/MLPY/Lib/site-packages/google/protobuf/internal/wire_format.py b/MLPY/Lib/site-packages/google/protobuf/internal/wire_format.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0ef59ed0a5539a07ca8e7c4e6136fa8a6614ad --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/internal/wire_format.py @@ -0,0 +1,268 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Constants and static functions to support protocol buffer wire format."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from google.protobuf import descriptor
+from google.protobuf import message
+
+
+TAG_TYPE_BITS = 3  # Number of bits used to hold type info in a proto tag.
+TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1  # 0x7
+
+# These numbers identify the wire type of a protocol buffer value.
+# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
+# tag-and-type to store one of these WIRETYPE_* constants.
+# These values must match WireType enum in google/protobuf/wire_format.h.
+WIRETYPE_VARINT = 0
+WIRETYPE_FIXED64 = 1
+WIRETYPE_LENGTH_DELIMITED = 2
+WIRETYPE_START_GROUP = 3
+WIRETYPE_END_GROUP = 4
+WIRETYPE_FIXED32 = 5
+_WIRETYPE_MAX = 5
+
+
+# Bounds for various integer types.
+INT32_MAX = int((1 << 31) - 1)
+INT32_MIN = int(-(1 << 31))
+UINT32_MAX = (1 << 32) - 1
+
+INT64_MAX = (1 << 63) - 1
+INT64_MIN = -(1 << 63)
+UINT64_MAX = (1 << 64) - 1
+
+# "struct" format strings that will encode/decode the specified formats.
+FORMAT_UINT32_LITTLE_ENDIAN = '<I'
+FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
+FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
+FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
+
+
+# We'll have to provide alternate implementations of AppendLittleEndian*() on
+# any architectures where these checks fail.
+if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
+  raise AssertionError('Format "I" is not a 32-bit number.')
+if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
+  raise AssertionError('Format "Q" is not a 64-bit number.')
+
+
+def PackTag(field_number, wire_type):
+  """Returns an unsigned 32-bit integer that encodes the field number and
+  wire type information in standard protocol message wire format.
+
+  Args:
+    field_number: Expected to be an integer in the range [1, 1 << 29)
+    wire_type: One of the WIRETYPE_* constants.
+  """
+  if not 0 <= wire_type <= _WIRETYPE_MAX:
+    raise message.EncodeError('Unknown wire type: %d' % wire_type)
+  return (field_number << TAG_TYPE_BITS) | wire_type
+
+
+def UnpackTag(tag):
+  """The inverse of PackTag().  Given an unsigned 32-bit number,
+  returns a (field_number, wire_type) tuple.
+  """
+  return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
+
+
+def ZigZagEncode(value):
+  """ZigZag Transform: Encodes signed integers so that they can be
+  effectively used with varint encoding. See wire_format.h for
+  more details.
+  """
+  if value >= 0:
+    return value << 1
+  return (value << 1) ^ (~0)
+
+
+def ZigZagDecode(value):
+  """Inverse of ZigZagEncode()."""
+  if not value & 0x1:
+    return value >> 1
+  return (value >> 1) ^ (~0)
+
+
+
+# The *ByteSize() functions below return the number of bytes required to
+# serialize "field number + type" information and then serialize the value.
+
+
+def Int32ByteSize(field_number, int32):
+  return Int64ByteSize(field_number, int32)
+
+
+def Int32ByteSizeNoTag(int32):
+  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
+
+
+def Int64ByteSize(field_number, int64):
+  # Have to convert to uint before calling UInt64ByteSize().
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) + + +def UInt32ByteSize(field_number, uint32): + return UInt64ByteSize(field_number, uint32) + + +def UInt64ByteSize(field_number, uint64): + return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) + + +def SInt32ByteSize(field_number, int32): + return UInt32ByteSize(field_number, ZigZagEncode(int32)) + + +def SInt64ByteSize(field_number, int64): + return UInt64ByteSize(field_number, ZigZagEncode(int64)) + + +def Fixed32ByteSize(field_number, fixed32): + return TagByteSize(field_number) + 4 + + +def Fixed64ByteSize(field_number, fixed64): + return TagByteSize(field_number) + 8 + + +def SFixed32ByteSize(field_number, sfixed32): + return TagByteSize(field_number) + 4 + + +def SFixed64ByteSize(field_number, sfixed64): + return TagByteSize(field_number) + 8 + + +def FloatByteSize(field_number, flt): + return TagByteSize(field_number) + 4 + + +def DoubleByteSize(field_number, double): + return TagByteSize(field_number) + 8 + + +def BoolByteSize(field_number, b): + return TagByteSize(field_number) + 1 + + +def EnumByteSize(field_number, enum): + return UInt32ByteSize(field_number, enum) + + +def StringByteSize(field_number, string): + return BytesByteSize(field_number, string.encode('utf-8')) + + +def BytesByteSize(field_number, b): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(len(b)) + + len(b)) + + +def GroupByteSize(field_number, message): + return (2 * TagByteSize(field_number) # START and END group. + + message.ByteSize()) + + +def MessageByteSize(field_number, message): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(message.ByteSize()) + + message.ByteSize()) + + +def MessageSetItemByteSize(field_number, msg): + # First compute the sizes of the tags. + # There are 2 tags for the beginning and ending of the repeated group, that + # is field number 1, one with field number 2 (type_id) and one with field + # number 3 (message). + total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) + + # Add the number of bytes for type_id. + total_size += _VarUInt64ByteSizeNoTag(field_number) + + message_size = msg.ByteSize() + + # The number of bytes for encoding the length of the message. + total_size += _VarUInt64ByteSizeNoTag(message_size) + + # The size of the message. + total_size += message_size + return total_size + + +def TagByteSize(field_number): + """Returns the bytes required to serialize a tag with this field number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) + + +# Private helper function for the *ByteSize() functions above. + +def _VarUInt64ByteSizeNoTag(uint64): + """Returns the number of bytes required to serialize a single varint + using boundary value comparisons. (unrolled loop optimization -WPierce) + uint64 must be unsigned. 
+ """ + if uint64 <= 0x7f: return 1 + if uint64 <= 0x3fff: return 2 + if uint64 <= 0x1fffff: return 3 + if uint64 <= 0xfffffff: return 4 + if uint64 <= 0x7ffffffff: return 5 + if uint64 <= 0x3ffffffffff: return 6 + if uint64 <= 0x1ffffffffffff: return 7 + if uint64 <= 0xffffffffffffff: return 8 + if uint64 <= 0x7fffffffffffffff: return 9 + if uint64 > UINT64_MAX: + raise message.EncodeError('Value out of range: %d' % uint64) + return 10 + + +NON_PACKABLE_TYPES = ( + descriptor.FieldDescriptor.TYPE_STRING, + descriptor.FieldDescriptor.TYPE_GROUP, + descriptor.FieldDescriptor.TYPE_MESSAGE, + descriptor.FieldDescriptor.TYPE_BYTES +) + + +def IsTypePackable(field_type): + """Return true iff packable = true is valid for fields of this type. + + Args: + field_type: a FieldDescriptor::Type value. + + Returns: + True iff fields of this type are packable. + """ + return field_type not in NON_PACKABLE_TYPES diff --git a/MLPY/Lib/site-packages/google/protobuf/json_format.py b/MLPY/Lib/site-packages/google/protobuf/json_format.py new file mode 100644 index 0000000000000000000000000000000000000000..4204778e43bcc4f7a89fac468508bc8b07672ddb --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/json_format.py @@ -0,0 +1,912 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in JSON format. + +Simple usage example: + + # Create a proto object and serialize it to a json format string. + message = my_proto_pb2.MyMessage(foo='bar') + json_string = json_format.MessageToJson(message) + + # Parse a json format string to proto object. 
+ message = json_format.Parse(json_string, my_proto_pb2.MyMessage()) +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + + +import base64 +from collections import OrderedDict +import json +import math +from operator import methodcaller +import re +import sys + +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import symbol_database + + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32, + descriptor.FieldDescriptor.CPPTYPE_UINT32, + descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64, + descriptor.FieldDescriptor.CPPTYPE_UINT64]) +_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, + descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) +_INFINITY = 'Infinity' +_NEG_INFINITY = '-Infinity' +_NAN = 'NaN' + +_UNPAIRED_SURROGATE_PATTERN = re.compile( + u'[\ud800-\udbff](?![\udc00-\udfff])|(? self.max_recursion_depth: + raise ParseError('Message too deep. Max recursion depth is {0}'.format( + self.max_recursion_depth)) + message_descriptor = message.DESCRIPTOR + full_name = message_descriptor.full_name + if not path: + path = message_descriptor.name + if _IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value, message, path) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self) + else: + self._ConvertFieldValuePair(value, message, path) + self.recursion_depth -= 1 + + def _ConvertFieldValuePair(self, js, message, path): + """Convert field value pairs into regular message. + + Args: + js: A JSON object to convert the field value pairs. + message: A regular protocol message to record the data. + path: parent path to log parse error info. + + Raises: + ParseError: In case of problems converting. + """ + names = [] + message_descriptor = message.DESCRIPTOR + fields_by_json_name = dict((f.json_name, f) + for f in message_descriptor.fields) + for name in js: + try: + field = fields_by_json_name.get(name, None) + if not field: + field = message_descriptor.fields_by_name.get(name, None) + if not field and _VALID_EXTENSION_NAME.match(name): + if not message_descriptor.is_extendable: + raise ParseError( + 'Message type {0} does not have extensions at {1}'.format( + message_descriptor.full_name, path)) + identifier = name[1:-1] # strip [] brackets + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + # Try looking for extension by the message type name, dropping the + # field name following the final . separator in full_name. + identifier = '.'.join(identifier.split('.')[:-1]) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(identifier) + # pylint: enable=protected-access + if not field: + if self.ignore_unknown_fields: + continue + raise ParseError( + ('Message type "{0}" has no field named "{1}" at "{2}".\n' + ' Available Fields(except extensions): "{3}"').format( + message_descriptor.full_name, name, path, + [f.json_name for f in message_descriptor.fields])) + if name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" fields at "{2}".'.format( + message.DESCRIPTOR.full_name, name, path)) + names.append(name) + value = js[name] + # Check no other oneof field is parsed. 
+ if field.containing_oneof is not None and value is not None: + oneof_name = field.containing_oneof.name + if oneof_name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" oneof fields at "{2}".'.format( + message.DESCRIPTOR.full_name, oneof_name, + path)) + names.append(oneof_name) + + if value is None: + if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.message_type.full_name == 'google.protobuf.Value'): + sub_message = getattr(message, field.name) + sub_message.null_value = 0 + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM + and field.enum_type.full_name == 'google.protobuf.NullValue'): + setattr(message, field.name, 0) + else: + message.ClearField(field.name) + continue + + # Parse field value. + if _IsMapEntry(field): + message.ClearField(field.name) + self._ConvertMapFieldValue(value, message, field, + '{0}.{1}'.format(path, name)) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + message.ClearField(field.name) + if not isinstance(value, list): + raise ParseError('repeated field {0} must be in [] which is ' + '{1} at {2}'.format(name, value, path)) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + # Repeated message field. + for index, item in enumerate(value): + sub_message = getattr(message, field.name).add() + # None is a null_value in Value. + if (item is None and + sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + self.ConvertMessage(item, sub_message, + '{0}.{1}[{2}]'.format(path, name, index)) + else: + # Repeated scalar field. + for index, item in enumerate(value): + if item is None: + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + getattr(message, field.name).append( + _ConvertScalarFieldValue( + item, field, '{0}.{1}[{2}]'.format(path, name, index))) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + sub_message = message.Extensions[field] + else: + sub_message = getattr(message, field.name) + sub_message.SetInParent() + self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name)) + else: + if field.is_extension: + message.Extensions[field] = _ConvertScalarFieldValue( + value, field, '{0}.{1}'.format(path, name)) + else: + setattr( + message, field.name, + _ConvertScalarFieldValue(value, field, + '{0}.{1}'.format(path, name))) + except ParseError as e: + if field and field.containing_oneof is None: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + else: + raise ParseError(str(e)) + except ValueError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + except TypeError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + + def _ConvertAnyMessage(self, value, message, path): + """Convert a JSON representation into Any message.""" + if isinstance(value, dict) and not value: + return + try: + type_url = value['@type'] + except KeyError: + raise ParseError( + '@type is missing when parsing any message at {0}'.format(path)) + + try: + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + except TypeError as e: + raise ParseError('{0} at {1}'.format(e, path)) + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if 
_IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value['value'], sub_message, + '{0}.value'.format(path)) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message, + '{0}.value'.format(path))( + self) + else: + del value['@type'] + self._ConvertFieldValuePair(value, sub_message, path) + value['@type'] = type_url + # Sets Any message + message.value = sub_message.SerializeToString() + message.type_url = type_url + + def _ConvertGenericMessage(self, value, message, path): + """Convert a JSON representation into message with FromJsonString.""" + # Duration, Timestamp, FieldMask have a FromJsonString method to do the + # conversion. Users can also call the method directly. + try: + message.FromJsonString(value) + except ValueError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + def _ConvertValueMessage(self, value, message, path): + """Convert a JSON representation into Value message.""" + if isinstance(value, dict): + self._ConvertStructMessage(value, message.struct_value, path) + elif isinstance(value, list): + self._ConvertListValueMessage(value, message.list_value, path) + elif value is None: + message.null_value = 0 + elif isinstance(value, bool): + message.bool_value = value + elif isinstance(value, str): + message.string_value = value + elif isinstance(value, _INT_OR_FLOAT): + message.number_value = value + else: + raise ParseError('Value {0} has unexpected type {1} at {2}'.format( + value, type(value), path)) + + def _ConvertListValueMessage(self, value, message, path): + """Convert a JSON representation into ListValue message.""" + if not isinstance(value, list): + raise ParseError('ListValue must be in [] which is {0} at {1}'.format( + value, path)) + message.ClearField('values') + for index, item in enumerate(value): + self._ConvertValueMessage(item, message.values.add(), + '{0}[{1}]'.format(path, index)) + + def _ConvertStructMessage(self, value, message, path): + """Convert a JSON representation into Struct message.""" + if not isinstance(value, dict): + raise ParseError('Struct must be in a dict which is {0} at {1}'.format( + value, path)) + # Clear will mark the struct as modified so it will be created even if + # there are no values. + message.Clear() + for key in value: + self._ConvertValueMessage(value[key], message.fields[key], + '{0}.{1}'.format(path, key)) + return + + def _ConvertWrapperMessage(self, value, message, path): + """Convert a JSON representation into Wrapper message.""" + field = message.DESCRIPTOR.fields_by_name['value'] + setattr( + message, 'value', + _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path))) + + def _ConvertMapFieldValue(self, value, message, field, path): + """Convert map field value for a message map field. + + Args: + value: A JSON object to convert the map field value. + message: A protocol message to record the converted data. + field: The descriptor of the map field to be converted. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. 
+ """ + if not isinstance(value, dict): + raise ParseError( + 'Map field {0} must be in a dict which is {1} at {2}'.format( + field.name, value, path)) + key_field = field.message_type.fields_by_name['key'] + value_field = field.message_type.fields_by_name['value'] + for key in value: + key_value = _ConvertScalarFieldValue(key, key_field, + '{0}.key'.format(path), True) + if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self.ConvertMessage(value[key], + getattr(message, field.name)[key_value], + '{0}[{1}]'.format(path, key_value)) + else: + getattr(message, field.name)[key_value] = _ConvertScalarFieldValue( + value[key], value_field, path='{0}[{1}]'.format(path, key_value)) + + +def _ConvertScalarFieldValue(value, field, path, require_str=False): + """Convert a single scalar field value. + + Args: + value: A scalar value to convert the scalar field value. + field: The descriptor of the field to convert. + path: parent path to log parse error info. + require_str: If True, the field value must be a str. + + Returns: + The converted scalar field value + + Raises: + ParseError: In case of convert problems. + """ + try: + if field.cpp_type in _INT_TYPES: + return _ConvertInteger(value) + elif field.cpp_type in _FLOAT_TYPES: + return _ConvertFloat(value, field) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return _ConvertBool(value, require_str) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + if isinstance(value, str): + encoded = value.encode('utf-8') + else: + encoded = value + # Add extra padding '=' + padded_value = encoded + b'=' * (4 - len(encoded) % 4) + return base64.urlsafe_b64decode(padded_value) + else: + # Checking for unpaired surrogates appears to be unreliable, + # depending on the specific Python version, so we check manually. + if _UNPAIRED_SURROGATE_PATTERN.search(value): + raise ParseError('Unpaired surrogate') + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + # Convert an enum value. + enum_value = field.enum_type.values_by_name.get(value, None) + if enum_value is None: + try: + number = int(value) + enum_value = field.enum_type.values_by_number.get(number, None) + except ValueError: + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + if enum_value is None: + if field.file.syntax == 'proto3': + # Proto3 accepts unknown enums. + return number + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + return enum_value.number + except ParseError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + +def _ConvertInteger(value): + """Convert an integer. + + Args: + value: A scalar value to convert. + + Returns: + The integer value. + + Raises: + ParseError: If an integer couldn't be consumed. 
+ """ + if isinstance(value, float) and not value.is_integer(): + raise ParseError('Couldn\'t parse integer: {0}'.format(value)) + + if isinstance(value, str) and value.find(' ') != -1: + raise ParseError('Couldn\'t parse integer: "{0}"'.format(value)) + + if isinstance(value, bool): + raise ParseError('Bool value {0} is not acceptable for ' + 'integer field'.format(value)) + + return int(value) + + +def _ConvertFloat(value, field): + """Convert an floating point number.""" + if isinstance(value, float): + if math.isnan(value): + raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead') + if math.isinf(value): + if value > 0: + raise ParseError('Couldn\'t parse Infinity or value too large, ' + 'use quoted "Infinity" instead') + else: + raise ParseError('Couldn\'t parse -Infinity or value too small, ' + 'use quoted "-Infinity" instead') + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + # pylint: disable=protected-access + if value > type_checkers._FLOAT_MAX: + raise ParseError('Float value too large') + # pylint: disable=protected-access + if value < type_checkers._FLOAT_MIN: + raise ParseError('Float value too small') + if value == 'nan': + raise ParseError('Couldn\'t parse float "nan", use "NaN" instead') + try: + # Assume Python compatible syntax. + return float(value) + except ValueError: + # Check alternative spellings. + if value == _NEG_INFINITY: + return float('-inf') + elif value == _INFINITY: + return float('inf') + elif value == _NAN: + return float('nan') + else: + raise ParseError('Couldn\'t parse float: {0}'.format(value)) + + +def _ConvertBool(value, require_str): + """Convert a boolean value. + + Args: + value: A scalar value to convert. + require_str: If True, value must be a str. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + if require_str: + if value == 'true': + return True + elif value == 'false': + return False + else: + raise ParseError('Expected "true" or "false", not {0}'.format(value)) + + if not isinstance(value, bool): + raise ParseError('Expected true or false without quotes') + return value + +_WKTJSONMETHODS = { + 'google.protobuf.Any': ['_AnyMessageToJsonObject', + '_ConvertAnyMessage'], + 'google.protobuf.Duration': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.FieldMask': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.ListValue': ['_ListValueMessageToJsonObject', + '_ConvertListValueMessage'], + 'google.protobuf.Struct': ['_StructMessageToJsonObject', + '_ConvertStructMessage'], + 'google.protobuf.Timestamp': ['_GenericMessageToJsonObject', + '_ConvertGenericMessage'], + 'google.protobuf.Value': ['_ValueMessageToJsonObject', + '_ConvertValueMessage'] +} diff --git a/MLPY/Lib/site-packages/google/protobuf/message.py b/MLPY/Lib/site-packages/google/protobuf/message.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a7dfd006f16c407fccee4a78f194477e09b5e8 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/message.py @@ -0,0 +1,424 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# TODO(robinson): We should just make these methods all "pure-virtual" and move +# all implementation out, into reflection.py for now. + + +"""Contains an abstract base class for protocol messages.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +class Error(Exception): + """Base error type for this module.""" + pass + + +class DecodeError(Error): + """Exception raised when deserializing messages.""" + pass + + +class EncodeError(Error): + """Exception raised when serializing messages.""" + pass + + +class Message(object): + + """Abstract base class for protocol messages. + + Protocol message classes are almost always generated by the protocol + compiler. These generated types subclass Message and implement the methods + shown below. + """ + + # TODO(robinson): Link to an HTML document here. + + # TODO(robinson): Document that instances of this class will also + # have an Extensions attribute with __getitem__ and __setitem__. + # Again, not sure how to best convey this. + + # TODO(robinson): Document that the class must also have a static + # RegisterExtension(extension_field) method. + # Not sure how to best express at this point. + + # TODO(robinson): Document these fields and methods. + + __slots__ = [] + + #: The :class:`google.protobuf.descriptor.Descriptor` for this message type. + DESCRIPTOR = None + + def __deepcopy__(self, memo=None): + clone = type(self)() + clone.MergeFrom(self) + return clone + + def __eq__(self, other_msg): + """Recursively compares two messages by value and structure.""" + raise NotImplementedError + + def __ne__(self, other_msg): + # Can't just say self != other_msg, since that would infinitely recurse. :) + return not self == other_msg + + def __hash__(self): + raise TypeError('unhashable object') + + def __str__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __unicode__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def MergeFrom(self, other_msg): + """Merges the contents of the specified message into current message. + + This method merges the contents of the specified message into the current + message. Singular fields that are set in the specified message overwrite + the corresponding fields in the current message. Repeated fields are + appended. 
Singular sub-messages and groups are recursively merged. + + Args: + other_msg (Message): A message to merge into the current message. + """ + raise NotImplementedError + + def CopyFrom(self, other_msg): + """Copies the content of the specified message into the current message. + + The method clears the current message and then merges the specified + message using MergeFrom. + + Args: + other_msg (Message): A message to copy into the current one. + """ + if self is other_msg: + return + self.Clear() + self.MergeFrom(other_msg) + + def Clear(self): + """Clears all data that was set in the message.""" + raise NotImplementedError + + def SetInParent(self): + """Mark this as present in the parent. + + This normally happens automatically when you assign a field of a + sub-message, but sometimes you want to make the sub-message + present while keeping it empty. If you find yourself using this, + you may want to reconsider your design. + """ + raise NotImplementedError + + def IsInitialized(self): + """Checks if the message is initialized. + + Returns: + bool: The method returns True if the message is initialized (i.e. all of + its required fields are set). + """ + raise NotImplementedError + + # TODO(robinson): MergeFromString() should probably return None and be + # implemented in terms of a helper that returns the # of bytes read. Our + # deserialization routines would use the helper when recursively + # deserializing, but the end user would almost always just want the no-return + # MergeFromString(). + + def MergeFromString(self, serialized): + """Merges serialized protocol buffer data into this message. + + When we find a field in `serialized` that is already present + in this message: + + - If it's a "repeated" field, we append to the end of our list. + - Else, if it's a scalar, we overwrite our field. + - Else, (it's a nonrepeated composite), we recursively merge + into the existing composite. + + Args: + serialized (bytes): Any object that allows us to call + ``memoryview(serialized)`` to access a string of bytes using the + buffer interface. + + Returns: + int: The number of bytes read from `serialized`. + For non-group messages, this will always be `len(serialized)`, + but for messages which are actually groups, this will + generally be less than `len(serialized)`, since we must + stop when we reach an ``END_GROUP`` tag. Note that if + we *do* stop because of an ``END_GROUP`` tag, the number + of bytes returned does not include the bytes + for the ``END_GROUP`` tag information. + + Raises: + DecodeError: if the input cannot be parsed. + """ + # TODO(robinson): Document handling of unknown fields. + # TODO(robinson): When we switch to a helper, this will return None. + raise NotImplementedError + + def ParseFromString(self, serialized): + """Parse serialized protocol buffer data into this message. + + Like :func:`MergeFromString()`, except we clear the object first. + + Raises: + message.DecodeError if the input cannot be parsed. + """ + self.Clear() + return self.MergeFromString(serialized) + + def SerializeToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary string representation of the message if all of the required + fields in the message are set (i.e. the message is initialized). + + Raises: + EncodeError: if the message isn't initialized (see :func:`IsInitialized`). 
+ """ + raise NotImplementedError + + def SerializePartialToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + This method is similar to SerializeToString but doesn't check if the + message is initialized. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + bytes: A serialized representation of the partial message. + """ + raise NotImplementedError + + # TODO(robinson): Decide whether we like these better + # than auto-generated has_foo() and clear_foo() methods + # on the instances themselves. This way is less consistent + # with C++, but it makes reflection-type access easier and + # reduces the number of magically autogenerated things. + # + # TODO(robinson): Be sure to document (and test) exactly + # which field names are accepted here. Are we case-sensitive? + # What do we do with fields that share names with Python keywords + # like 'lambda' and 'yield'? + # + # nnorwitz says: + # """ + # Typically (in python), an underscore is appended to names that are + # keywords. So they would become lambda_ or yield_. + # """ + def ListFields(self): + """Returns a list of (FieldDescriptor, value) tuples for present fields. + + A message field is non-empty if HasField() would return true. A singular + primitive field is non-empty if HasField() would return true in proto2 or it + is non zero in proto3. A repeated field is non-empty if it contains at least + one element. The fields are ordered by field number. + + Returns: + list[tuple(FieldDescriptor, value)]: field descriptors and values + for all fields in the message which are not empty. The values vary by + field type. + """ + raise NotImplementedError + + def HasField(self, field_name): + """Checks if a certain field is set for the message. + + For a oneof group, checks if any field inside is set. Note that if the + field_name is not defined in the message descriptor, :exc:`ValueError` will + be raised. + + Args: + field_name (str): The name of the field to check for presence. + + Returns: + bool: Whether a value has been set for the named field. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def ClearField(self, field_name): + """Clears the contents of a given field. + + Inside a oneof group, clears the field set. If the name neither refers to a + defined field or oneof group, :exc:`ValueError` is raised. + + Args: + field_name (str): The name of the field to check for presence. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def WhichOneof(self, oneof_group): + """Returns the name of the field that is set inside a oneof group. + + If no field is set, returns None. + + Args: + oneof_group (str): the name of the oneof group to check. + + Returns: + str or None: The name of the group that is set, or None. + + Raises: + ValueError: no group with the given name exists + """ + raise NotImplementedError + + def HasExtension(self, extension_handle): + """Checks if a certain extension is present for this message. + + Extensions are retrieved using the :attr:`Extensions` mapping (if present). + + Args: + extension_handle: The handle for the extension to check. + + Returns: + bool: Whether the extension is present for this message. + + Raises: + KeyError: if the extension is repeated. 
Similar to repeated fields, + there is no separate notion of presence: a "not present" repeated + extension is an empty list. + """ + raise NotImplementedError + + def ClearExtension(self, extension_handle): + """Clears the contents of a given extension. + + Args: + extension_handle: The handle for the extension to clear. + """ + raise NotImplementedError + + def UnknownFields(self): + """Returns the UnknownFieldSet. + + Returns: + UnknownFieldSet: The unknown fields stored in this message. + """ + raise NotImplementedError + + def DiscardUnknownFields(self): + """Clears all fields in the :class:`UnknownFieldSet`. + + This operation is recursive for nested message. + """ + raise NotImplementedError + + def ByteSize(self): + """Returns the serialized size of this message. + + Recursively calls ByteSize() on all contained messages. + + Returns: + int: The number of bytes required to serialize this message. + """ + raise NotImplementedError + + @classmethod + def FromString(cls, s): + raise NotImplementedError + + @staticmethod + def RegisterExtension(extension_handle): + raise NotImplementedError + + def _SetListener(self, message_listener): + """Internal method used by the protocol message implementation. + Clients should not call this directly. + + Sets a listener that this message will call on certain state transitions. + + The purpose of this method is to register back-edges from children to + parents at runtime, for the purpose of setting "has" bits and + byte-size-dirty bits in the parent and ancestor objects whenever a child or + descendant object is modified. + + If the client wants to disconnect this Message from the object tree, she + explicitly sets callback to None. + + If message_listener is None, unregisters any existing listener. Otherwise, + message_listener must implement the MessageListener interface in + internal/message_listener.py, and we discard any listener registered + via a previous _SetListener() call. + """ + raise NotImplementedError + + def __getstate__(self): + """Support the pickle protocol.""" + return dict(serialized=self.SerializePartialToString()) + + def __setstate__(self, state): + """Support the pickle protocol.""" + self.__init__() + serialized = state['serialized'] + # On Python 3, using encoding='latin1' is required for unpickling + # protos pickled by Python 2. + if not isinstance(serialized, bytes): + serialized = serialized.encode('latin1') + self.ParseFromString(serialized) + + def __reduce__(self): + message_descriptor = self.DESCRIPTOR + if message_descriptor.containing_type is None: + return type(self), (), self.__getstate__() + # the message type must be nested. + # Python does not pickle nested classes; use the symbol_database on the + # receiving end. + container = message_descriptor + return (_InternalConstructMessage, (container.full_name,), + self.__getstate__()) + + +def _InternalConstructMessage(full_name): + """Constructs a nested message.""" + from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top + + return symbol_database.Default().GetSymbol(full_name)() diff --git a/MLPY/Lib/site-packages/google/protobuf/message_factory.py b/MLPY/Lib/site-packages/google/protobuf/message_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..4770ae156a6d91406dfda4e71cf6e8bf48f2a5cf --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/message_factory.py @@ -0,0 +1,185 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides a factory class for generating dynamic messages. + +The easiest way to use this class is if you have access to the FileDescriptor +protos containing the messages you want to create you can just do the following: + +message_classes = message_factory.GetMessages(iterable_of_file_descriptors) +my_proto_instance = message_classes['some.proto.package.MessageName']() +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message + +if api_implementation.Type() == 'cpp': + from google.protobuf.pyext import cpp_message as message_impl +else: + from google.protobuf.internal import python_message as message_impl + + +# The type of all Message classes. +_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType + + +class MessageFactory(object): + """Factory for creating Proto2 messages from descriptors in a pool.""" + + def __init__(self, pool=None): + """Initializes a new factory.""" + self.pool = pool or descriptor_pool.DescriptorPool() + + # local cache of all classes built from protobuf descriptors + self._classes = {} + + def GetPrototype(self, descriptor): + """Obtains a proto2 message class based on the passed in descriptor. + + Passing a descriptor with a fully qualified name matching a previous + invocation will cause the same class to be returned. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + if descriptor not in self._classes: + result_class = self.CreatePrototype(descriptor) + # The assignment to _classes is redundant for the base implementation, but + # might avoid confusion in cases where CreatePrototype gets overridden and + # does not call the base implementation. 
+ self._classes[descriptor] = result_class + return result_class + return self._classes[descriptor] + + def CreatePrototype(self, descriptor): + """Builds a proto2 message class based on the passed in descriptor. + + Don't call this function directly, it always creates a new class. Call + GetPrototype() instead. This method is meant to be overridden in subblasses + to perform additional operations on the newly constructed class. + + Args: + descriptor: The descriptor to build from. + + Returns: + A class describing the passed in descriptor. + """ + descriptor_name = descriptor.name + result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE( + descriptor_name, + (message.Message,), + { + 'DESCRIPTOR': descriptor, + # If module not set, it wrongly points to message_factory module. + '__module__': None, + }) + result_class._FACTORY = self # pylint: disable=protected-access + # Assign in _classes before doing recursive calls to avoid infinite + # recursion. + self._classes[descriptor] = result_class + for field in descriptor.fields: + if field.message_type: + self.GetPrototype(field.message_type) + for extension in result_class.DESCRIPTOR.extensions: + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result_class + + def GetMessages(self, files): + """Gets all the messages from a specified file. + + This will find and resolve dependencies, failing if the descriptor + pool cannot satisfy them. + + Args: + files: The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for desc in file_desc.message_types_by_name.values(): + result[desc.full_name] = self.GetPrototype(desc) + + # While the extension FieldDescriptors are created by the descriptor pool, + # the python classes created in the factory need them to be registered + # explicitly, which is done below. + # + # The call to RegisterExtension will specifically check if the + # extension was already registered on the object and either + # ignore the registration if the original was the same, or raise + # an error if they were different. + + for extension in file_desc.extensions_by_name.values(): + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result + + +_FACTORY = MessageFactory() + + +def GetMessages(file_protos): + """Builds a dictionary of all the messages available in a set of files. + + Args: + file_protos: Iterable of FileDescriptorProto to build messages out of. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + # The cpp implementation of the protocol buffer library requires to add the + # message in topological order of the dependency graph. + file_by_name = {file_proto.name: file_proto for file_proto in file_protos} + def _AddFile(file_proto): + for dependency in file_proto.dependency: + if dependency in file_by_name: + # Remove from elements to be visited, in order to cut cycles. 
+ _AddFile(file_by_name.pop(dependency)) + _FACTORY.pool.Add(file_proto) + while file_by_name: + _AddFile(file_by_name.popitem()[1]) + return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) diff --git a/MLPY/Lib/site-packages/google/protobuf/proto_builder.py b/MLPY/Lib/site-packages/google/protobuf/proto_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..02b64c6c0676daf53e99fe266a3a33c9fada2afa --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/proto_builder.py @@ -0,0 +1,134 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Dynamic Protobuf class creator.""" + +from collections import OrderedDict +import hashlib +import os + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor +from google.protobuf import message_factory + + +def _GetMessageFromFactory(factory, full_name): + """Get a proto class from the MessageFactory by name. + + Args: + factory: a MessageFactory instance. + full_name: str, the fully qualified name of the proto type. + Returns: + A class, for the type identified by full_name. + Raises: + KeyError, if the proto is not found in the factory's descriptor pool. + """ + proto_descriptor = factory.pool.FindMessageTypeByName(full_name) + proto_cls = factory.GetPrototype(proto_descriptor) + return proto_cls + + +def MakeSimpleProtoClass(fields, full_name=None, pool=None): + """Create a Protobuf class whose fields are basic types. + + Note: this doesn't validate field names! + + Args: + fields: dict of {name: field_type} mappings for each field in the proto. If + this is an OrderedDict the order will be maintained, otherwise the + fields will be sorted by name. + full_name: optional str, the fully-qualified name of the proto type. + pool: optional DescriptorPool instance. + Returns: + a class, the new protobuf class with a FileDescriptor. 
+ """ + factory = message_factory.MessageFactory(pool=pool) + + if full_name is not None: + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. + field_items = fields.items() + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + + # Use a consistent file name that is unlikely to conflict with any imported + # proto files. + fields_hash = hashlib.sha1() + for f_name, f_type in field_items: + fields_hash.update(f_name.encode('utf-8')) + fields_hash.update(str(f_type).encode('utf-8')) + proto_file_name = fields_hash.hexdigest() + '.proto' + + # If the proto is anonymous, use the same hash to name it. + if full_name is None: + full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + + fields_hash.hexdigest()) + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # This is the first time we see this proto: add a new descriptor to the pool. + factory.pool.Add( + _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) + return _GetMessageFromFactory(factory, full_name) + + +def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): + """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" + package, name = full_name.rsplit('.', 1) + file_proto = descriptor_pb2.FileDescriptorProto() + file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) + file_proto.package = package + desc_proto = file_proto.message_type.add() + desc_proto.name = name + for f_number, (f_name, f_type) in enumerate(field_items, 1): + field_proto = desc_proto.field.add() + field_proto.name = f_name + # # If the number falls in the reserved range, reassign it to the correct + # # number after the range. 
+ if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: + f_number += ( + descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - + descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) + field_proto.number = f_number + field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL + field_proto.type = f_type + return file_proto diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/__init__.py b/MLPY/Lib/site-packages/google/protobuf/pyext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72eced66f081b1e3a5cab8f90213985bc0820517 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/cpp_message.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/cpp_message.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5301e8cd13e270513a0985a45ae3ad5c25021d33 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/cpp_message.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/python_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/python_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85afb53aa38b2e3f84a30b8f4946328044451ac2 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/pyext/__pycache__/python_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5eb75e853e6067d99c7eb99c91798caf428f7c0c --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6511287fbcee5eafec23eac8779099366693fcf3717ffd8a67b7ddd673c1ed2 +size 1619456 diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/cpp_message.py b/MLPY/Lib/site-packages/google/protobuf/pyext/cpp_message.py new file mode 100644 index 0000000000000000000000000000000000000000..abd3117159f7af104db4a4cb27e899bbfd81db70 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/pyext/cpp_message.py @@ -0,0 +1,65 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Protocol message implementation hooks for C++ implementation. + +Contains helper functions used to create protocol message classes from +Descriptor objects at runtime backed by the protocol buffer C++ API. +""" + +__author__ = 'tibell@google.com (Johan Tibell)' + +from google.protobuf.pyext import _message + + +class GeneratedProtocolMessageType(_message.MessageMeta): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + + The above example will not work for nested types. If you wish to include them, + use reflection.MakeClass() instead of manually instantiating the class in + order to create the appropriate class structure. + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/MLPY/Lib/site-packages/google/protobuf/pyext/python_pb2.py b/MLPY/Lib/site-packages/google/protobuf/pyext/python_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..2c6ecf4c987a4cf6a019f5f0913ee7f07c36f691 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/pyext/python_pb2.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/pyext/python.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"google/protobuf/pyext/python.proto\x12\x1fgoogle.protobuf.python.internal\"\xbc\x02\n\x0cTestAllTypes\x12\\\n\x17repeated_nested_message\x18\x01 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\\\n\x17optional_nested_message\x18\x02 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage\x12\x16\n\x0eoptional_int32\x18\x03 \x01(\x05\x1aX\n\rNestedMessage\x12\n\n\x02\x62\x62\x18\x01 \x01(\x05\x12;\n\x02\x63\x63\x18\x02 \x01(\x0b\x32/.google.protobuf.python.internal.ForeignMessage\"&\n\x0e\x46oreignMessage\x12\t\n\x01\x63\x18\x01 \x01(\x05\x12\t\n\x01\x64\x18\x02 \x03(\x05\"\x1d\n\x11TestAllExtensions*\x08\x08\x01\x10\x80\x80\x80\x80\x02:\x9a\x01\n!optional_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x01 \x01(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessage:\x9a\x01\n!repeated_nested_message_extension\x12\x32.google.protobuf.python.internal.TestAllExtensions\x18\x02 \x03(\x0b\x32;.google.protobuf.python.internal.TestAllTypes.NestedMessageB\x02H\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.pyext.python_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestAllExtensions.RegisterExtension(optional_nested_message_extension) + TestAllExtensions.RegisterExtension(repeated_nested_message_extension) + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'H\001' + _TESTALLTYPES._serialized_start=72 + _TESTALLTYPES._serialized_end=388 + _TESTALLTYPES_NESTEDMESSAGE._serialized_start=300 + _TESTALLTYPES_NESTEDMESSAGE._serialized_end=388 + _FOREIGNMESSAGE._serialized_start=390 + _FOREIGNMESSAGE._serialized_end=428 + _TESTALLEXTENSIONS._serialized_start=430 + _TESTALLEXTENSIONS._serialized_end=459 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/reflection.py b/MLPY/Lib/site-packages/google/protobuf/reflection.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5d8f701eebf5a5c9507d7fcd3205962aab12ca --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/reflection.py @@ -0,0 +1,95 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +from google.protobuf import message_factory +from google.protobuf import symbol_database + +# The type of all Message classes. +# Part of the public interface, but normally only used by message factories. +GeneratedProtocolMessageType = message_factory._GENERATED_PROTOCOL_MESSAGE_TYPE + +MESSAGE_CLASS_CACHE = {} + + +# Deprecated. Please NEVER use reflection.ParseMessage(). +def ParseMessage(descriptor, byte_str): + """Generate a new Message instance from this Descriptor and a byte string. + + DEPRECATED: ParseMessage is deprecated because it is using MakeClass(). + Please use MessageFactory.GetPrototype() instead. + + Args: + descriptor: Protobuf Descriptor object + byte_str: Serialized protocol buffer byte string + + Returns: + Newly created protobuf Message object. + """ + result_class = MakeClass(descriptor) + new_msg = result_class() + new_msg.ParseFromString(byte_str) + return new_msg + + +# Deprecated. Please NEVER use reflection.MakeClass(). +def MakeClass(descriptor): + """Construct a class object for a protobuf described by descriptor. + + DEPRECATED: use MessageFactory.GetPrototype() instead. + + Args: + descriptor: A descriptor.Descriptor object describing the protobuf. + Returns: + The Message class object described by the descriptor. + """ + # Original implementation leads to duplicate message classes, which won't play + # well with extensions. Message factory info is also missing. + # Redirect to message_factory. + return symbol_database.Default().GetPrototype(descriptor) diff --git a/MLPY/Lib/site-packages/google/protobuf/service.py b/MLPY/Lib/site-packages/google/protobuf/service.py new file mode 100644 index 0000000000000000000000000000000000000000..24022221bf4ddb1015e9b9c350e692907e1a1a2d --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/service.py @@ -0,0 +1,228 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
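As the comments in reflection.py above note, ParseMessage() and MakeClass() are kept only for backward compatibility and simply redirect to the symbol database. A minimal sketch of the recommended path, assuming a hypothetical generated module my_proto_pb2 with a MyMessage type:

    from google.protobuf import symbol_database
    import my_proto_pb2  # hypothetical generated module, not part of this diff

    # Equivalent of the deprecated reflection.MakeClass(descriptor):
    msg_class = symbol_database.Default().GetPrototype(
        my_proto_pb2.MyMessage.DESCRIPTOR)

    # Equivalent of the deprecated reflection.ParseMessage(descriptor, data):
    data = my_proto_pb2.MyMessage().SerializeToString()
    msg = msg_class()
    msg.ParseFromString(data)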
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""DEPRECATED: Declares the RPC service interfaces. + +This module declares the abstract interfaces underlying proto2 RPC +services. These are intended to be independent of any particular RPC +implementation, so that proto2 services can be used on top of a variety +of implementations. Starting with version 2.3.0, RPC implementations should +not try to build on these, but should instead provide code generator plugins +which generate code specific to the particular RPC implementation. This way +the generated code can be more appropriate for the implementation in use +and can avoid unnecessary layers of indirection. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class RpcException(Exception): + """Exception raised on failed blocking RPC method call.""" + pass + + +class Service(object): + + """Abstract base interface for protocol-buffer-based RPC services. + + Services themselves are abstract classes (implemented either by servers or as + stubs), but they subclass this base interface. The methods of this + interface can be used to call the methods of the service without knowing + its exact type at compile time (analogous to the Message interface). + """ + + def GetDescriptor(): + """Retrieves this service's descriptor.""" + raise NotImplementedError + + def CallMethod(self, method_descriptor, rpc_controller, + request, done): + """Calls a method of the service specified by method_descriptor. + + If "done" is None then the call is blocking and the response + message will be returned directly. Otherwise the call is asynchronous + and "done" will later be called with the response value. + + In the blocking case, RpcException will be raised on error. + + Preconditions: + + * method_descriptor.service == GetDescriptor + * request is of the exact same classes as returned by + GetRequestClass(method). + * After the call has started, the request must not be modified. + * "rpc_controller" is of the correct type for the RPC implementation being + used by this Service. 
For stubs, the "correct type" depends on the + RpcChannel which the stub is using. + + Postconditions: + + * "done" will be called when the method is complete. This may be + before CallMethod() returns or it may be at some point in the future. + * If the RPC failed, the response value passed to "done" will be None. + Further details about the failure can be found by querying the + RpcController. + """ + raise NotImplementedError + + def GetRequestClass(self, method_descriptor): + """Returns the class of the request message for the specified method. + + CallMethod() requires that the request is of a particular subclass of + Message. GetRequestClass() gets the default instance of this required + type. + + Example: + method = service.GetDescriptor().FindMethodByName("Foo") + request = stub.GetRequestClass(method)() + request.ParseFromString(input) + service.CallMethod(method, request, callback) + """ + raise NotImplementedError + + def GetResponseClass(self, method_descriptor): + """Returns the class of the response message for the specified method. + + This method isn't really needed, as the RpcChannel's CallMethod constructs + the response protocol message. It's provided anyway in case it is useful + for the caller to know the response type in advance. + """ + raise NotImplementedError + + +class RpcController(object): + + """An RpcController mediates a single method call. + + The primary purpose of the controller is to provide a way to manipulate + settings specific to the RPC implementation and to find out about RPC-level + errors. The methods provided by the RpcController interface are intended + to be a "least common denominator" set of features which we expect all + implementations to support. Specific implementations may provide more + advanced features (e.g. deadline propagation). + """ + + # Client-side methods below + + def Reset(self): + """Resets the RpcController to its initial state. + + After the RpcController has been reset, it may be reused in + a new call. Must not be called while an RPC is in progress. + """ + raise NotImplementedError + + def Failed(self): + """Returns true if the call failed. + + After a call has finished, returns true if the call failed. The possible + reasons for failure depend on the RPC implementation. Failed() must not + be called before a call has finished. If Failed() returns true, the + contents of the response message are undefined. + """ + raise NotImplementedError + + def ErrorText(self): + """If Failed is true, returns a human-readable description of the error.""" + raise NotImplementedError + + def StartCancel(self): + """Initiate cancellation. + + Advises the RPC system that the caller desires that the RPC call be + canceled. The RPC system may cancel it immediately, may wait awhile and + then cancel it, or may not even cancel the call at all. If the call is + canceled, the "done" callback will still be called and the RpcController + will indicate that the call failed at that time. + """ + raise NotImplementedError + + # Server-side methods below + + def SetFailed(self, reason): + """Sets a failure reason. + + Causes Failed() to return true on the client side. "reason" will be + incorporated into the message returned by ErrorText(). If you find + you need to return machine-readable information about failures, you + should incorporate it into your response protocol buffer and should + NOT call SetFailed(). + """ + raise NotImplementedError + + def IsCanceled(self): + """Checks if the client cancelled the RPC. 
+ + If true, indicates that the client canceled the RPC, so the server may + as well give up on replying to it. The server should still call the + final "done" callback. + """ + raise NotImplementedError + + def NotifyOnCancel(self, callback): + """Sets a callback to invoke on cancel. + + Asks that the given callback be called when the RPC is canceled. The + callback will always be called exactly once. If the RPC completes without + being canceled, the callback will be called after completion. If the RPC + has already been canceled when NotifyOnCancel() is called, the callback + will be called immediately. + + NotifyOnCancel() must be called no more than once per request. + """ + raise NotImplementedError + + +class RpcChannel(object): + + """Abstract interface for an RPC channel. + + An RpcChannel represents a communication line to a service which can be used + to call that service's methods. The service may be running on another + machine. Normally, you should not use an RpcChannel directly, but instead + construct a stub {@link Service} wrapping it. Example: + + Example: + RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") + RpcController controller = rpcImpl.Controller() + MyService service = MyService_Stub(channel) + service.MyMethod(controller, request, callback) + """ + + def CallMethod(self, method_descriptor, rpc_controller, + request, response_class, done): + """Calls the method identified by the descriptor. + + Call the given method of the remote service. The signature of this + procedure looks the same as Service.CallMethod(), but the requirements + are less strict in one important way: the request object doesn't have to + be of any specific class as long as its descriptor is method.input_type. + """ + raise NotImplementedError diff --git a/MLPY/Lib/site-packages/google/protobuf/service_reflection.py b/MLPY/Lib/site-packages/google/protobuf/service_reflection.py new file mode 100644 index 0000000000000000000000000000000000000000..c989feecb942fb1c499e416f3f775a0985ca1ea4 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/service_reflection.py @@ -0,0 +1,295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
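The RpcChannel docstring in service.py above sketches stub usage in pseudo-code; below is a Python rendering of the same flow. rpc_impl and my_service_pb2 are placeholders for a concrete RPC implementation and a generated module, neither of which is provided by this package:

    # rpc_impl and my_service_pb2 are assumptions, not part of google.protobuf;
    # any concrete RPC implementation and generated service module play these roles.
    channel = rpc_impl.Channel('remotehost.example.com:1234')
    controller = rpc_impl.Controller()
    service = my_service_pb2.MyService_Stub(channel)

    def done(response):
        if controller.Failed():
            print('RPC failed:', controller.ErrorText())
        else:
            print('response:', response)

    service.MyMethod(controller, my_service_pb2.MyMethodRequest(), done)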
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains metaclasses used to create protocol service and service stub +classes from ServiceDescriptor objects at runtime. + +The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to +inject all useful functionality into the classes output by the protocol +compiler at compile-time. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class GeneratedServiceType(type): + + """Metaclass for service classes created at runtime from ServiceDescriptors. + + Implementations for all methods described in the Service class are added here + by this class. We also create properties to allow getting/setting all fields + in the protocol message. + + The protocol compiler currently uses this metaclass to create protocol service + classes at runtime. Clients can also manually create their own classes at + runtime, as in this example:: + + mydescriptor = ServiceDescriptor(.....) + class MyProtoService(service.Service): + __metaclass__ = GeneratedServiceType + DESCRIPTOR = mydescriptor + myservice_instance = MyProtoService() + # ... + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service class. + + Args: + name: Name of the class (ignored, but required by the metaclass + protocol). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service class is subclassed. + if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] + service_builder = _ServiceBuilder(descriptor) + service_builder.BuildService(cls) + cls.DESCRIPTOR = descriptor + + +class GeneratedServiceStubType(GeneratedServiceType): + + """Metaclass for service stubs created at runtime from ServiceDescriptors. + + This class has similar responsibilities as GeneratedServiceType, except that + it creates the service stub classes. + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service stub class. + + Args: + name: Name of the class (ignored, here). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service stub is subclassed. 
+ if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] + service_stub_builder = _ServiceStubBuilder(descriptor) + service_stub_builder.BuildServiceStub(cls) + + +class _ServiceBuilder(object): + + """This class constructs a protocol service class using a service descriptor. + + Given a service descriptor, this class constructs a class that represents + the specified service descriptor. One service builder instance constructs + exactly one service class. That means all instances of that class share the + same builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + service class. + """ + self.descriptor = service_descriptor + + def BuildService(builder, cls): + """Constructs the service class. + + Args: + cls: The class that will be constructed. + """ + + # CallMethod needs to operate with an instance of the Service class. This + # internal wrapper function exists only to be able to pass the service + # instance to the method that does the real CallMethod work. + # Making sure to use exact argument names from the abstract interface in + # service.py to match the type signature + def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done): + return builder._CallMethod(self, method_descriptor, rpc_controller, + request, done) + + def _WrapGetRequestClass(self, method_descriptor): + return builder._GetRequestClass(method_descriptor) + + def _WrapGetResponseClass(self, method_descriptor): + return builder._GetResponseClass(method_descriptor) + + builder.cls = cls + cls.CallMethod = _WrapCallMethod + cls.GetDescriptor = staticmethod(lambda: builder.descriptor) + cls.GetDescriptor.__doc__ = 'Returns the service descriptor.' + cls.GetRequestClass = _WrapGetRequestClass + cls.GetResponseClass = _WrapGetResponseClass + for method in builder.descriptor.methods: + setattr(cls, method.name, builder._GenerateNonImplementedMethod(method)) + + def _CallMethod(self, srvc, method_descriptor, + rpc_controller, request, callback): + """Calls the method described by a given method descriptor. + + Args: + srvc: Instance of the service for which this method is called. + method_descriptor: Descriptor that represent the method to call. + rpc_controller: RPC controller to use for this method's execution. + request: Request protocol message. + callback: A callback to invoke after the method has completed. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'CallMethod() given method descriptor for wrong service type.') + method = getattr(srvc, method_descriptor.name) + return method(rpc_controller, request, callback) + + def _GetRequestClass(self, method_descriptor): + """Returns the class of the request protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + request protocol message class. + + Returns: + A class that represents the input protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetRequestClass() given method descriptor for wrong service type.') + return method_descriptor.input_type._concrete_class + + def _GetResponseClass(self, method_descriptor): + """Returns the class of the response protocol message. 
+ + Args: + method_descriptor: Descriptor of the method for which to return the + response protocol message class. + + Returns: + A class that represents the output protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetResponseClass() given method descriptor for wrong service type.') + return method_descriptor.output_type._concrete_class + + def _GenerateNonImplementedMethod(self, method): + """Generates and returns a method that can be set for a service methods. + + Args: + method: Descriptor of the service method for which a method is to be + generated. + + Returns: + A method that can be added to the service class. + """ + return lambda inst, rpc_controller, request, callback: ( + self._NonImplementedMethod(method.name, rpc_controller, callback)) + + def _NonImplementedMethod(self, method_name, rpc_controller, callback): + """The body of all methods in the generated service class. + + Args: + method_name: Name of the method being executed. + rpc_controller: RPC controller used to execute this method. + callback: A callback which will be invoked when the method finishes. + """ + rpc_controller.SetFailed('Method %s not implemented.' % method_name) + callback(None) + + +class _ServiceStubBuilder(object): + + """Constructs a protocol service stub class using a service descriptor. + + Given a service descriptor, this class constructs a suitable stub class. + A stub is just a type-safe wrapper around an RpcChannel which emulates a + local implementation of the service. + + One service stub builder instance constructs exactly one class. It means all + instances of that class share the same service stub builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service stub class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + stub class. + """ + self.descriptor = service_descriptor + + def BuildServiceStub(self, cls): + """Constructs the stub class. + + Args: + cls: The class that will be constructed. + """ + + def _ServiceStubInit(stub, rpc_channel): + stub.rpc_channel = rpc_channel + self.cls = cls + cls.__init__ = _ServiceStubInit + for method in self.descriptor.methods: + setattr(cls, method.name, self._GenerateStubMethod(method)) + + def _GenerateStubMethod(self, method): + return (lambda inst, rpc_controller, request, callback=None: + self._StubMethod(inst, method, rpc_controller, request, callback)) + + def _StubMethod(self, stub, method_descriptor, + rpc_controller, request, callback): + """The body of all service methods in the generated stub class. + + Args: + stub: Stub instance. + method_descriptor: Descriptor of the invoked method. + rpc_controller: Rpc controller to execute the method. + request: Request protocol message. + callback: A callback to execute when the method finishes. + Returns: + Response message (in case of blocking call). + """ + return stub.rpc_channel.CallMethod( + method_descriptor, rpc_controller, request, + method_descriptor.output_type._concrete_class, callback) diff --git a/MLPY/Lib/site-packages/google/protobuf/source_context_pb2.py b/MLPY/Lib/site-packages/google/protobuf/source_context_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..30cca2e06e7bbf4dab98e53a1e920dc74d22e99f --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/source_context_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
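As a sketch of what _ServiceStubBuilder wires up: every generated stub method forwards to the channel's CallMethod() with the method descriptor, the request, and the response class. A toy channel that records calls rather than transmitting them is enough to see this:

    from google.protobuf import service

    class RecordingChannel(service.RpcChannel):
        """Toy RpcChannel that records calls instead of transmitting them."""

        def __init__(self):
            self.calls = []

        def CallMethod(self, method_descriptor, rpc_controller,
                       request, response_class, done):
            self.calls.append(method_descriptor.full_name)
            response = response_class()
            if done is not None:
                done(response)
            return response

    # With a generated stub (hypothetical my_service_pb2):
    #   stub = my_service_pb2.MyService_Stub(RecordingChannel())
    #   stub.MyMethod(controller, request)        # blocking form, callback=None
    # the call reaches CallMethod() above via _ServiceStubBuilder._StubMethod.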
+# source: google/protobuf/source_context.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$google/protobuf/source_context.proto\x12\x0fgoogle.protobuf\"\"\n\rSourceContext\x12\x11\n\tfile_name\x18\x01 \x01(\tB\x8a\x01\n\x13\x63om.google.protobufB\x12SourceContextProtoP\x01Z6google.golang.org/protobuf/types/known/sourcecontextpb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.source_context_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\022SourceContextProtoP\001Z6google.golang.org/protobuf/types/known/sourcecontextpb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SOURCECONTEXT._serialized_start=57 + _SOURCECONTEXT._serialized_end=91 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/struct_pb2.py b/MLPY/Lib/site-packages/google/protobuf/struct_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..149728ca084e764b49d1a834f9326cc5730d2726 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/struct_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/struct.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x84\x01\n\x06Struct\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntry\x1a\x45\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"\xea\x01\n\x05Value\x12\x30\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x16\n\x0cnumber_value\x18\x02 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x12/\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x30\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x42\x06\n\x04kind\"3\n\tListValue\x12&\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _STRUCT_FIELDSENTRY._options = None + _STRUCT_FIELDSENTRY._serialized_options = b'8\001' + _NULLVALUE._serialized_start=474 + _NULLVALUE._serialized_end=501 + _STRUCT._serialized_start=50 + _STRUCT._serialized_end=182 + _STRUCT_FIELDSENTRY._serialized_start=113 + _STRUCT_FIELDSENTRY._serialized_end=182 + _VALUE._serialized_start=185 + _VALUE._serialized_end=419 + _LISTVALUE._serialized_start=421 + _LISTVALUE._serialized_end=472 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/symbol_database.py b/MLPY/Lib/site-packages/google/protobuf/symbol_database.py new file mode 100644 index 0000000000000000000000000000000000000000..d06ba9ba67dde6b6aabff9c9594c8a2da6ff4a33 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/symbol_database.py @@ -0,0 +1,194 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A database of Python protocol buffer generated symbols. + +SymbolDatabase is the MessageFactory for messages generated at compile time, +and makes it easy to create new instances of a registered type, given only the +type's protocol buffer symbol name. + +Example usage:: + + db = symbol_database.SymbolDatabase() + + # Register symbols of interest, from one or multiple files. 
+ db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) + db.RegisterMessage(my_proto_pb2.MyMessage) + db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) + + # The database can be used as a MessageFactory, to generate types based on + # their name: + types = db.GetMessages(['my_proto.proto']) + my_message_instance = types['MyMessage']() + + # The database's underlying descriptor pool can be queried, so it's not + # necessary to know a type's filename to be able to generate it: + filename = db.pool.FindFileContainingSymbol('MyMessage') + my_message_instance = db.GetMessages([filename])['MyMessage']() + + # This functionality is also provided directly via a convenience method: + my_message_instance = db.GetSymbol('MyMessage')() +""" + + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message_factory + + +class SymbolDatabase(message_factory.MessageFactory): + """A database of Python generated symbols.""" + + def RegisterMessage(self, message): + """Registers the given message type in the local database. + + Calls to GetSymbol() and GetMessages() will return messages registered here. + + Args: + message: A :class:`google.protobuf.message.Message` subclass (or + instance); its descriptor will be registered. + + Returns: + The provided message. + """ + + desc = message.DESCRIPTOR + self._classes[desc] = message + self.RegisterMessageDescriptor(desc) + return message + + def RegisterMessageDescriptor(self, message_descriptor): + """Registers the given message descriptor in the local database. + + Args: + message_descriptor (Descriptor): the message descriptor to add. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddDescriptor(message_descriptor) + + def RegisterEnumDescriptor(self, enum_descriptor): + """Registers the given enum descriptor in the local database. + + Args: + enum_descriptor (EnumDescriptor): The enum descriptor to register. + + Returns: + EnumDescriptor: The provided descriptor. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddEnumDescriptor(enum_descriptor) + return enum_descriptor + + def RegisterServiceDescriptor(self, service_descriptor): + """Registers the given service descriptor in the local database. + + Args: + service_descriptor (ServiceDescriptor): the service descriptor to + register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddServiceDescriptor(service_descriptor) + + def RegisterFileDescriptor(self, file_descriptor): + """Registers the given file descriptor in the local database. + + Args: + file_descriptor (FileDescriptor): The file descriptor to register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._InternalAddFileDescriptor(file_descriptor) + + def GetSymbol(self, symbol): + """Tries to find a symbol in the local database. + + Currently, this method only returns message.Message instances, however, if + may be extended in future to support other symbol types. + + Args: + symbol (str): a protocol buffer symbol. + + Returns: + A Python class corresponding to the symbol. + + Raises: + KeyError: if the symbol could not be found. + """ + + return self._classes[self.pool.FindMessageTypeByName(symbol)] + + def GetMessages(self, files): + # TODO(amauryfa): Fix the differences with MessageFactory. + """Gets all registered messages from a specified file. 
+ + Only messages already created and registered will be returned; (this is the + case for imported _pb2 modules) + But unlike MessageFactory, this version also returns already defined nested + messages, but does not register any message extensions. + + Args: + files (list[str]): The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. + + Raises: + KeyError: if a file could not be found. + """ + + def _GetAllMessages(desc): + """Walk a message Descriptor and recursively yields all message names.""" + yield desc + for msg_desc in desc.nested_types: + for nested_desc in _GetAllMessages(msg_desc): + yield nested_desc + + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for msg_desc in file_desc.message_types_by_name.values(): + for desc in _GetAllMessages(msg_desc): + try: + result[desc.full_name] = self._classes[desc] + except KeyError: + # This descriptor has no registered class, skip it. + pass + return result + + +_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default()) + + +def Default(): + """Returns the default SymbolDatabase.""" + return _DEFAULT diff --git a/MLPY/Lib/site-packages/google/protobuf/text_encoding.py b/MLPY/Lib/site-packages/google/protobuf/text_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..b8e69c8aabbde485c2033849126703f839bee5b4 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/text_encoding.py @@ -0,0 +1,110 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
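The symbol_database module docstring above already outlines the API; the snippet below is a runnable variant using struct_pb2, which this same diff adds and which registers itself with the default database on import (as the builder-based _pb2 modules do):

    from google.protobuf import struct_pb2  # import registers google.protobuf.Struct et al.
    from google.protobuf import symbol_database

    db = symbol_database.Default()

    # Look a message class up by its fully-qualified proto name.
    struct_cls = db.GetSymbol('google.protobuf.Struct')
    print(struct_cls is struct_pb2.Struct)   # expected: True (same registered class)

    # Or fetch every message registered for a given .proto file.
    messages = db.GetMessages(['google/protobuf/struct.proto'])
    value = messages['google.protobuf.Value'](string_value='hello')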
+ +"""Encoding related utilities.""" +import re + +_cescape_chr_to_symbol_map = {} +_cescape_chr_to_symbol_map[9] = r'\t' # optional escape +_cescape_chr_to_symbol_map[10] = r'\n' # optional escape +_cescape_chr_to_symbol_map[13] = r'\r' # optional escape +_cescape_chr_to_symbol_map[34] = r'\"' # necessary escape +_cescape_chr_to_symbol_map[39] = r"\'" # optional escape +_cescape_chr_to_symbol_map[92] = r'\\' # necessary escape + +# Lookup table for unicode +_cescape_unicode_to_str = [chr(i) for i in range(0, 256)] +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_unicode_to_str[byte] = string + +# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) +_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + + [chr(i) for i in range(32, 127)] + + [r'\%03o' % i for i in range(127, 256)]) +for byte, string in _cescape_chr_to_symbol_map.items(): + _cescape_byte_to_str[byte] = string +del byte, string + + +def CEscape(text, as_utf8): + # type: (...) -> str + """Escape a bytes string for use in an text protocol buffer. + + Args: + text: A byte string to be escaped. + as_utf8: Specifies if result may contain non-ASCII characters. + In Python 3 this allows unescaped non-ASCII Unicode characters. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + Returns: + Escaped string (str). + """ + # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not + # satisfy our needs; they encodes unprintable characters using two-digit hex + # escapes whereas our C++ unescaping function allows hex escapes to be any + # length. So, "\0011".encode('string_escape') ends up being "\\x011", which + # will be decoded in C++ as a single-character string with char code 0x11. + text_is_unicode = isinstance(text, str) + if as_utf8 and text_is_unicode: + # We're already unicode, no processing beyond control char escapes. + return text.translate(_cescape_chr_to_symbol_map) + ord_ = ord if text_is_unicode else lambda x: x # bytes iterate as ints. + if as_utf8: + return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text) + return ''.join(_cescape_byte_to_str[ord_(c)] for c in text) + + +_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') + + +def CUnescape(text): + # type: (str) -> bytes + """Unescape a text string with C-style escape sequences to UTF-8 bytes. + + Args: + text: The data to parse in a str. + Returns: + A byte string. + """ + + def ReplaceHex(m): + # Only replace the match if the number of leading back slashes is odd. i.e. + # the slash itself is not escaped. + if len(m.group(1)) & 1: + return m.group(1) + 'x0' + m.group(2) + return m.group(0) + + # This is required because the 'string_escape' encoding doesn't + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + + return (result.encode('utf-8') # Make it bytes to allow decode. + .decode('unicode_escape') + # Make it bytes again to return the proper type. + .encode('raw_unicode_escape')) diff --git a/MLPY/Lib/site-packages/google/protobuf/text_format.py b/MLPY/Lib/site-packages/google/protobuf/text_format.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7dd94b052ba58a89dd0fb8487bf57b4fe244f0 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/text_format.py @@ -0,0 +1,1795 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in text format. + +Simple usage example:: + + # Create a proto object and serialize it to a text proto string. + message = my_proto_pb2.MyMessage(foo='bar') + text_proto = text_format.MessageToString(message) + + # Parse a text proto string. + message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +# TODO(b/129989314) Import thread contention leads to test failures. 
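The text_format module docstring above shows the two entry points; a runnable round-trip using a well-known type added elsewhere in this diff, as a usage sketch:

    from google.protobuf import struct_pb2
    from google.protobuf import text_format

    msg = struct_pb2.Value(string_value='bar')
    text_proto = text_format.MessageToString(msg)
    print(text_proto)                         # string_value: "bar"

    parsed = text_format.Parse(text_proto, struct_pb2.Value())
    assert parsed == msg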
+import encodings.raw_unicode_escape # pylint: disable=unused-import +import encodings.unicode_escape # pylint: disable=unused-import +import io +import math +import re + +from google.protobuf.internal import decoder +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import text_encoding + +# pylint: disable=g-import-not-at-top +__all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', + 'PrintFieldValue', 'Merge', 'MessageToBytes'] + +_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), + type_checkers.Int32ValueChecker(), + type_checkers.Uint64ValueChecker(), + type_checkers.Int64ValueChecker()) +_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) +_FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) +_QUOTES = frozenset(("'", '"')) +_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' + + +class Error(Exception): + """Top-level module error for text_format.""" + + +class ParseError(Error): + """Thrown in case of text parsing or tokenizing error.""" + + def __init__(self, message=None, line=None, column=None): + if message is not None and line is not None: + loc = str(line) + if column is not None: + loc += ':{0}'.format(column) + message = '{0} : {1}'.format(loc, message) + if message is not None: + super(ParseError, self).__init__(message) + else: + super(ParseError, self).__init__() + self._line = line + self._column = column + + def GetLine(self): + return self._line + + def GetColumn(self): + return self._column + + +class TextWriter(object): + + def __init__(self, as_utf8): + self._writer = io.StringIO() + + def write(self, val): + return self._writer.write(val) + + def close(self): + return self._writer.close() + + def getvalue(self): + return self._writer.getvalue() + + +def MessageToString( + message, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + indent=0, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + # type: (...) -> str + """Convert protobuf message to text format. + + Double values can be formatted compactly with 15 digits of + precision (which is the most that IEEE 754 "double" can guarantee) + using double_format='.15g'. To ensure that converting to text and back to a + proto will result in an identical value, double_format='.17g' should be used. + + Args: + message: The protocol buffers message. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, fields of a proto message will be printed using + the order defined in source code instead of the field number, extensions + will be printed at the end of the message and their relative order is + determined by the extension number. By default, use the field number + order. + float_format (str): If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest float + that has same value in wire will be printed. Also affect double field + if double_format is not set but float_format is set. 
+ double_format (str): If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, use ``str()`` + use_field_number: If True, print field numbers instead of names. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + indent (int): The initial indent level, in terms of spaces, for pretty + print. + message_formatter (function(message, indent, as_one_line) -> unicode|None): + Custom formatter for selected sub-messages (usually based on message + type). Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if the + field is a proto message. + + Returns: + str: A string of the text formatted protocol buffer message. + """ + out = TextWriter(as_utf8) + printer = _Printer( + out, + indent, + as_utf8, + as_one_line, + use_short_repeated_primitives, + pointy_brackets, + use_index_order, + float_format, + double_format, + use_field_number, + descriptor_pool, + message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + result = out.getvalue() + out.close() + if as_one_line: + return result.rstrip() + return result + + +def MessageToBytes(message, **kwargs): + # type: (...) -> bytes + """Convert protobuf message to encoded text format. See MessageToString.""" + text = MessageToString(message, **kwargs) + if isinstance(text, bytes): + return text + codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' + return text.encode(codec) + + +def _IsMapEntry(field): + return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def PrintMessage(message, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + printer = _Printer( + out=out, indent=indent, as_utf8=as_utf8, + as_one_line=as_one_line, + use_short_repeated_primitives=use_short_repeated_primitives, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, + float_format=float_format, + double_format=double_format, + use_field_number=use_field_number, + descriptor_pool=descriptor_pool, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + + +def PrintField(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field name/value pair.""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintField(field, value) + + +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, 
+ float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field value (not including name).""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintFieldValue(field, value) + + +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + if descriptor_pool is None: + from google.protobuf import descriptor_pool as pool_mod + descriptor_pool = pool_mod.Default() + from google.protobuf import symbol_database + database = symbol_database.Default() + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = database.GetPrototype(message_descriptor) + return message_type() + + +# These values must match WireType enum in google/protobuf/wire_format.h. +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 + + +class _Printer(object): + """Text format printer for protocol message.""" + + def __init__( + self, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Initialize the Printer. + + Double values can be formatted compactly with 15 digits of precision + (which is the most that IEEE 754 "double" can guarantee) using + double_format='.15g'. To ensure that converting to text and back to a proto + will result in an identical value, double_format='.17g' should be used. + + Args: + out: To record the text format result. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. 
+ message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. + """ + self.out = out + self.indent = indent + self.as_utf8 = as_utf8 + self.as_one_line = as_one_line + self.use_short_repeated_primitives = use_short_repeated_primitives + self.pointy_brackets = pointy_brackets + self.use_index_order = use_index_order + self.float_format = float_format + if double_format is not None: + self.double_format = double_format + else: + self.double_format = float_format + self.use_field_number = use_field_number + self.descriptor_pool = descriptor_pool + self.message_formatter = message_formatter + self.print_unknown_fields = print_unknown_fields + self.force_colon = force_colon + + def _TryPrintAsAnyMessage(self, message): + """Serializes if message is a google.protobuf.Any field.""" + if '/' not in message.type_url: + return False + packed_message = _BuildMessageFromTypeName(message.TypeName(), + self.descriptor_pool) + if packed_message: + packed_message.MergeFromString(message.value) + colon = ':' if self.force_colon else '' + self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) + self._PrintMessageFieldValue(packed_message) + self.out.write(' ' if self.as_one_line else '\n') + return True + else: + return False + + def _TryCustomFormatMessage(self, message): + formatted = self.message_formatter(message, self.indent, self.as_one_line) + if formatted is None: + return False + + out = self.out + out.write(' ' * self.indent) + out.write(formatted) + out.write(' ' if self.as_one_line else '\n') + return True + + def PrintMessage(self, message): + """Convert protobuf message to text format. + + Args: + message: The protocol buffers message. + """ + if self.message_formatter and self._TryCustomFormatMessage(message): + return + if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and + self._TryPrintAsAnyMessage(message)): + return + fields = message.ListFields() + if self.use_index_order: + fields.sort( + key=lambda x: x[0].number if x[0].is_extension else x[0].index) + for field, value in fields: + if _IsMapEntry(field): + for key in sorted(value): + # This is slow for maps with submessage entries because it copies the + # entire tree. Unfortunately this would take significant refactoring + # of this file to work around. + # + # TODO(haberman): refactor and optimize if this becomes an issue. 
+ entry_submsg = value.GetEntryClass()(key=key, value=value[key]) + self.PrintField(field, entry_submsg) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if (self.use_short_repeated_primitives + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): + self._PrintShortRepeatedPrimitivesValue(field, value) + else: + for element in value: + self.PrintField(field, element) + else: + self.PrintField(field, value) + + if self.print_unknown_fields: + self._PrintUnknownFields(message.UnknownFields()) + + def _PrintUnknownFields(self, unknown_fields): + """Print unknown fields.""" + out = self.out + for field in unknown_fields: + out.write(' ' * self.indent) + out.write(str(field.field_number)) + if field.wire_type == WIRETYPE_START_GROUP: + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(field.data) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: + try: + # If this field is parseable as a Message, it is probably + # an embedded message. + # pylint: disable=protected-access + (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( + memoryview(field.data), 0, len(field.data)) + except Exception: # pylint: disable=broad-except + pos = 0 + + if pos == len(field.data): + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(embedded_unknown_message) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + else: + # A string or bytes field. self.as_utf8 may not work. + out.write(': \"') + out.write(text_encoding.CEscape(field.data, False)) + out.write('\" ' if self.as_one_line else '\"\n') + else: + # varint, fixed32, fixed64 + out.write(': ') + out.write(str(field.data)) + out.write(' ' if self.as_one_line else '\n') + + def _PrintFieldName(self, field): + """Print field name.""" + out = self.out + out.write(' ' * self.indent) + if self.use_field_number: + out.write(str(field.number)) + else: + if field.is_extension: + out.write('[') + if (field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): + out.write(field.message_type.full_name) + else: + out.write(field.full_name) + out.write(']') + elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: + # For groups, use the capitalized name. + out.write(field.message_type.name) + else: + out.write(field.name) + + if (self.force_colon or + field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): + # The colon is optional in this case, but our cross-language golden files + # don't include it. Here, the colon is only included if force_colon is + # set to True + out.write(':') + + def PrintField(self, field, value): + """Print a single field name/value pair.""" + self._PrintFieldName(field) + self.out.write(' ') + self.PrintFieldValue(field, value) + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintShortRepeatedPrimitivesValue(self, field, value): + """"Prints short repeated primitives value.""" + # Note: this is called only when value has at least one element. 
+ self._PrintFieldName(field) + self.out.write(' [') + for i in range(len(value) - 1): + self.PrintFieldValue(field, value[i]) + self.out.write(', ') + self.PrintFieldValue(field, value[-1]) + self.out.write(']') + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintMessageFieldValue(self, value): + if self.pointy_brackets: + openb = '<' + closeb = '>' + else: + openb = '{' + closeb = '}' + + if self.as_one_line: + self.out.write('%s ' % openb) + self.PrintMessage(value) + self.out.write(closeb) + else: + self.out.write('%s\n' % openb) + self.indent += 2 + self.PrintMessage(value) + self.indent -= 2 + self.out.write(' ' * self.indent + closeb) + + def PrintFieldValue(self, field, value): + """Print a single field value (not including name). + + For repeated fields, the value should be a single element. + + Args: + field: The descriptor of the field to be printed. + value: The value of the field. + """ + out = self.out + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self._PrintMessageFieldValue(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + out.write(enum_value.name) + else: + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') + if isinstance(value, str) and not self.as_utf8: + out_value = value.encode('utf-8') + else: + out_value = value + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # We always need to escape all binary data in TYPE_BYTES fields. + out_as_utf8 = False + else: + out_as_utf8 = self.as_utf8 + out.write(text_encoding.CEscape(out_value, out_as_utf8)) + out.write('\"') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + if value: + out.write('true') + else: + out.write('false') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format is not None: + out.write('{1:{0}}'.format(self.float_format, value)) + else: + if math.isnan(value): + out.write(str(value)) + else: + out.write(str(type_checkers.ToShortestFloat(value))) + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and + self.double_format is not None): + out.write('{1:{0}}'.format(self.double_format, value)) + else: + out.write(str(value)) + + +def Parse(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + NOTE: for historical reasons this function does not clear the input + message. This is different from what the binary msg.ParseFrom(...) does. + If text contains a field already set in message, the value is appended if the + field is repeated. Otherwise, an error is raised. + + Example:: + + a = MyProto() + a.repeated_field.append('test') + b = MyProto() + + # Repeated fields are combined + text_format.Parse(repr(a), b) + text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"] + + # Non-repeated fields cannot be overwritten + a.singular_field = 1 + b.singular_field = 2 + text_format.Parse(repr(a), b) # ParseError + + # Binary version: + b.ParseFromString(a.SerializeToString()) # repeated_field is now "test" + + Caller is responsible for clearing the message as needed. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def Merge(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + Like Parse(), but allows repeated values for a non-repeated field, and uses + the last one. This means any non-repeated, top-level fields specified in text + replace those in the message. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return MergeLines( + text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def ParseLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Parse() for caveats. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.ParseLines(lines, message) + + +def MergeLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Merge() for more details. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. 
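+  # A minimal usage sketch of the Parse()/Merge() contrast documented above,
+  # assuming a hypothetical generated message class MyProto with a scalar
+  # field `singular_field` and a repeated field `repeated_field`:
+  #
+  #   from google.protobuf import text_format
+  #
+  #   msg = MyProto()
+  #   text_format.Parse('singular_field: 1', msg)     # sets the field
+  #   text_format.Merge('singular_field: 2', msg)     # Merge keeps the last value
+  #   # text_format.Parse('singular_field: 3', msg)   # ParseError: already set
+  #   text_format.Parse('repeated_field: "a"', msg)   # repeated fields append
+  #   text_format.Parse('repeated_field: "b"', msg)   # now ["a", "b"]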
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.MergeLines(lines, message) + + +class _Parser(object): + """Text format parser for protocol message.""" + + def __init__(self, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + self.allow_unknown_extension = allow_unknown_extension + self.allow_field_number = allow_field_number + self.descriptor_pool = descriptor_pool + self.allow_unknown_field = allow_unknown_field + + def ParseLines(self, lines, message): + """Parses a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = False + self._ParseOrMerge(lines, message) + return message + + def MergeLines(self, lines, message): + """Merges a text representation of a protocol message into a message.""" + self._allow_multiple_scalars = True + self._ParseOrMerge(lines, message) + return message + + def _ParseOrMerge(self, lines, message): + """Converts a text representation of a protocol message into a message. + + Args: + lines: Lines of a message's text representation. + message: A protocol buffer message to merge into. + + Raises: + ParseError: On text parsing problems. + """ + # Tokenize expects native str lines. + str_lines = ( + line if isinstance(line, str) else line.decode('utf-8') + for line in lines) + tokenizer = Tokenizer(str_lines) + while not tokenizer.AtEnd(): + self._MergeField(tokenizer, message) + + def _MergeField(self, tokenizer, message): + """Merges a single protocol message field into a message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + message: A protocol message to record the data. + + Raises: + ParseError: In case of text parsing problems. + """ + message_descriptor = message.DESCRIPTOR + if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and + tokenizer.TryConsume('[')): + type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) + tokenizer.Consume(']') + tokenizer.TryConsume(':') + if tokenizer.TryConsume('<'): + expanded_any_end_token = '>' + else: + tokenizer.Consume('{') + expanded_any_end_token = '}' + expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, + self.descriptor_pool) + if not expanded_any_sub_message: + raise ParseError('Type %s not found in descriptor pool' % + packed_type_name) + while not tokenizer.TryConsume(expanded_any_end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' 
% + (expanded_any_end_token,)) + self._MergeField(tokenizer, expanded_any_sub_message) + deterministic = False + + message.Pack(expanded_any_sub_message, + type_url_prefix=type_url_prefix, + deterministic=deterministic) + return + + if tokenizer.TryConsume('['): + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + name = '.'.join(name) + + if not message_descriptor.is_extendable: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" does not have extensions.' % + message_descriptor.full_name) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(name) + # pylint: enable=protected-access + + + if not field: + if self.allow_unknown_extension: + field = None + else: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" not registered. ' + 'Did you import the _pb2 module which defines it? ' + 'If you are trying to place the extension in the MessageSet ' + 'field of another message that is in an Any or MessageSet field, ' + 'that message\'s _pb2 module must be imported as well' % name) + elif message_descriptor != field.containing_type: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) + + tokenizer.Consume(']') + + else: + name = tokenizer.ConsumeIdentifierOrNumber() + if self.allow_field_number and name.isdigit(): + number = ParseInteger(name, True, True) + field = message_descriptor.fields_by_number.get(number, None) + if not field and message_descriptor.is_extendable: + field = message.Extensions._FindExtensionByNumber(number) + else: + field = message_descriptor.fields_by_name.get(name, None) + + # Group names are expected to be capitalized as they appear in the + # .proto file, which actually matches their type names, not their field + # names. + if not field: + field = message_descriptor.fields_by_name.get(name.lower(), None) + if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: + field = None + + if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and + field.message_type.name != name): + field = None + + if not field and not self.allow_unknown_field: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) + + if field: + if not self._allow_multiple_scalars and field.containing_oneof: + # Check if there's a different field set in this oneof. + # Note that we ignore the case if the same field was set before, and we + # apply _allow_multiple_scalars to non-scalar fields as well. + which_oneof = message.WhichOneof(field.containing_oneof.name) + if which_oneof is not None and which_oneof != field.name: + raise tokenizer.ParseErrorPreviousToken( + 'Field "%s" is specified along with field "%s", another member ' + 'of oneof "%s" for message type "%s".' % + (field.name, which_oneof, field.containing_oneof.name, + message_descriptor.full_name)) + + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + tokenizer.TryConsume(':') + merger = self._MergeMessageField + else: + tokenizer.Consume(':') + merger = self._MergeScalarField + + if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and + tokenizer.TryConsume('[')): + # Short repeated format, e.g. 
"foo: [1, 2, 3]" + if not tokenizer.TryConsume(']'): + while True: + merger(tokenizer, message, field) + if tokenizer.TryConsume(']'): + break + tokenizer.Consume(',') + + else: + merger(tokenizer, message, field) + + else: # Proto field is unknown. + assert (self.allow_unknown_extension or self.allow_unknown_field) + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + + def _ConsumeAnyTypeUrl(self, tokenizer): + """Consumes a google.protobuf.Any type URL and returns the type name.""" + # Consume "type.googleapis.com/". + prefix = [tokenizer.ConsumeIdentifier()] + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('/') + # Consume the fully-qualified type name. + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + return '.'.join(prefix), '.'.join(name) + + def _MergeMessageField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: The message of which field is a member. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + """ + is_map_entry = _IsMapEntry(field) + + if tokenizer.TryConsume('<'): + end_token = '>' + else: + tokenizer.Consume('{') + end_token = '}' + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + sub_message = message.Extensions[field].add() + elif is_map_entry: + sub_message = getattr(message, field.name).GetEntryClass()() + else: + sub_message = getattr(message, field.name).add() + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + sub_message = message.Extensions[field] + else: + # Also apply _allow_multiple_scalars to message field. + # TODO(jieluo): Change to _allow_singular_overwrites. + if (not self._allow_multiple_scalars and + message.HasField(field.name)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + sub_message = getattr(message, field.name) + sub_message.SetInParent() + + while not tokenizer.TryConsume(end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) + self._MergeField(tokenizer, sub_message) + + if is_map_entry: + value_cpptype = field.message_type.fields_by_name['value'].cpp_type + if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + value = getattr(message, field.name)[sub_message.key] + value.CopyFrom(sub_message.value) + else: + getattr(message, field.name)[sub_message.key] = sub_message.value + + @staticmethod + def _IsProto3Syntax(message): + message_descriptor = message.DESCRIPTOR + return (hasattr(message_descriptor, 'syntax') and + message_descriptor.syntax == 'proto3') + + def _MergeScalarField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: A protocol message to record the data. + field: The descriptor of the field to be merged. 
+ + Raises: + ParseError: In case of text parsing problems. + RuntimeError: On runtime errors. + """ + _ = self.allow_unknown_extension + value = None + + if field.type in (descriptor.FieldDescriptor.TYPE_INT32, + descriptor.FieldDescriptor.TYPE_SINT32, + descriptor.FieldDescriptor.TYPE_SFIXED32): + value = _ConsumeInt32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, + descriptor.FieldDescriptor.TYPE_SINT64, + descriptor.FieldDescriptor.TYPE_SFIXED64): + value = _ConsumeInt64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, + descriptor.FieldDescriptor.TYPE_FIXED32): + value = _ConsumeUint32(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, + descriptor.FieldDescriptor.TYPE_FIXED64): + value = _ConsumeUint64(tokenizer) + elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, + descriptor.FieldDescriptor.TYPE_DOUBLE): + value = tokenizer.ConsumeFloat() + elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: + value = tokenizer.ConsumeBool() + elif field.type == descriptor.FieldDescriptor.TYPE_STRING: + value = tokenizer.ConsumeString() + elif field.type == descriptor.FieldDescriptor.TYPE_BYTES: + value = tokenizer.ConsumeByteString() + elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: + value = tokenizer.ConsumeEnum(field) + else: + raise RuntimeError('Unknown field type %d' % field.type) + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + message.Extensions[field].append(value) + else: + getattr(message, field.name).append(value) + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + not self._IsProto3Syntax(message) and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + else: + message.Extensions[field] = value + else: + duplicate_error = False + if not self._allow_multiple_scalars: + if self._IsProto3Syntax(message): + # Proto3 doesn't represent presence so we try best effort to check + # multiple scalars by compare to default values. + duplicate_error = bool(getattr(message, field.name)) + else: + duplicate_error = message.HasField(field.name) + + if duplicate_error: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + else: + setattr(message, field.name, value) + + +def _SkipFieldContents(tokenizer): + """Skips over contents (value or message) of a field. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + # Try to guess the type of this field. + # If this field is not a message, there should be a ":" between the + # field name and the field value and also the field value should not + # start with "{" or "<" which indicates the beginning of a message body. + # If there is no ":" or there is a "{" or "<" after ":", this field has + # to be a message or the input is ill-formed. + if tokenizer.TryConsume(':') and not tokenizer.LookingAt( + '{') and not tokenizer.LookingAt('<'): + _SkipFieldValue(tokenizer) + else: + _SkipFieldMessage(tokenizer) + + +def _SkipField(tokenizer): + """Skips over a complete field (name and value/message). + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + if tokenizer.TryConsume('['): + # Consume extension name. 
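+  # Two input constructs handled by the merging code above, shown only as
+  # hypothetical text-format strings (field and type names are made up):
+  #
+  #   # Expanded google.protobuf.Any: a bracketed type URL followed by the
+  #   # packed message body, re-packed via message.Pack() in _MergeField().
+  #   'any_field { [type.googleapis.com/pkg.Inner] { value: 1 } }'
+  #
+  #   # Short repeated format: a bracketed, comma-separated list consumed by
+  #   # alternating merger(...) and Consume(',') until the closing ']'.
+  #   'repeated_int32_field: [1, 2, 3]'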
+ tokenizer.ConsumeIdentifier() + while tokenizer.TryConsume('.'): + tokenizer.ConsumeIdentifier() + tokenizer.Consume(']') + else: + tokenizer.ConsumeIdentifierOrNumber() + + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + +def _SkipFieldMessage(tokenizer): + """Skips over a field message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + + if tokenizer.TryConsume('<'): + delimiter = '>' + else: + tokenizer.Consume('{') + delimiter = '}' + + while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): + _SkipField(tokenizer) + + tokenizer.Consume(delimiter) + + +def _SkipFieldValue(tokenizer): + """Skips over a field value. + + Args: + tokenizer: A tokenizer to parse the field name and values. + + Raises: + ParseError: In case an invalid field value is found. + """ + # String/bytes tokens can come in multiple adjacent string literals. + # If we can consume one, consume as many as we can. + if tokenizer.TryConsumeByteString(): + while tokenizer.TryConsumeByteString(): + pass + return + + if (not tokenizer.TryConsumeIdentifier() and + not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and + not tokenizer.TryConsumeFloat()): + raise ParseError('Invalid field value: ' + tokenizer.token) + + +class Tokenizer(object): + """Protocol buffer text representation tokenizer. + + This class handles the lower level string parsing by splitting it into + meaningful tokens. + + It was directly ported from the Java protocol buffer API. + """ + + _WHITESPACE = re.compile(r'\s+') + _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) + _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) + _TOKEN = re.compile('|'.join([ + r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier + r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number + ] + [ # quoted str for each quote mark + # Avoid backtracking! https://stackoverflow.com/a/844267 + r'{qt}[^{qt}\n\\]*((\\.)+[^{qt}\n\\]*)*({qt}|\\?$)'.format(qt=mark) + for mark in _QUOTES + ])) + + _IDENTIFIER = re.compile(r'[^\d\W]\w*') + _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') + + def __init__(self, lines, skip_comments=True): + self._position = 0 + self._line = -1 + self._column = 0 + self._token_start = None + self.token = '' + self._lines = iter(lines) + self._current_line = '' + self._previous_line = 0 + self._previous_column = 0 + self._more_lines = True + self._skip_comments = skip_comments + self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT + or self._WHITESPACE) + self._SkipWhitespace() + self.NextToken() + + def LookingAt(self, token): + return self.token == token + + def AtEnd(self): + """Checks the end of the text was reached. + + Returns: + True iff the end was reached. + """ + return not self.token + + def _PopLine(self): + while len(self._current_line) <= self._column: + try: + self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False + return + else: + self._line += 1 + self._column = 0 + + def _SkipWhitespace(self): + while True: + self._PopLine() + match = self._whitespace_pattern.match(self._current_line, self._column) + if not match: + break + length = len(match.group(0)) + self._column += length + + def TryConsume(self, token): + """Tries to consume a given piece of text. + + Args: + token: Text to consume. + + Returns: + True iff the text was consumed. 
+ """ + if self.token == token: + self.NextToken() + return True + return False + + def Consume(self, token): + """Consumes a piece of text. + + Args: + token: Text to consume. + + Raises: + ParseError: If the text couldn't be consumed. + """ + if not self.TryConsume(token): + raise self.ParseError('Expected "%s".' % token) + + def ConsumeComment(self): + result = self.token + if not self._COMMENT.match(result): + raise self.ParseError('Expected comment.') + self.NextToken() + return result + + def ConsumeCommentOrTrailingComment(self): + """Consumes a comment, returns a 2-tuple (trailing bool, comment str).""" + + # Tokenizer initializes _previous_line and _previous_column to 0. As the + # tokenizer starts, it looks like there is a previous token on the line. + just_started = self._line == 0 and self._column == 0 + + before_parsing = self._previous_line + comment = self.ConsumeComment() + + # A trailing comment is a comment on the same line than the previous token. + trailing = (self._previous_line == before_parsing + and not just_started) + + return trailing, comment + + def TryConsumeIdentifier(self): + try: + self.ConsumeIdentifier() + return True + except ParseError: + return False + + def ConsumeIdentifier(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER.match(result): + raise self.ParseError('Expected identifier.') + self.NextToken() + return result + + def TryConsumeIdentifierOrNumber(self): + try: + self.ConsumeIdentifierOrNumber() + return True + except ParseError: + return False + + def ConsumeIdentifierOrNumber(self): + """Consumes protocol message field identifier. + + Returns: + Identifier string. + + Raises: + ParseError: If an identifier couldn't be consumed. + """ + result = self.token + if not self._IDENTIFIER_OR_NUMBER.match(result): + raise self.ParseError('Expected identifier or number, got %s.' % result) + self.NextToken() + return result + + def TryConsumeInteger(self): + try: + self.ConsumeInteger() + return True + except ParseError: + return False + + def ConsumeInteger(self): + """Consumes an integer number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer couldn't be consumed. + """ + try: + result = _ParseAbstractInteger(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeFloat(self): + try: + self.ConsumeFloat() + return True + except ParseError: + return False + + def ConsumeFloat(self): + """Consumes an floating point number. + + Returns: + The number parsed. + + Raises: + ParseError: If a floating point number couldn't be consumed. + """ + try: + result = ParseFloat(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeBool(self): + """Consumes a boolean value. + + Returns: + The bool parsed. + + Raises: + ParseError: If a boolean value couldn't be consumed. + """ + try: + result = ParseBool(self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def TryConsumeByteString(self): + try: + self.ConsumeByteString() + return True + except ParseError: + return False + + def ConsumeString(self): + """Consumes a string value. + + Returns: + The string parsed. + + Raises: + ParseError: If a string value couldn't be consumed. 
+ """ + the_bytes = self.ConsumeByteString() + try: + return str(the_bytes, 'utf-8') + except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): + """Consumes a byte array value. + + Returns: + The array parsed (as a string). + + Raises: + ParseError: If a byte array value couldn't be consumed. + """ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in _QUOTES: + the_list.append(self._ConsumeSingleByteString()) + return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. + + String literals (whether bytes or text) can come in multiple adjacent + tokens which are automatically concatenated, like in C or Python. This + method only consumes one token. + + Returns: + The token parsed. + Raises: + ParseError: When the wrong format data is found. + """ + text = self.token + if len(text) < 1 or text[0] not in _QUOTES: + raise self.ParseError('Expected string but found: %r' % (text,)) + + if len(text) < 2 or text[-1] != text[0]: + raise self.ParseError('String missing ending quote: %r' % (text,)) + + try: + result = text_encoding.CUnescape(text[1:-1]) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ParseErrorPreviousToken(self, message): + """Creates and *returns* a ParseError for the previously read token. + + Args: + message: A message to set for the exception. + + Returns: + A ParseError instance. + """ + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) + + def ParseError(self, message): + """Creates and *returns* a ParseError for the current token.""" + return ParseError('\'' + self._current_line + '\': ' + message, + self._line + 1, self._column + 1) + + def _StringParseError(self, e): + return self.ParseError('Couldn\'t parse string: ' + str(e)) + + def NextToken(self): + """Reads the next meaningful token.""" + self._previous_line = self._line + self._previous_column = self._column + + self._column += len(self.token) + self._SkipWhitespace() + + if not self._more_lines: + self.token = '' + return + + match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) + if match: + token = match.group(0) + self.token = token + else: + self.token = self._current_line[self._column] + +# Aliased so it can still be accessed by current visibility violators. +# TODO(dbarnett): Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. 
+ """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=False) + + +def _TryConsumeInt64(tokenizer): + try: + _ConsumeInt64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeInt64(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=True) + + +def _TryConsumeUint64(tokenizer): + try: + _ConsumeUint64(tokenizer) + return True + except ParseError: + return False + + +def _ConsumeUint64(tokenizer): + """Consumes an unsigned 64bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 64bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=False, is_long=True) + + +def _ConsumeInteger(tokenizer, is_signed=False, is_long=False): + """Consumes an integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer parsed. + + Raises: + ParseError: If an integer with given characteristics couldn't be consumed. + """ + try: + result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long) + except ValueError as e: + raise tokenizer.ParseError(str(e)) + tokenizer.NextToken() + return result + + +def ParseInteger(text, is_signed=False, is_long=False): + """Parses an integer. + + Args: + text: The text to parse. + is_signed: True if a signed integer must be parsed. + is_long: True if a long integer must be parsed. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + result = _ParseAbstractInteger(text) + + # Check if the integer is sane. Exceptions handled by callers. + checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] + checker.CheckValue(result) + return result + + +def _ParseAbstractInteger(text): + """Parses an integer without checking size/signedness. + + Args: + text: The text to parse. + + Returns: + The integer value. + + Raises: + ValueError: Thrown Iff the text is not a valid integer. + """ + # Do the actual parsing. Exception handling is propagated to caller. + orig_text = text + c_octal_match = re.match(r'(-?)0(\d+)$', text) + if c_octal_match: + # Python 3 no longer supports 0755 octal syntax without the 'o', so + # we always use the '0o' prefix for multi-digit numbers starting with 0. + text = c_octal_match.group(1) + '0o' + c_octal_match.group(2) + try: + return int(text, 0) + except ValueError: + raise ValueError('Couldn\'t parse integer: %s' % orig_text) + + +def ParseFloat(text): + """Parse a floating point number. + + Args: + text: Text to parse. + + Returns: + The number parsed. + + Raises: + ValueError: If a floating point number couldn't be parsed. + """ + try: + # Assume Python compatible syntax. + return float(text) + except ValueError: + # Check alternative spellings. 
+ if _FLOAT_INFINITY.match(text): + if text[0] == '-': + return float('-inf') + else: + return float('inf') + elif _FLOAT_NAN.match(text): + return float('nan') + else: + # assume '1.0f' format + try: + return float(text.rstrip('f')) + except ValueError: + raise ValueError('Couldn\'t parse float: %s' % text) + + +def ParseBool(text): + """Parse a boolean value. + + Args: + text: Text to parse. + + Returns: + Boolean values parsed + + Raises: + ValueError: If text is not a valid boolean. + """ + if text in ('true', 't', '1', 'True'): + return True + elif text in ('false', 'f', '0', 'False'): + return False + else: + raise ValueError('Expected "true" or "false".') + + +def ParseEnum(field, value): + """Parse an enum value. + + The value can be specified by a number (the enum value), or by + a string literal (the enum name). + + Args: + field: Enum field descriptor. + value: String value. + + Returns: + Enum value number. + + Raises: + ValueError: If the enum value could not be parsed. + """ + enum_descriptor = field.enum_type + try: + number = int(value, 0) + except ValueError: + # Identifier. + enum_value = enum_descriptor.values_by_name.get(value, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value named %s.' % + (enum_descriptor.full_name, value)) + else: + # Numeric value. + if hasattr(field.file, 'syntax'): + # Attribute is checked for compatibility. + if field.file.syntax == 'proto3': + # Proto3 accept numeric unknown enums. + return number + enum_value = enum_descriptor.values_by_number.get(number, None) + if enum_value is None: + raise ValueError('Enum type "%s" has no value with number %d.' % + (enum_descriptor.full_name, number)) + return enum_value.number diff --git a/MLPY/Lib/site-packages/google/protobuf/timestamp_pb2.py b/MLPY/Lib/site-packages/google/protobuf/timestamp_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..558d49694181846b56a00110c0efa054c6bb9834 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/timestamp_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/timestamp.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _TIMESTAMP._serialized_start=52 + _TIMESTAMP._serialized_end=95 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/type_pb2.py b/MLPY/Lib/site-packages/google/protobuf/type_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..19903fb6b4be5708bf24d34ebcc99cda33368f31 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/type_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/type.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b 
\x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.type_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SYNTAX._serialized_start=1413 + _SYNTAX._serialized_end=1459 + _TYPE._serialized_start=113 + _TYPE._serialized_end=328 + _FIELD._serialized_start=331 + _FIELD._serialized_end=1056 + _FIELD_KIND._serialized_start=610 + _FIELD_KIND._serialized_end=938 + _FIELD_CARDINALITY._serialized_start=940 + _FIELD_CARDINALITY._serialized_end=1056 + _ENUM._serialized_start=1059 + _ENUM._serialized_end=1265 + _ENUMVALUE._serialized_start=1267 + _ENUMVALUE._serialized_end=1350 + _OPTION._serialized_start=1352 + _OPTION._serialized_end=1411 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/util/__init__.py b/MLPY/Lib/site-packages/google/protobuf/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7e6df4b75532ba875c63454afb10c1b7ba31b0d Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_pb2.cpython-39.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..4b82e003da640ee7a2ee97e357a3773813d43df2 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_proto3_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_proto3_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ba99128fc269ff156ef4bc59dd14552e7e25ca6 Binary files /dev/null and b/MLPY/Lib/site-packages/google/protobuf/util/__pycache__/json_format_proto3_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/google/protobuf/util/json_format_pb2.py b/MLPY/Lib/site-packages/google/protobuf/util/json_format_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..66a5836c82084c2a65999345cce06ccf4b5d8a2b --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/util/json_format_pb2.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/util/json_format.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/protobuf/util/json_format.proto\x12\x11protobuf_unittest\"\x89\x01\n\x13TestFlagsAndStrings\x12\t\n\x01\x41\x18\x01 \x02(\x05\x12K\n\rrepeatedgroup\x18\x02 \x03(\n24.protobuf_unittest.TestFlagsAndStrings.RepeatedGroup\x1a\x1a\n\rRepeatedGroup\x12\t\n\x01\x66\x18\x03 \x02(\t\"!\n\x14TestBase64ByteArrays\x12\t\n\x01\x61\x18\x01 \x02(\x0c\"G\n\x12TestJavaScriptJSON\x12\t\n\x01\x61\x18\x01 \x01(\x05\x12\r\n\x05\x66inal\x18\x02 \x01(\x02\x12\n\n\x02in\x18\x03 \x01(\t\x12\x0b\n\x03Var\x18\x04 \x01(\t\"Q\n\x18TestJavaScriptOrderJSON1\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\"\x89\x01\n\x18TestJavaScriptOrderJSON2\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\x12\x36\n\x01z\x18\x06 \x03(\x0b\x32+.protobuf_unittest.TestJavaScriptOrderJSON1\"$\n\x0cTestLargeInt\x12\t\n\x01\x61\x18\x01 \x02(\x03\x12\t\n\x01\x62\x18\x02 \x02(\x04\"\xa0\x01\n\x0bTestNumbers\x12\x30\n\x01\x61\x18\x01 \x01(\x0e\x32%.protobuf_unittest.TestNumbers.MyType\x12\t\n\x01\x62\x18\x02 \x01(\x05\x12\t\n\x01\x63\x18\x03 \x01(\x02\x12\t\n\x01\x64\x18\x04 \x01(\x08\x12\t\n\x01\x65\x18\x05 \x01(\x01\x12\t\n\x01\x66\x18\x06 \x01(\r\"(\n\x06MyType\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"T\n\rTestCamelCase\x12\x14\n\x0cnormal_field\x18\x01 \x01(\t\x12\x15\n\rCAPITAL_FIELD\x18\x02 \x01(\x05\x12\x16\n\x0e\x43\x61melCaseField\x18\x03 \x01(\x05\"|\n\x0bTestBoolMap\x12=\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32+.protobuf_unittest.TestBoolMap.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"O\n\rTestRecursion\x12\r\n\x05value\x18\x01 \x01(\x05\x12/\n\x05\x63hild\x18\x02 \x01(\x0b\x32 
.protobuf_unittest.TestRecursion\"\x86\x01\n\rTestStringMap\x12\x43\n\nstring_map\x18\x01 \x03(\x0b\x32/.protobuf_unittest.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc4\x01\n\x14TestStringSerializer\x12\x15\n\rscalar_string\x18\x01 \x01(\t\x12\x17\n\x0frepeated_string\x18\x02 \x03(\t\x12J\n\nstring_map\x18\x03 \x03(\x0b\x32\x36.protobuf_unittest.TestStringSerializer.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"$\n\x18TestMessageWithExtension*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"z\n\rTestExtension\x12\r\n\x05value\x18\x01 \x01(\t2Z\n\x03\x65xt\x12+.protobuf_unittest.TestMessageWithExtension\x18\x64 \x01(\x0b\x32 .protobuf_unittest.TestExtension\"Q\n\x14TestDefaultEnumValue\x12\x39\n\nenum_value\x18\x01 \x01(\x0e\x32\x1c.protobuf_unittest.EnumValue:\x07\x44\x45\x46\x41ULT*2\n\tEnumValue\x12\x0c\n\x08PROTOCOL\x10\x00\x12\n\n\x06\x42UFFER\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageWithExtension.RegisterExtension(_TESTEXTENSION.extensions_by_name['ext']) + + DESCRIPTOR._options = None + _TESTBOOLMAP_BOOLMAPENTRY._options = None + _TESTBOOLMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._options = None + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_options = b'8\001' + _ENUMVALUE._serialized_start=1607 + _ENUMVALUE._serialized_end=1657 + _TESTFLAGSANDSTRINGS._serialized_start=62 + _TESTFLAGSANDSTRINGS._serialized_end=199 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_start=173 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_end=199 + _TESTBASE64BYTEARRAYS._serialized_start=201 + _TESTBASE64BYTEARRAYS._serialized_end=234 + _TESTJAVASCRIPTJSON._serialized_start=236 + _TESTJAVASCRIPTJSON._serialized_end=307 + _TESTJAVASCRIPTORDERJSON1._serialized_start=309 + _TESTJAVASCRIPTORDERJSON1._serialized_end=390 + _TESTJAVASCRIPTORDERJSON2._serialized_start=393 + _TESTJAVASCRIPTORDERJSON2._serialized_end=530 + _TESTLARGEINT._serialized_start=532 + _TESTLARGEINT._serialized_end=568 + _TESTNUMBERS._serialized_start=571 + _TESTNUMBERS._serialized_end=731 + _TESTNUMBERS_MYTYPE._serialized_start=691 + _TESTNUMBERS_MYTYPE._serialized_end=731 + _TESTCAMELCASE._serialized_start=733 + _TESTCAMELCASE._serialized_end=817 + _TESTBOOLMAP._serialized_start=819 + _TESTBOOLMAP._serialized_end=943 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_start=897 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_end=943 + _TESTRECURSION._serialized_start=945 + _TESTRECURSION._serialized_end=1024 + _TESTSTRINGMAP._serialized_start=1027 + _TESTSTRINGMAP._serialized_end=1161 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=1161 + _TESTSTRINGSERIALIZER._serialized_start=1164 + _TESTSTRINGSERIALIZER._serialized_end=1360 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_end=1161 + _TESTMESSAGEWITHEXTENSION._serialized_start=1362 + _TESTMESSAGEWITHEXTENSION._serialized_end=1398 + _TESTEXTENSION._serialized_start=1400 + _TESTEXTENSION._serialized_end=1522 + 
_TESTDEFAULTENUMVALUE._serialized_start=1524 + _TESTDEFAULTENUMVALUE._serialized_end=1605 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/util/json_format_proto3_pb2.py b/MLPY/Lib/site-packages/google/protobuf/util/json_format_proto3_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..5498deafa9ea4150f3b5bf213503cf60eb30a36a --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/util/json_format_proto3_pb2.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/util/json_format_proto3.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.protobuf import unittest_pb2 as google_dot_protobuf_dot_unittest__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-google/protobuf/util/json_format_proto3.proto\x12\x06proto3\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1egoogle/protobuf/unittest.proto\"\x1c\n\x0bMessageType\x12\r\n\x05value\x18\x01 \x01(\x05\"\x94\x05\n\x0bTestMessage\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x13\n\x0bint32_value\x18\x02 \x01(\x05\x12\x13\n\x0bint64_value\x18\x03 \x01(\x03\x12\x14\n\x0cuint32_value\x18\x04 \x01(\r\x12\x14\n\x0cuint64_value\x18\x05 \x01(\x04\x12\x13\n\x0b\x66loat_value\x18\x06 \x01(\x02\x12\x14\n\x0c\x64ouble_value\x18\x07 \x01(\x01\x12\x14\n\x0cstring_value\x18\x08 \x01(\t\x12\x13\n\x0b\x62ytes_value\x18\t \x01(\x0c\x12$\n\nenum_value\x18\n \x01(\x0e\x32\x10.proto3.EnumType\x12*\n\rmessage_value\x18\x0b \x01(\x0b\x32\x13.proto3.MessageType\x12\x1b\n\x13repeated_bool_value\x18\x15 \x03(\x08\x12\x1c\n\x14repeated_int32_value\x18\x16 \x03(\x05\x12\x1c\n\x14repeated_int64_value\x18\x17 \x03(\x03\x12\x1d\n\x15repeated_uint32_value\x18\x18 \x03(\r\x12\x1d\n\x15repeated_uint64_value\x18\x19 \x03(\x04\x12\x1c\n\x14repeated_float_value\x18\x1a \x03(\x02\x12\x1d\n\x15repeated_double_value\x18\x1b \x03(\x01\x12\x1d\n\x15repeated_string_value\x18\x1c \x03(\t\x12\x1c\n\x14repeated_bytes_value\x18\x1d \x03(\x0c\x12-\n\x13repeated_enum_value\x18\x1e \x03(\x0e\x32\x10.proto3.EnumType\x12\x33\n\x16repeated_message_value\x18\x1f \x03(\x0b\x32\x13.proto3.MessageType\"\x8c\x02\n\tTestOneof\x12\x1b\n\x11oneof_int32_value\x18\x01 \x01(\x05H\x00\x12\x1c\n\x12oneof_string_value\x18\x02 \x01(\tH\x00\x12\x1b\n\x11oneof_bytes_value\x18\x03 \x01(\x0cH\x00\x12,\n\x10oneof_enum_value\x18\x04 \x01(\x0e\x32\x10.proto3.EnumTypeH\x00\x12\x32\n\x13oneof_message_value\x18\x05 
\x01(\x0b\x32\x13.proto3.MessageTypeH\x00\x12\x36\n\x10oneof_null_value\x18\x06 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x42\r\n\x0boneof_value\"\xe1\x04\n\x07TestMap\x12.\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\x1c.proto3.TestMap.BoolMapEntry\x12\x30\n\tint32_map\x18\x02 \x03(\x0b\x32\x1d.proto3.TestMap.Int32MapEntry\x12\x30\n\tint64_map\x18\x03 \x03(\x0b\x32\x1d.proto3.TestMap.Int64MapEntry\x12\x32\n\nuint32_map\x18\x04 \x03(\x0b\x32\x1e.proto3.TestMap.Uint32MapEntry\x12\x32\n\nuint64_map\x18\x05 \x03(\x0b\x32\x1e.proto3.TestMap.Uint64MapEntry\x12\x32\n\nstring_map\x18\x06 \x03(\x0b\x32\x1e.proto3.TestMap.StringMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x85\x06\n\rTestNestedMap\x12\x34\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\".proto3.TestNestedMap.BoolMapEntry\x12\x36\n\tint32_map\x18\x02 \x03(\x0b\x32#.proto3.TestNestedMap.Int32MapEntry\x12\x36\n\tint64_map\x18\x03 \x03(\x0b\x32#.proto3.TestNestedMap.Int64MapEntry\x12\x38\n\nuint32_map\x18\x04 \x03(\x0b\x32$.proto3.TestNestedMap.Uint32MapEntry\x12\x38\n\nuint64_map\x18\x05 \x03(\x0b\x32$.proto3.TestNestedMap.Uint64MapEntry\x12\x38\n\nstring_map\x18\x06 \x03(\x0b\x32$.proto3.TestNestedMap.StringMapEntry\x12\x32\n\x07map_map\x18\x07 \x03(\x0b\x32!.proto3.TestNestedMap.MapMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x44\n\x0bMapMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.proto3.TestNestedMap:\x02\x38\x01\"{\n\rTestStringMap\x12\x38\n\nstring_map\x18\x01 \x03(\x0b\x32$.proto3.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xee\x07\n\x0bTestWrapper\x12.\n\nbool_value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\x0bint32_value\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bint64_value\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0cuint32_value\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x32\n\x0cuint64_value\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\x0b\x66loat_value\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.FloatValue\x12\x32\n\x0c\x64ouble_value\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x32\n\x0cstring_value\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x62ytes_value\x18\t 
\x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x37\n\x13repeated_bool_value\x18\x0b \x03(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x14repeated_int32_value\x18\x0c \x03(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x39\n\x14repeated_int64_value\x18\r \x03(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x15repeated_uint32_value\x18\x0e \x03(\x0b\x32\x1c.google.protobuf.UInt32Value\x12;\n\x15repeated_uint64_value\x18\x0f \x03(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x39\n\x14repeated_float_value\x18\x10 \x03(\x0b\x32\x1b.google.protobuf.FloatValue\x12;\n\x15repeated_double_value\x18\x11 \x03(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15repeated_string_value\x18\x12 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14repeated_bytes_value\x18\x13 \x03(\x0b\x32\x1b.google.protobuf.BytesValue\"n\n\rTestTimestamp\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\"k\n\x0cTestDuration\x12(\n\x05value\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x31\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x19.google.protobuf.Duration\":\n\rTestFieldMask\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"e\n\nTestStruct\x12&\n\x05value\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Struct\"\\\n\x07TestAny\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12,\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x14.google.protobuf.Any\"b\n\tTestValue\x12%\n\x05value\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12.\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\"n\n\rTestListValue\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValue\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\"\x89\x01\n\rTestBoolValue\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x34\n\x08\x62ool_map\x18\x02 \x03(\x0b\x32\".proto3.TestBoolValue.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"+\n\x12TestCustomJsonName\x12\x15\n\x05value\x18\x01 \x01(\x05R\x06@value\"J\n\x0eTestExtensions\x12\x38\n\nextensions\x18\x01 \x01(\x0b\x32$.protobuf_unittest.TestAllExtensions\"\x84\x01\n\rTestEnumValue\x12%\n\x0b\x65num_value1\x18\x01 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value2\x18\x02 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value3\x18\x03 \x01(\x0e\x32\x10.proto3.EnumType*\x1c\n\x08\x45numType\x12\x07\n\x03\x46OO\x10\x00\x12\x07\n\x03\x42\x41R\x10\x01\x42,\n\x18\x63om.google.protobuf.utilB\x10JsonFormatProto3b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_proto3_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030com.google.protobuf.utilB\020JsonFormatProto3' + _TESTMAP_BOOLMAPENTRY._options = None + _TESTMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT32MAPENTRY._options = None + _TESTMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT64MAPENTRY._options = None + _TESTMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT32MAPENTRY._options = None + _TESTMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT64MAPENTRY._options = None + _TESTMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_STRINGMAPENTRY._options 
= None + _TESTMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_BOOLMAPENTRY._options = None + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT32MAPENTRY._options = None + _TESTNESTEDMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT64MAPENTRY._options = None + _TESTNESTEDMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT32MAPENTRY._options = None + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT64MAPENTRY._options = None + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_STRINGMAPENTRY._options = None + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_MAPMAPENTRY._options = None + _TESTNESTEDMAP_MAPMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTBOOLVALUE_BOOLMAPENTRY._options = None + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_options = b'8\001' + _ENUMTYPE._serialized_start=4849 + _ENUMTYPE._serialized_end=4877 + _MESSAGETYPE._serialized_start=277 + _MESSAGETYPE._serialized_end=305 + _TESTMESSAGE._serialized_start=308 + _TESTMESSAGE._serialized_end=968 + _TESTONEOF._serialized_start=971 + _TESTONEOF._serialized_end=1239 + _TESTMAP._serialized_start=1242 + _TESTMAP._serialized_end=1851 + _TESTMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTMAP_INT32MAPENTRY._serialized_start=1605 + _TESTMAP_INT32MAPENTRY._serialized_end=1652 + _TESTMAP_INT64MAPENTRY._serialized_start=1654 + _TESTMAP_INT64MAPENTRY._serialized_end=1701 + _TESTMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP._serialized_start=1854 + _TESTNESTEDMAP._serialized_end=2627 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_start=1605 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_end=1652 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_start=1654 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_end=1701 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_start=2559 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_end=2627 + _TESTSTRINGMAP._serialized_start=2629 + _TESTSTRINGMAP._serialized_end=2752 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=2704 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=2752 + _TESTWRAPPER._serialized_start=2755 + _TESTWRAPPER._serialized_end=3761 + _TESTTIMESTAMP._serialized_start=3763 + _TESTTIMESTAMP._serialized_end=3873 + _TESTDURATION._serialized_start=3875 + _TESTDURATION._serialized_end=3982 + _TESTFIELDMASK._serialized_start=3984 + _TESTFIELDMASK._serialized_end=4042 + _TESTSTRUCT._serialized_start=4044 + _TESTSTRUCT._serialized_end=4145 + _TESTANY._serialized_start=4147 + _TESTANY._serialized_end=4239 + _TESTVALUE._serialized_start=4241 + _TESTVALUE._serialized_end=4339 + _TESTLISTVALUE._serialized_start=4341 + 
_TESTLISTVALUE._serialized_end=4451 + _TESTBOOLVALUE._serialized_start=4454 + _TESTBOOLVALUE._serialized_end=4591 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_start=1557 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_end=1603 + _TESTCUSTOMJSONNAME._serialized_start=4593 + _TESTCUSTOMJSONNAME._serialized_end=4636 + _TESTEXTENSIONS._serialized_start=4638 + _TESTEXTENSIONS._serialized_end=4712 + _TESTENUMVALUE._serialized_start=4715 + _TESTENUMVALUE._serialized_end=4847 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/google/protobuf/wrappers_pb2.py b/MLPY/Lib/site-packages/google/protobuf/wrappers_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..e49eb4c15d926cd2563aa63d90b31dd118b28da3 --- /dev/null +++ b/MLPY/Lib/site-packages/google/protobuf/wrappers_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/wrappers.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.wrappers_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rWrappersProtoP\001Z1google.golang.org/protobuf/types/known/wrapperspb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DOUBLEVALUE._serialized_start=51 + _DOUBLEVALUE._serialized_end=79 + _FLOATVALUE._serialized_start=81 + _FLOATVALUE._serialized_end=108 + _INT64VALUE._serialized_start=110 + _INT64VALUE._serialized_end=137 + _UINT64VALUE._serialized_start=139 + _UINT64VALUE._serialized_end=167 + _INT32VALUE._serialized_start=169 + _INT32VALUE._serialized_end=196 + _UINT32VALUE._serialized_start=198 + _UINT32VALUE._serialized_end=226 + _BOOLVALUE._serialized_start=228 + _BOOLVALUE._serialized_end=254 + _STRINGVALUE._serialized_start=256 + _STRINGVALUE._serialized_end=284 + _BYTESVALUE._serialized_start=286 + _BYTESVALUE._serialized_end=313 +# @@protoc_insertion_point(module_scope) diff --git a/MLPY/Lib/site-packages/grpc/__init__.py b/MLPY/Lib/site-packages/grpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..400b960ed98849f5c1f266460bff96b847deb85c --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/__init__.py @@ -0,0 +1,2348 @@ +# 
Copyright 2015-2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC's Python API.""" + +import abc +import contextlib +import enum +import logging +import sys + +from grpc import _compression +from grpc._cython import cygrpc as _cygrpc +from grpc._runtime_protos import protos +from grpc._runtime_protos import protos_and_services +from grpc._runtime_protos import services + +logging.getLogger(__name__).addHandler(logging.NullHandler()) + +try: + # pylint: disable=ungrouped-imports + from grpc._grpcio_metadata import __version__ +except ImportError: + __version__ = "dev0" + +############################## Future Interface ############################### + + +class FutureTimeoutError(Exception): + """Indicates that a method call on a Future timed out.""" + + +class FutureCancelledError(Exception): + """Indicates that the computation underlying a Future was cancelled.""" + + +class Future(abc.ABC): + """A representation of a computation in another control flow. + + Computations represented by a Future may be yet to be begun, + may be ongoing, or may have already completed. + """ + + @abc.abstractmethod + def cancel(self): + """Attempts to cancel the computation. + + This method does not block. + + Returns: + bool: + Returns True if the computation was canceled. + + Returns False under all other circumstances, for example: + + 1. computation has begun and could not be canceled. + 2. computation has finished + 3. computation is scheduled for execution and it is impossible + to determine its state without blocking. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancelled(self): + """Describes whether the computation was cancelled. + + This method does not block. + + Returns: + bool: + Returns True if the computation was cancelled before its result became + available. + + Returns False under all other circumstances, for example: + + 1. computation was not cancelled. + 2. computation's result is available. + """ + raise NotImplementedError() + + @abc.abstractmethod + def running(self): + """Describes whether the computation is taking place. + + This method does not block. + + Returns: + Returns True if the computation is scheduled for execution or + currently executing. + + Returns False if the computation already executed or was cancelled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def done(self): + """Describes whether the computation has taken place. + + This method does not block. + + Returns: + bool: + Returns True if the computation already executed or was cancelled. + Returns False if the computation is scheduled for execution or + currently executing. + This is exactly opposite of the running() method's result. + """ + raise NotImplementedError() + + @abc.abstractmethod + def result(self, timeout=None): + """Returns the result of the computation or raises its exception. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation to + finish or be cancelled. 
If None, the call will block until the + computations's termination. + + Returns: + The return value of the computation. + + Raises: + FutureTimeoutError: If a timeout value is passed and the computation + does not terminate within the allotted time. + FutureCancelledError: If the computation was cancelled. + Exception: If the computation raised an exception, this call will + raise the same exception. + """ + raise NotImplementedError() + + @abc.abstractmethod + def exception(self, timeout=None): + """Return the exception raised by the computation. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation to + terminate or be cancelled. If None, the call will block until the + computations's termination. + + Returns: + The exception raised by the computation, or None if the computation + did not raise an exception. + + Raises: + FutureTimeoutError: If a timeout value is passed and the computation + does not terminate within the allotted time. + FutureCancelledError: If the computation was cancelled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def traceback(self, timeout=None): + """Access the traceback of the exception raised by the computation. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation + to terminate or be cancelled. If None, the call will block until + the computation's termination. + + Returns: + The traceback of the exception raised by the computation, or None + if the computation did not raise an exception. + + Raises: + FutureTimeoutError: If a timeout value is passed and the computation + does not terminate within the allotted time. + FutureCancelledError: If the computation was cancelled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_done_callback(self, fn): + """Adds a function to be called at completion of the computation. + + The callback will be passed this Future object describing the outcome + of the computation. Callbacks will be invoked after the future is + terminated, whether successfully or not. + + If the computation has already completed, the callback will be called + immediately. + + Exceptions raised in the callback will be logged at ERROR level, but + will not terminate any threads of execution. + + Args: + fn: A callable taking this Future object as its single parameter. + """ + raise NotImplementedError() + + +################################ gRPC Enums ################################## + + +@enum.unique +class ChannelConnectivity(enum.Enum): + """Mirrors grpc_connectivity_state in the gRPC Core. + + Attributes: + IDLE: The channel is idle. + CONNECTING: The channel is connecting. + READY: The channel is ready to conduct RPCs. + TRANSIENT_FAILURE: The channel has seen a failure from which it expects + to recover. + SHUTDOWN: The channel has seen a failure from which it cannot recover. + """ + + IDLE = (_cygrpc.ConnectivityState.idle, "idle") + CONNECTING = (_cygrpc.ConnectivityState.connecting, "connecting") + READY = (_cygrpc.ConnectivityState.ready, "ready") + TRANSIENT_FAILURE = ( + _cygrpc.ConnectivityState.transient_failure, + "transient failure", + ) + SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, "shutdown") + + +@enum.unique +class StatusCode(enum.Enum): + """Mirrors grpc_status_code in the gRPC Core. + + Attributes: + OK: Not an error; returned on success + CANCELLED: The operation was cancelled (typically by the caller). + UNKNOWN: Unknown error. 
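As a rough usage sketch (illustrative, not part of the vendored source): a Call-Future returned by a multi-callable's .future() method supports the Future interface above. The target address, method path, and payload below are placeholders.

import grpc

channel = grpc.insecure_channel("localhost:50051")    # hypothetical target
ping = channel.unary_unary("/example.Echo/Ping")      # hypothetical method path
call_future = ping.future(b"payload", timeout=5.0)    # an object that is both a Call and a Future

def on_done(fut):
    # Runs once the RPC terminates, whether it succeeded, failed, or was cancelled.
    if not fut.cancelled() and fut.exception() is None:
        print("response bytes:", fut.result())

call_future.add_done_callback(on_done)
try:
    call_future.result(timeout=5.0)     # blocks; raises RpcError on non-OK status
except grpc.FutureTimeoutError:
    call_future.cancel()                # best-effort cancellation on timeout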
+ INVALID_ARGUMENT: Client specified an invalid argument. + DEADLINE_EXCEEDED: Deadline expired before operation could complete. + NOT_FOUND: Some requested entity (e.g., file or directory) was not found. + ALREADY_EXISTS: Some entity that we attempted to create (e.g., file or directory) + already exists. + PERMISSION_DENIED: The caller does not have permission to execute the specified + operation. + UNAUTHENTICATED: The request does not have valid authentication credentials for the + operation. + RESOURCE_EXHAUSTED: Some resource has been exhausted, perhaps a per-user quota, or + perhaps the entire file system is out of space. + FAILED_PRECONDITION: Operation was rejected because the system is not in a state + required for the operation's execution. + ABORTED: The operation was aborted, typically due to a concurrency issue + like sequencer check failures, transaction aborts, etc. + UNIMPLEMENTED: Operation is not implemented or not supported/enabled in this service. + INTERNAL: Internal errors. Means some invariants expected by underlying + system has been broken. + UNAVAILABLE: The service is currently unavailable. + DATA_LOSS: Unrecoverable data loss or corruption. + """ + + OK = (_cygrpc.StatusCode.ok, "ok") + CANCELLED = (_cygrpc.StatusCode.cancelled, "cancelled") + UNKNOWN = (_cygrpc.StatusCode.unknown, "unknown") + INVALID_ARGUMENT = (_cygrpc.StatusCode.invalid_argument, "invalid argument") + DEADLINE_EXCEEDED = ( + _cygrpc.StatusCode.deadline_exceeded, + "deadline exceeded", + ) + NOT_FOUND = (_cygrpc.StatusCode.not_found, "not found") + ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, "already exists") + PERMISSION_DENIED = ( + _cygrpc.StatusCode.permission_denied, + "permission denied", + ) + RESOURCE_EXHAUSTED = ( + _cygrpc.StatusCode.resource_exhausted, + "resource exhausted", + ) + FAILED_PRECONDITION = ( + _cygrpc.StatusCode.failed_precondition, + "failed precondition", + ) + ABORTED = (_cygrpc.StatusCode.aborted, "aborted") + OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, "out of range") + UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, "unimplemented") + INTERNAL = (_cygrpc.StatusCode.internal, "internal") + UNAVAILABLE = (_cygrpc.StatusCode.unavailable, "unavailable") + DATA_LOSS = (_cygrpc.StatusCode.data_loss, "data loss") + UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, "unauthenticated") + + +############################# gRPC Status ################################ + + +class Status(abc.ABC): + """Describes the status of an RPC. + + This is an EXPERIMENTAL API. + + Attributes: + code: A StatusCode object to be sent to the client. + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + trailing_metadata: The trailing :term:`metadata` in the RPC. + """ + + +############################# gRPC Exceptions ################################ + + +class RpcError(Exception): + """Raised by the gRPC library to indicate non-OK-status RPC termination.""" + + +############################## Shared Context ################################ + + +class RpcContext(abc.ABC): + """Provides RPC-related information and action.""" + + @abc.abstractmethod + def is_active(self): + """Describes whether the RPC is active or has terminated. + + Returns: + bool: + True if RPC is active, False otherwise. + """ + raise NotImplementedError() + + @abc.abstractmethod + def time_remaining(self): + """Describes the length of allowed time remaining for the RPC. 
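A hedged sketch of how RpcError and StatusCode typically surface on the client side; the channel target and method path are invented, and no running server is assumed.

import grpc

with grpc.insecure_channel("localhost:50051") as channel:    # hypothetical target
    ping = channel.unary_unary("/example.Echo/Ping")          # raw-bytes multi-callable
    try:
        reply = ping(b"payload", timeout=2.0)
    except grpc.RpcError as err:
        # The raised RpcError is also a Call, so status information is available.
        print("status:", err.code(), "details:", err.details())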
+ + Returns: + A nonnegative float indicating the length of allowed time in seconds + remaining for the RPC to complete before it is considered to have + timed out, or None if no deadline was specified for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC. + + Idempotent and has no effect if the RPC has already terminated. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_callback(self, callback): + """Registers a callback to be called on RPC termination. + + Args: + callback: A no-parameter callable to be called on RPC termination. + + Returns: + True if the callback was added and will be called later; False if + the callback was not added and will not be called (because the RPC + already terminated or some other reason). + """ + raise NotImplementedError() + + +######################### Invocation-Side Context ############################ + + +class Call(RpcContext, metaclass=abc.ABCMeta): + """Invocation-side utility object for an RPC.""" + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata sent by the server. + + This method blocks until the value is available. + + Returns: + The initial :term:`metadata`. + """ + raise NotImplementedError() + + @abc.abstractmethod + def trailing_metadata(self): + """Accesses the trailing metadata sent by the server. + + This method blocks until the value is available. + + Returns: + The trailing :term:`metadata`. + """ + raise NotImplementedError() + + @abc.abstractmethod + def code(self): + """Accesses the status code sent by the server. + + This method blocks until the value is available. + + Returns: + The StatusCode value for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def details(self): + """Accesses the details sent by the server. + + This method blocks until the value is available. + + Returns: + The details string of the RPC. + """ + raise NotImplementedError() + + +############## Invocation-Side Interceptor Interfaces & Classes ############## + + +class ClientCallDetails(abc.ABC): + """Describes an RPC to be invoked. + + Attributes: + method: The method name of the RPC. + timeout: An optional duration of time in seconds to allow for the RPC. + metadata: Optional :term:`metadata` to be transmitted to + the service-side of the RPC. + credentials: An optional CallCredentials for the RPC. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + """ + + +class UnaryUnaryClientInterceptor(abc.ABC): + """Affords intercepting unary-unary invocations.""" + + @abc.abstractmethod + def intercept_unary_unary(self, continuation, client_call_details, request): + """Intercepts a unary-unary invocation asynchronously. + + Args: + continuation: A function that proceeds with the invocation by + executing the next interceptor in chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `response_future = continuation(client_call_details, request)` + to continue with the RPC. `continuation` returns an object that is + both a Call for the RPC and a Future. In the event of RPC + completion, the return Call-Future's result value will be + the response message of the RPC. Should the event terminate + with non-OK status, the returned Call-Future's exception value + will be an RpcError. 
+ client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request: The request value for the RPC. + + Returns: + An object that is both a Call for the RPC and a Future. + In the event of RPC completion, the return Call-Future's + result value will be the response message of the RPC. + Should the event terminate with non-OK status, the returned + Call-Future's exception value will be an RpcError. + """ + raise NotImplementedError() + + +class UnaryStreamClientInterceptor(abc.ABC): + """Affords intercepting unary-stream invocations.""" + + @abc.abstractmethod + def intercept_unary_stream( + self, continuation, client_call_details, request + ): + """Intercepts a unary-stream invocation. + + Args: + continuation: A function that proceeds with the invocation by + executing the next interceptor in chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `response_iterator = continuation(client_call_details, request)` + to continue with the RPC. `continuation` returns an object that is + both a Call for the RPC and an iterator for response values. + Drawing response values from the returned Call-iterator may + raise RpcError indicating termination of the RPC with non-OK + status. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request: The request value for the RPC. + + Returns: + An object that is both a Call for the RPC and an iterator of + response values. Drawing response values from the returned + Call-iterator may raise RpcError indicating termination of + the RPC with non-OK status. This object *should* also fulfill the + Future interface, though it may not. + """ + raise NotImplementedError() + + +class StreamUnaryClientInterceptor(abc.ABC): + """Affords intercepting stream-unary invocations.""" + + @abc.abstractmethod + def intercept_stream_unary( + self, continuation, client_call_details, request_iterator + ): + """Intercepts a stream-unary invocation asynchronously. + + Args: + continuation: A function that proceeds with the invocation by + executing the next interceptor in chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `response_future = continuation(client_call_details, request_iterator)` + to continue with the RPC. `continuation` returns an object that is + both a Call for the RPC and a Future. In the event of RPC completion, + the return Call-Future's result value will be the response message + of the RPC. Should the event terminate with non-OK status, the + returned Call-Future's exception value will be an RpcError. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request_iterator: An iterator that yields request values for the RPC. + + Returns: + An object that is both a Call for the RPC and a Future. + In the event of RPC completion, the return Call-Future's + result value will be the response message of the RPC. + Should the event terminate with non-OK status, the returned + Call-Future's exception value will be an RpcError. + """ + raise NotImplementedError() + + +class StreamStreamClientInterceptor(abc.ABC): + """Affords intercepting stream-stream invocations.""" + + @abc.abstractmethod + def intercept_stream_stream( + self, continuation, client_call_details, request_iterator + ): + """Intercepts a stream-stream invocation. 
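A minimal client-interceptor sketch for the unary-unary interface described above, using the namedtuple pattern commonly seen because the incoming ClientCallDetails is read-only; the header name is an arbitrary assumption.

import collections
import grpc

class _CallDetails(
        collections.namedtuple(
            "_CallDetails", ("method", "timeout", "metadata", "credentials")),
        grpc.ClientCallDetails):
    pass

class HeaderInjector(grpc.UnaryUnaryClientInterceptor):
    def intercept_unary_unary(self, continuation, client_call_details, request):
        # Copy the outgoing metadata, add one entry, and forward the RPC.
        metadata = list(client_call_details.metadata or ())
        metadata.append(("x-example-header", "value"))   # assumed header name
        details = _CallDetails(client_call_details.method,
                               client_call_details.timeout,
                               metadata,
                               client_call_details.credentials)
        return continuation(details, request)

# channel = grpc.intercept_channel(grpc.insecure_channel("localhost:50051"),
#                                  HeaderInjector())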
+ + Args: + continuation: A function that proceeds with the invocation by + executing the next interceptor in chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `response_iterator = continuation(client_call_details, request_iterator)` + to continue with the RPC. `continuation` returns an object that is + both a Call for the RPC and an iterator for response values. + Drawing response values from the returned Call-iterator may + raise RpcError indicating termination of the RPC with non-OK + status. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request_iterator: An iterator that yields request values for the RPC. + + Returns: + An object that is both a Call for the RPC and an iterator of + response values. Drawing response values from the returned + Call-iterator may raise RpcError indicating termination of + the RPC with non-OK status. This object *should* also fulfill the + Future interface, though it may not. + """ + raise NotImplementedError() + + +############ Authentication & Authorization Interfaces & Classes ############# + + +class ChannelCredentials(object): + """An encapsulation of the data required to create a secure Channel. + + This class has no supported interface - it exists to define the type of its + instances and its instances exist to be passed to other functions. For + example, ssl_channel_credentials returns an instance of this class and + secure_channel requires an instance of this class. + """ + + def __init__(self, credentials): + self._credentials = credentials + + +class CallCredentials(object): + """An encapsulation of the data required to assert an identity over a call. + + A CallCredentials has to be used with secure Channel, otherwise the + metadata will not be transmitted to the server. + + A CallCredentials may be composed with ChannelCredentials to always assert + identity for every call over that Channel. + + This class has no supported interface - it exists to define the type of its + instances and its instances exist to be passed to other functions. + """ + + def __init__(self, credentials): + self._credentials = credentials + + +class AuthMetadataContext(abc.ABC): + """Provides information to call credentials metadata plugins. + + Attributes: + service_url: A string URL of the service being called into. + method_name: A string of the fully qualified method name being called. + """ + + +class AuthMetadataPluginCallback(abc.ABC): + """Callback object received by a metadata plugin.""" + + def __call__(self, metadata, error): + """Passes to the gRPC runtime authentication metadata for an RPC. + + Args: + metadata: The :term:`metadata` used to construct the CallCredentials. + error: An Exception to indicate error or None to indicate success. + """ + raise NotImplementedError() + + +class AuthMetadataPlugin(abc.ABC): + """A specification for custom authentication.""" + + def __call__(self, context, callback): + """Implements authentication by passing metadata to a callback. + + This method will be invoked asynchronously in a separate thread. + + Args: + context: An AuthMetadataContext providing information on the RPC that + the plugin is being called to authenticate. + callback: An AuthMetadataPluginCallback to be invoked either + synchronously or asynchronously. 
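A sketch of a call-credentials plugin built on AuthMetadataPlugin; the token value and the "authorization" header are assumptions about the target service.

import grpc

class TokenAuth(grpc.AuthMetadataPlugin):
    def __init__(self, token):
        self._token = token

    def __call__(self, context, callback):
        # context.service_url / context.method_name identify the RPC being authenticated.
        callback((("authorization", "Bearer " + self._token),), None)

# call_creds = grpc.metadata_call_credentials(TokenAuth("my-token"))
# channel_creds = grpc.composite_channel_credentials(
#     grpc.ssl_channel_credentials(), call_creds)
# channel = grpc.secure_channel("example.com:443", channel_creds)   # hypothetical host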
+ """ + raise NotImplementedError() + + +class ServerCredentials(object): + """An encapsulation of the data required to open a secure port on a Server. + + This class has no supported interface - it exists to define the type of its + instances and its instances exist to be passed to other functions. + """ + + def __init__(self, credentials): + self._credentials = credentials + + +class ServerCertificateConfiguration(object): + """A certificate configuration for use with an SSL-enabled Server. + + Instances of this class can be returned in the certificate configuration + fetching callback. + + This class has no supported interface -- it exists to define the + type of its instances and its instances exist to be passed to + other functions. + """ + + def __init__(self, certificate_configuration): + self._certificate_configuration = certificate_configuration + + +######################## Multi-Callable Interfaces ########################### + + +class UnaryUnaryMultiCallable(abc.ABC): + """Affords invoking a unary-unary RPC from client-side.""" + + @abc.abstractmethod + def __call__( + self, + request, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Synchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional duration of time in seconds to allow + for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + The response value for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod + def with_call( + self, + request, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Synchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional durating of time in seconds to allow for + the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + The response value for the RPC and a Call value for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future( + self, + request, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. 
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + An object that is both a Call for the RPC and a Future. + In the event of RPC completion, the return Call-Future's result + value will be the response message of the RPC. + Should the event terminate with non-OK status, + the returned Call-Future's exception value will be an RpcError. + """ + raise NotImplementedError() + + +class UnaryStreamMultiCallable(abc.ABC): + """Affords invoking a unary-stream RPC from client-side.""" + + @abc.abstractmethod + def __call__( + self, + request, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. If None, the timeout is considered infinite. + metadata: An optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + An object that is a Call for the RPC, an iterator of response + values, and a Future for the RPC. Drawing response values from the + returned Call-iterator may raise RpcError indicating termination of + the RPC with non-OK status. + """ + raise NotImplementedError() + + +class StreamUnaryMultiCallable(abc.ABC): + """Affords invoking a stream-unary RPC from client-side.""" + + @abc.abstractmethod + def __call__( + self, + request_iterator, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Synchronously invokes the underlying RPC. + + Args: + request_iterator: An iterator that yields request values for + the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. If None, the timeout is considered infinite. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + The response value for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also implement grpc.Call, affording methods + such as metadata, code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod + def with_call( + self, + request_iterator, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Synchronously invokes the underlying RPC on the client. + + Args: + request_iterator: An iterator that yields request values for + the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. If None, the timeout is considered infinite. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. 
+ + Returns: + The response value for the RPC and a Call object for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future( + self, + request_iterator, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Asynchronously invokes the underlying RPC on the client. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. If None, the timeout is considered infinite. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + An object that is both a Call for the RPC and a Future. + In the event of RPC completion, the return Call-Future's result value + will be the response message of the RPC. Should the event terminate + with non-OK status, the returned Call-Future's exception value will + be an RpcError. + """ + raise NotImplementedError() + + +class StreamStreamMultiCallable(abc.ABC): + """Affords invoking a stream-stream RPC on client-side.""" + + @abc.abstractmethod + def __call__( + self, + request_iterator, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None, + ): + """Invokes the underlying RPC on the client. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: An optional duration of time in seconds to allow for + the RPC. If not specified, the timeout is considered infinite. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + An object that is a Call for the RPC, an iterator of response + values, and a Future for the RPC. Drawing response values from the + returned Call-iterator may raise RpcError indicating termination of + the RPC with non-OK status. + """ + raise NotImplementedError() + + +############################# Channel Interface ############################## + + +class Channel(abc.ABC): + """Affords RPC invocation via generic methods on client-side. + + Channel objects implement the Context Manager type, although they need not + support being entered and exited multiple times. + """ + + @abc.abstractmethod + def subscribe(self, callback, try_to_connect=False): + """Subscribe to this Channel's connectivity state machine. + + A Channel may be in any of the states described by ChannelConnectivity. + This method allows application to monitor the state transitions. + The typical use case is to debug or gain better visibility into gRPC + runtime's state. + + Args: + callback: A callable to be invoked with ChannelConnectivity argument. + ChannelConnectivity describes current state of the channel. 
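A connectivity-monitoring sketch for the subscribe/unsubscribe pair described here; the channel target is a placeholder.

import grpc

def on_state_change(state):
    # `state` is a grpc.ChannelConnectivity member (IDLE, CONNECTING, READY, ...).
    print("channel state:", state)

channel = grpc.insecure_channel("localhost:50051")     # hypothetical target
channel.subscribe(on_state_change, try_to_connect=True)
# ... later, stop receiving state updates:
channel.unsubscribe(on_state_change)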
+ The callable will be invoked immediately upon subscription + and again for every change to ChannelConnectivity until it + is unsubscribed or this Channel object goes out of scope. + try_to_connect: A boolean indicating whether or not this Channel + should attempt to connect immediately. If set to False, gRPC + runtime decides when to connect. + """ + raise NotImplementedError() + + @abc.abstractmethod + def unsubscribe(self, callback): + """Unsubscribes a subscribed callback from this Channel's connectivity. + + Args: + callback: A callable previously registered with this Channel from + having been passed to its "subscribe" method. + """ + raise NotImplementedError() + + @abc.abstractmethod + def unary_unary( + self, + method, + request_serializer=None, + response_deserializer=None, + _registered_method=False, + ): + """Creates a UnaryUnaryMultiCallable for a unary-unary method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. A bool representing whether the method + is registered. + + Returns: + A UnaryUnaryMultiCallable value for the named unary-unary method. + """ + raise NotImplementedError() + + @abc.abstractmethod + def unary_stream( + self, + method, + request_serializer=None, + response_deserializer=None, + _registered_method=False, + ): + """Creates a UnaryStreamMultiCallable for a unary-stream method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None is + passed. + _registered_method: Implementation Private. A bool representing whether the method + is registered. + + Returns: + A UnaryStreamMultiCallable value for the name unary-stream method. + """ + raise NotImplementedError() + + @abc.abstractmethod + def stream_unary( + self, + method, + request_serializer=None, + response_deserializer=None, + _registered_method=False, + ): + """Creates a StreamUnaryMultiCallable for a stream-unary method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None is + passed. + _registered_method: Implementation Private. A bool representing whether the method + is registered. + + Returns: + A StreamUnaryMultiCallable value for the named stream-unary method. + """ + raise NotImplementedError() + + @abc.abstractmethod + def stream_stream( + self, + method, + request_serializer=None, + response_deserializer=None, + _registered_method=False, + ): + """Creates a StreamStreamMultiCallable for a stream-stream method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. 
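A sketch of building a multi-callable from the Channel factory methods above; with no (de)serializers the messages travel as raw bytes, and the method path and payloads are invented.

import grpc

channel = grpc.insecure_channel("localhost:50051")             # hypothetical target
upload = channel.stream_unary("/example.FileService/Upload")   # hypothetical method

def chunks():
    for part in (b"first", b"second", b"third"):   # made-up request payloads
        yield part

summary, call = upload.with_call(chunks(), timeout=10.0)
print(call.code(), summary)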
Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. A bool representing whether the method + is registered. + + Returns: + A StreamStreamMultiCallable value for the named stream-stream method. + """ + raise NotImplementedError() + + @abc.abstractmethod + def close(self): + """Closes this Channel and releases all resources held by it. + + Closing the Channel will immediately terminate all RPCs active with the + Channel and it is not valid to invoke new RPCs with the Channel. + + This method is idempotent. + """ + raise NotImplementedError() + + def __enter__(self): + """Enters the runtime context related to the channel object.""" + raise NotImplementedError() + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exits the runtime context related to the channel object.""" + raise NotImplementedError() + + +########################## Service-Side Context ############################## + + +class ServicerContext(RpcContext, metaclass=abc.ABCMeta): + """A context object passed to method implementations.""" + + @abc.abstractmethod + def invocation_metadata(self): + """Accesses the metadata sent by the client. + + Returns: + The invocation :term:`metadata`. + """ + raise NotImplementedError() + + @abc.abstractmethod + def peer(self): + """Identifies the peer that invoked the RPC being serviced. + + Returns: + A string identifying the peer that invoked the RPC being serviced. + The string format is determined by gRPC runtime. + """ + raise NotImplementedError() + + @abc.abstractmethod + def peer_identities(self): + """Gets one or more peer identity(s). + + Equivalent to + servicer_context.auth_context().get(servicer_context.peer_identity_key()) + + Returns: + An iterable of the identities, or None if the call is not + authenticated. Each identity is returned as a raw bytes type. + """ + raise NotImplementedError() + + @abc.abstractmethod + def peer_identity_key(self): + """The auth property used to identify the peer. + + For example, "x509_common_name" or "x509_subject_alternative_name" are + used to identify an SSL peer. + + Returns: + The auth property (string) that indicates the + peer identity, or None if the call is not authenticated. + """ + raise NotImplementedError() + + @abc.abstractmethod + def auth_context(self): + """Gets the auth context for the call. + + Returns: + A map of strings to an iterable of bytes for each auth property. + """ + raise NotImplementedError() + + def set_compression(self, compression): + """Set the compression algorithm to be used for the entire call. + + Args: + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + """ + raise NotImplementedError() + + @abc.abstractmethod + def send_initial_metadata(self, initial_metadata): + """Sends the initial metadata value to the client. + + This method need not be called by implementations if they have no + metadata to add to what the gRPC runtime will transmit. + + Args: + initial_metadata: The initial :term:`metadata`. + """ + raise NotImplementedError() + + @abc.abstractmethod + def set_trailing_metadata(self, trailing_metadata): + """Sets the trailing metadata for the RPC. + + Sets the trailing metadata to be sent upon completion of the RPC. + + If this method is invoked multiple times throughout the lifetime of an + RPC, the value supplied in the final invocation will be the value sent + over the wire. + + This method need not be called by implementations if they have no + metadata to add to what the gRPC runtime will transmit. 
+ + Args: + trailing_metadata: The trailing :term:`metadata`. + """ + raise NotImplementedError() + + def trailing_metadata(self): + """Access value to be used as trailing metadata upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The trailing :term:`metadata` for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def abort(self, code, details): + """Raises an exception to terminate the RPC with a non-OK status. + + The code and details passed as arguments will supercede any existing + ones. + + Args: + code: A StatusCode object to be sent to the client. + It must not be StatusCode.OK. + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + + Raises: + Exception: An exception is always raised to signal the abortion the + RPC to the gRPC runtime. + """ + raise NotImplementedError() + + @abc.abstractmethod + def abort_with_status(self, status): + """Raises an exception to terminate the RPC with a non-OK status. + + The status passed as argument will supercede any existing status code, + status message and trailing metadata. + + This is an EXPERIMENTAL API. + + Args: + status: A grpc.Status object. The status code in it must not be + StatusCode.OK. + + Raises: + Exception: An exception is always raised to signal the abortion the + RPC to the gRPC runtime. + """ + raise NotImplementedError() + + @abc.abstractmethod + def set_code(self, code): + """Sets the value to be used as status code upon RPC completion. + + This method need not be called by method implementations if they wish + the gRPC runtime to determine the status code of the RPC. + + Args: + code: A StatusCode object to be sent to the client. + """ + raise NotImplementedError() + + @abc.abstractmethod + def set_details(self, details): + """Sets the value to be used as detail string upon RPC completion. + + This method need not be called by method implementations if they have + no details to transmit. + + Args: + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + """ + raise NotImplementedError() + + def code(self): + """Accesses the value to be used as status code upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The StatusCode value for the RPC. + """ + raise NotImplementedError() + + def details(self): + """Accesses the value to be used as detail string upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The details string of the RPC. + """ + raise NotImplementedError() + + def disable_next_message_compression(self): + """Disables compression for the next response message. + + This method will override any compression configuration set during + server creation or set on the call. + """ + raise NotImplementedError() + + +##################### Service-Side Handler Interfaces ######################## + + +class RpcMethodHandler(abc.ABC): + """An implementation of a single RPC method. + + Attributes: + request_streaming: Whether the RPC supports exactly one request message + or any arbitrary number of request messages. + response_streaming: Whether the RPC supports exactly one response message + or any arbitrary number of response messages. + request_deserializer: A callable :term:`deserializer` that accepts a byte string and + returns an object suitable to be passed to this object's business + logic, or None to indicate that this object's business logic should be + passed the raw request bytes. 
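A sketch of a unary-unary behavior exercising the ServicerContext methods above; the metadata check and response payload are illustrative assumptions.

import grpc

def get_user(request, context):
    metadata = dict(context.invocation_metadata())
    if "authorization" not in metadata:
        # abort() raises, terminating the RPC with the given status.
        context.abort(grpc.StatusCode.UNAUTHENTICATED, "missing credentials")
    context.set_trailing_metadata((("x-served-by", "handler-1"),))
    return b""   # placeholder response; a real handler returns a message object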
+ response_serializer: A callable :term:`serializer` that accepts an object produced + by this object's business logic and returns a byte string, or None to + indicate that the byte strings produced by this object's business logic + should be transmitted on the wire as they are. + unary_unary: This object's application-specific business logic as a + callable value that takes a request value and a ServicerContext object + and returns a response value. Only non-None if both request_streaming + and response_streaming are False. + unary_stream: This object's application-specific business logic as a + callable value that takes a request value and a ServicerContext object + and returns an iterator of response values. Only non-None if + request_streaming is False and response_streaming is True. + stream_unary: This object's application-specific business logic as a + callable value that takes an iterator of request values and a + ServicerContext object and returns a response value. Only non-None if + request_streaming is True and response_streaming is False. + stream_stream: This object's application-specific business logic as a + callable value that takes an iterator of request values and a + ServicerContext object and returns an iterator of response values. + Only non-None if request_streaming and response_streaming are both + True. + """ + + +class HandlerCallDetails(abc.ABC): + """Describes an RPC that has just arrived for service. + + Attributes: + method: The method name of the RPC. + invocation_metadata: The :term:`metadata` sent by the client. + """ + + +class GenericRpcHandler(abc.ABC): + """An implementation of arbitrarily many RPC methods.""" + + @abc.abstractmethod + def service(self, handler_call_details): + """Returns the handler for servicing the RPC. + + Args: + handler_call_details: A HandlerCallDetails describing the RPC. + + Returns: + An RpcMethodHandler with which the RPC may be serviced if the + implementation chooses to service this RPC, or None otherwise. + """ + raise NotImplementedError() + + +class ServiceRpcHandler(GenericRpcHandler, metaclass=abc.ABCMeta): + """An implementation of RPC methods belonging to a service. + + A service handles RPC methods with structured names of the form + '/Service.Name/Service.Method', where 'Service.Name' is the value + returned by service_name(), and 'Service.Method' is the method + name. A service can have multiple method names, but only a single + service name. + """ + + @abc.abstractmethod + def service_name(self): + """Returns this service's name. + + Returns: + The service name. + """ + raise NotImplementedError() + + +#################### Service-Side Interceptor Interfaces ##################### + + +class ServerInterceptor(abc.ABC): + """Affords intercepting incoming RPCs on the service-side.""" + + @abc.abstractmethod + def intercept_service(self, continuation, handler_call_details): + """Intercepts incoming RPCs before handing them over to a handler. + + State can be passed from an interceptor to downstream interceptors + via contextvars. The first interceptor is called from an empty + contextvars.Context, and the same Context is used for downstream + interceptors and for the final handler call. Note that there are no + guarantees that interceptors and handlers will be called from the + same thread. 
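A server-interceptor sketch matching the intercept_service contract above: it rejects RPCs lacking an assumed "x-client-id" metadata entry and otherwise defers to the normal handler lookup (the rejection handler assumes unary-unary methods).

import grpc

class RequireClientId(grpc.ServerInterceptor):
    def __init__(self):
        def deny(request, context):
            context.abort(grpc.StatusCode.UNAUTHENTICATED, "x-client-id required")
        self._deny = grpc.unary_unary_rpc_method_handler(deny)

    def intercept_service(self, continuation, handler_call_details):
        metadata = dict(handler_call_details.invocation_metadata)
        if "x-client-id" in metadata:
            return continuation(handler_call_details)   # normal handler lookup
        return self._deny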
+ + Args: + continuation: A function that takes a HandlerCallDetails and + proceeds to invoke the next interceptor in the chain, if any, + or the RPC handler lookup logic, with the call details passed + as an argument, and returns an RpcMethodHandler instance if + the RPC is considered serviced, or None otherwise. + handler_call_details: A HandlerCallDetails describing the RPC. + + Returns: + An RpcMethodHandler with which the RPC may be serviced if the + interceptor chooses to service this RPC, or None otherwise. + """ + raise NotImplementedError() + + +############################# Server Interface ############################### + + +class Server(abc.ABC): + """Services RPCs.""" + + @abc.abstractmethod + def add_generic_rpc_handlers(self, generic_rpc_handlers): + """Registers GenericRpcHandlers with this Server. + + This method is only safe to call before the server is started. + + Args: + generic_rpc_handlers: An iterable of GenericRpcHandlers that will be + used to service RPCs. + """ + raise NotImplementedError() + + def add_registered_method_handlers(self, service_name, method_handlers): + """Registers GenericRpcHandlers with this Server. + + This method is only safe to call before the server is started. + + If the same method have both generic and registered handler, + registered handler will take precedence. + + Args: + service_name: The service name. + method_handlers: A dictionary that maps method names to corresponding + RpcMethodHandler. + """ + + @abc.abstractmethod + def add_insecure_port(self, address): + """Opens an insecure port for accepting RPCs. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. If the port is 0, + or not specified in the address, then gRPC runtime will choose a port. + + Returns: + An integer port on which server will accept RPC requests. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_secure_port(self, address, server_credentials): + """Opens a secure port for accepting RPCs. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. + if the port is 0, or not specified in the address, then gRPC + runtime will choose a port. + server_credentials: A ServerCredentials object. + + Returns: + An integer port on which server will accept RPC requests. + """ + raise NotImplementedError() + + @abc.abstractmethod + def start(self): + """Starts this Server. + + This method may only be called once. (i.e. it is not idempotent). + """ + raise NotImplementedError() + + @abc.abstractmethod + def stop(self, grace): + """Stops this Server. + + This method immediately stop service of new RPCs in all cases. + + If a grace period is specified, this method waits until all active + RPCs are finished or until the grace period is reached. RPCs that haven't + been terminated within the grace period are aborted. + If a grace period is not specified (by passing None for `grace`), + all existing RPCs are aborted immediately and this method + blocks until the last RPC handler terminates. + + This method is idempotent and may be called at any time. + Passing a smaller grace value in a subsequent call will have + the effect of stopping the Server sooner (passing None will + have the effect of stopping the server immediately). Passing + a larger grace value in a subsequent call *will not* have the + effect of stopping the server later (i.e. the most restrictive + grace value is used). 
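[Editorial aside, not part of the vendored source: a minimal sketch of the ServerInterceptor interface documented above. The class name and log message are invented for illustration.]

import logging

import grpc


class _LoggingInterceptor(grpc.ServerInterceptor):
    """Logs each incoming method name, then defers to normal handler lookup."""

    def intercept_service(self, continuation, handler_call_details):
        logging.info("Servicing RPC %s", handler_call_details.method)
        # Returning the continuation's result keeps the RPC serviced as usual;
        # returning None instead would decline to service it.
        return continuation(handler_call_details)

[An instance of such an interceptor would be passed via the interceptors argument of grpc.server(), shown further below.]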
+ + Args: + grace: A duration of time in seconds or None. + + Returns: + A threading.Event that will be set when this Server has completely + stopped, i.e. when running RPCs either complete or are aborted and + all handlers have terminated. + """ + raise NotImplementedError() + + def wait_for_termination(self, timeout=None): + """Block current thread until the server stops. + + This is an EXPERIMENTAL API. + + The wait will not consume computational resources during blocking, and + it will block until one of the two following conditions are met: + + 1) The server is stopped or terminated; + 2) A timeout occurs if timeout is not `None`. + + The timeout argument works in the same way as `threading.Event.wait()`. + https://docs.python.org/3/library/threading.html#threading.Event.wait + + Args: + timeout: A floating point number specifying a timeout for the + operation in seconds. + + Returns: + A bool indicates if the operation times out. + """ + raise NotImplementedError() + + +################################# Functions ################################ + + +def unary_unary_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None +): + """Creates an RpcMethodHandler for a unary-unary RPC method. + + Args: + behavior: The implementation of an RPC that accepts one request + and returns one response. + request_deserializer: An optional :term:`deserializer` for request deserialization. + response_serializer: An optional :term:`serializer` for response serialization. + + Returns: + An RpcMethodHandler object that is typically used by grpc.Server. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.RpcMethodHandler( + False, + False, + request_deserializer, + response_serializer, + behavior, + None, + None, + None, + ) + + +def unary_stream_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None +): + """Creates an RpcMethodHandler for a unary-stream RPC method. + + Args: + behavior: The implementation of an RPC that accepts one request + and returns an iterator of response values. + request_deserializer: An optional :term:`deserializer` for request deserialization. + response_serializer: An optional :term:`serializer` for response serialization. + + Returns: + An RpcMethodHandler object that is typically used by grpc.Server. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.RpcMethodHandler( + False, + True, + request_deserializer, + response_serializer, + None, + behavior, + None, + None, + ) + + +def stream_unary_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None +): + """Creates an RpcMethodHandler for a stream-unary RPC method. + + Args: + behavior: The implementation of an RPC that accepts an iterator of + request values and returns a single response value. + request_deserializer: An optional :term:`deserializer` for request deserialization. + response_serializer: An optional :term:`serializer` for response serialization. + + Returns: + An RpcMethodHandler object that is typically used by grpc.Server. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.RpcMethodHandler( + True, + False, + request_deserializer, + response_serializer, + None, + None, + behavior, + None, + ) + + +def stream_stream_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None +): + """Creates an RpcMethodHandler for a stream-stream RPC method. 
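[Editorial sketch of the start/stop lifecycle documented above; the address and grace period are placeholders.]

from concurrent import futures

import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
port = server.add_insecure_port("localhost:0")  # port 0: the runtime picks a free port
server.start()
# ... serve RPCs ...
# Reject new RPCs immediately, give in-flight RPCs up to 5 seconds, then abort them.
shutdown_event = server.stop(grace=5.0)  # returns a threading.Event
shutdown_event.wait()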
+ + Args: + behavior: The implementation of an RPC that accepts an iterator of + request values and returns an iterator of response values. + request_deserializer: An optional :term:`deserializer` for request deserialization. + response_serializer: An optional :term:`serializer` for response serialization. + + Returns: + An RpcMethodHandler object that is typically used by grpc.Server. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.RpcMethodHandler( + True, + True, + request_deserializer, + response_serializer, + None, + None, + None, + behavior, + ) + + +def method_handlers_generic_handler(service, method_handlers): + """Creates a GenericRpcHandler from RpcMethodHandlers. + + Args: + service: The name of the service that is implemented by the + method_handlers. + method_handlers: A dictionary that maps method names to corresponding + RpcMethodHandler. + + Returns: + A GenericRpcHandler. This is typically added to the grpc.Server object + with add_generic_rpc_handlers() before starting the server. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.DictionaryGenericHandler(service, method_handlers) + + +def ssl_channel_credentials( + root_certificates=None, private_key=None, certificate_chain=None +): + """Creates a ChannelCredentials for use with an SSL-enabled Channel. + + Args: + root_certificates: The PEM-encoded root certificates as a byte string, + or None to retrieve them from a default location chosen by gRPC + runtime. + private_key: The PEM-encoded private key as a byte string, or None if no + private key should be used. + certificate_chain: The PEM-encoded certificate chain as a byte string + to use or None if no certificate chain should be used. + + Returns: + A ChannelCredentials for use with an SSL-enabled Channel. + """ + return ChannelCredentials( + _cygrpc.SSLChannelCredentials( + root_certificates, private_key, certificate_chain + ) + ) + + +def xds_channel_credentials(fallback_credentials=None): + """Creates a ChannelCredentials for use with xDS. This is an EXPERIMENTAL + API. + + Args: + fallback_credentials: Credentials to use in case it is not possible to + establish a secure connection via xDS. If no fallback_credentials + argument is supplied, a default SSLChannelCredentials is used. + """ + fallback_credentials = ( + ssl_channel_credentials() + if fallback_credentials is None + else fallback_credentials + ) + return ChannelCredentials( + _cygrpc.XDSChannelCredentials(fallback_credentials._credentials) + ) + + +def metadata_call_credentials(metadata_plugin, name=None): + """Construct CallCredentials from an AuthMetadataPlugin. + + Args: + metadata_plugin: An AuthMetadataPlugin to use for authentication. + name: An optional name for the plugin. + + Returns: + A CallCredentials. + """ + from grpc import _plugin_wrapping # pylint: disable=cyclic-import + + return _plugin_wrapping.metadata_plugin_call_credentials( + metadata_plugin, name + ) + + +def access_token_call_credentials(access_token): + """Construct CallCredentials from an access token. + + Args: + access_token: A string to place directly in the http request + authorization header, for example + "authorization: Bearer ". + + Returns: + A CallCredentials. 
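[Editorial sketch combining the handler factories with method_handlers_generic_handler; the service and method names are invented.]

import grpc

def echo(request, context):
    # With no (de)serializers configured, request and response are raw bytes.
    return request

echo_handler = grpc.unary_unary_rpc_method_handler(echo)
generic_handler = grpc.method_handlers_generic_handler(
    "demo.EchoService", {"Echo": echo_handler}
)
# Registered before server.start(), e.g.:
# server.add_generic_rpc_handlers((generic_handler,))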
+ """ + from grpc import _auth # pylint: disable=cyclic-import + from grpc import _plugin_wrapping # pylint: disable=cyclic-import + + return _plugin_wrapping.metadata_plugin_call_credentials( + _auth.AccessTokenAuthMetadataPlugin(access_token), None + ) + + +def composite_call_credentials(*call_credentials): + """Compose multiple CallCredentials to make a new CallCredentials. + + Args: + *call_credentials: At least two CallCredentials objects. + + Returns: + A CallCredentials object composed of the given CallCredentials objects. + """ + return CallCredentials( + _cygrpc.CompositeCallCredentials( + tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials + ) + ) + ) + + +def composite_channel_credentials(channel_credentials, *call_credentials): + """Compose a ChannelCredentials and one or more CallCredentials objects. + + Args: + channel_credentials: A ChannelCredentials object. + *call_credentials: One or more CallCredentials objects. + + Returns: + A ChannelCredentials composed of the given ChannelCredentials and + CallCredentials objects. + """ + return ChannelCredentials( + _cygrpc.CompositeChannelCredentials( + tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials + ), + channel_credentials._credentials, + ) + ) + + +def ssl_server_credentials( + private_key_certificate_chain_pairs, + root_certificates=None, + require_client_auth=False, +): + """Creates a ServerCredentials for use with an SSL-enabled Server. + + Args: + private_key_certificate_chain_pairs: A list of pairs of the form + [PEM-encoded private key, PEM-encoded certificate chain]. + root_certificates: An optional byte string of PEM-encoded client root + certificates that the server will use to verify client authentication. + If omitted, require_client_auth must also be False. + require_client_auth: A boolean indicating whether or not to require + clients to be authenticated. May only be True if root_certificates + is not None. + + Returns: + A ServerCredentials for use with an SSL-enabled Server. Typically, this + object is an argument to add_secure_port() method during server setup. + """ + if not private_key_certificate_chain_pairs: + raise ValueError( + "At least one private key-certificate chain pair is required!" + ) + elif require_client_auth and root_certificates is None: + raise ValueError( + "Illegal to require client auth without providing root" + " certificates!" + ) + else: + return ServerCredentials( + _cygrpc.server_credentials_ssl( + root_certificates, + [ + _cygrpc.SslPemKeyCertPair(key, pem) + for key, pem in private_key_certificate_chain_pairs + ], + require_client_auth, + ) + ) + + +def xds_server_credentials(fallback_credentials): + """Creates a ServerCredentials for use with xDS. This is an EXPERIMENTAL + API. + + Args: + fallback_credentials: Credentials to use in case it is not possible to + establish a secure connection via xDS. No default value is provided. + """ + return ServerCredentials( + _cygrpc.xds_server_credentials(fallback_credentials._credentials) + ) + + +def insecure_server_credentials(): + """Creates a credentials object directing the server to use no credentials. + This is an EXPERIMENTAL API. + + This object cannot be used directly in a call to `add_secure_port`. + Instead, it should be used to construct other credentials objects, e.g. + with xds_server_credentials. 
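[Editorial client-side sketch combining the credential factories above; the target host and token are placeholders.]

import grpc

channel_credentials = grpc.ssl_channel_credentials()  # default root certificates
call_credentials = grpc.access_token_call_credentials("placeholder-token")
composite_credentials = grpc.composite_channel_credentials(
    channel_credentials, call_credentials
)
channel = grpc.secure_channel("greeter.example.com:443", composite_credentials)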
+ """ + return ServerCredentials(_cygrpc.insecure_server_credentials()) + + +def ssl_server_certificate_configuration( + private_key_certificate_chain_pairs, root_certificates=None +): + """Creates a ServerCertificateConfiguration for use with a Server. + + Args: + private_key_certificate_chain_pairs: A collection of pairs of + the form [PEM-encoded private key, PEM-encoded certificate + chain]. + root_certificates: An optional byte string of PEM-encoded client root + certificates that the server will use to verify client authentication. + + Returns: + A ServerCertificateConfiguration that can be returned in the certificate + configuration fetching callback. + """ + if private_key_certificate_chain_pairs: + return ServerCertificateConfiguration( + _cygrpc.server_certificate_config_ssl( + root_certificates, + [ + _cygrpc.SslPemKeyCertPair(key, pem) + for key, pem in private_key_certificate_chain_pairs + ], + ) + ) + else: + raise ValueError( + "At least one private key-certificate chain pair is required!" + ) + + +def dynamic_ssl_server_credentials( + initial_certificate_configuration, + certificate_configuration_fetcher, + require_client_authentication=False, +): + """Creates a ServerCredentials for use with an SSL-enabled Server. + + Args: + initial_certificate_configuration (ServerCertificateConfiguration): The + certificate configuration with which the server will be initialized. + certificate_configuration_fetcher (callable): A callable that takes no + arguments and should return a ServerCertificateConfiguration to + replace the server's current certificate, or None for no change + (i.e., the server will continue its current certificate + config). The library will call this callback on *every* new + client connection before starting the TLS handshake with the + client, thus allowing the user application to optionally + return a new ServerCertificateConfiguration that the server will then + use for the handshake. + require_client_authentication: A boolean indicating whether or not to + require clients to be authenticated. + + Returns: + A ServerCredentials. + """ + return ServerCredentials( + _cygrpc.server_credentials_ssl_dynamic_cert_config( + initial_certificate_configuration, + certificate_configuration_fetcher, + require_client_authentication, + ) + ) + + +@enum.unique +class LocalConnectionType(enum.Enum): + """Types of local connection for local credential creation. + + Attributes: + UDS: Unix domain socket connections + LOCAL_TCP: Local TCP connections. + """ + + UDS = _cygrpc.LocalConnectionType.uds + LOCAL_TCP = _cygrpc.LocalConnectionType.local_tcp + + +def local_channel_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP): + """Creates a local ChannelCredentials used for local connections. + + This is an EXPERIMENTAL API. + + Local credentials are used by local TCP endpoints (e.g. localhost:10000) + also UDS connections. + + The connections created by local channel credentials are not + encrypted, but will be checked if they are local or not. + The UDS connections are considered secure by providing peer authentication + and data confidentiality while TCP connections are considered insecure. + + It is allowed to transmit call credentials over connections created by + local channel credentials. + + Local channel credentials are useful for 1) eliminating insecure_channel usage; + 2) enable unit testing for call credentials without setting up secrets. 
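[Editorial server-side TLS sketch for ssl_server_credentials and add_secure_port; the key and certificate file names are assumptions.]

import grpc

with open("server.key", "rb") as key_file:
    private_key = key_file.read()
with open("server.crt", "rb") as chain_file:
    certificate_chain = chain_file.read()

server_credentials = grpc.ssl_server_credentials([(private_key, certificate_chain)])
# port = server.add_secure_port("[::]:50051", server_credentials)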
+ + Args: + local_connect_type: Local connection type (either + grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP) + + Returns: + A ChannelCredentials for use with a local Channel + """ + return ChannelCredentials( + _cygrpc.channel_credentials_local(local_connect_type.value) + ) + + +def local_server_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP): + """Creates a local ServerCredentials used for local connections. + + This is an EXPERIMENTAL API. + + Local credentials are used by local TCP endpoints (e.g. localhost:10000) + also UDS connections. + + The connections created by local server credentials are not + encrypted, but will be checked if they are local or not. + The UDS connections are considered secure by providing peer authentication + and data confidentiality while TCP connections are considered insecure. + + It is allowed to transmit call credentials over connections created by local + server credentials. + + Local server credentials are useful for 1) eliminating insecure_channel usage; + 2) enable unit testing for call credentials without setting up secrets. + + Args: + local_connect_type: Local connection type (either + grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP) + + Returns: + A ServerCredentials for use with a local Server + """ + return ServerCredentials( + _cygrpc.server_credentials_local(local_connect_type.value) + ) + + +def alts_channel_credentials(service_accounts=None): + """Creates a ChannelCredentials for use with an ALTS-enabled Channel. + + This is an EXPERIMENTAL API. + ALTS credentials API can only be used in GCP environment as it relies on + handshaker service being available. For more info about ALTS see + https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security + + Args: + service_accounts: A list of server identities accepted by the client. + If target service accounts are provided and none of them matches the + peer identity of the server, handshake will fail. The arg can be empty + if the client does not have any information about trusted server + identity. + Returns: + A ChannelCredentials for use with an ALTS-enabled Channel + """ + return ChannelCredentials( + _cygrpc.channel_credentials_alts(service_accounts or []) + ) + + +def alts_server_credentials(): + """Creates a ServerCredentials for use with an ALTS-enabled connection. + + This is an EXPERIMENTAL API. + ALTS credentials API can only be used in GCP environment as it relies on + handshaker service being available. For more info about ALTS see + https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security + + Returns: + A ServerCredentials for use with an ALTS-enabled Server + """ + return ServerCredentials(_cygrpc.server_credentials_alts()) + + +def compute_engine_channel_credentials(call_credentials): + """Creates a compute engine channel credential. + + This credential can only be used in a GCP environment as it relies on + a handshaker service. For more info about ALTS, see + https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security + + This channel credential is expected to be used as part of a composite + credential in conjunction with a call credentials that authenticates the + VM's default service account. If used with any other sort of call + credential, the connection may suddenly and unexpectedly begin failing RPCs. 
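[Editorial sketch of the matching pair of local credentials described above, e.g. for tests against a localhost server; the address is illustrative.]

import grpc

server_credentials = grpc.local_server_credentials(grpc.LocalConnectionType.LOCAL_TCP)
channel_credentials = grpc.local_channel_credentials(grpc.LocalConnectionType.LOCAL_TCP)
# server.add_secure_port("localhost:50051", server_credentials)
channel = grpc.secure_channel("localhost:50051", channel_credentials)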
+ """ + return ChannelCredentials( + _cygrpc.channel_credentials_compute_engine( + call_credentials._credentials + ) + ) + + +def channel_ready_future(channel): + """Creates a Future that tracks when a Channel is ready. + + Cancelling the Future does not affect the channel's state machine. + It merely decouples the Future from channel state machine. + + Args: + channel: A Channel object. + + Returns: + A Future object that matures when the channel connectivity is + ChannelConnectivity.READY. + """ + from grpc import _utilities # pylint: disable=cyclic-import + + return _utilities.channel_ready_future(channel) + + +def insecure_channel(target, options=None, compression=None): + """Creates an insecure Channel to a server. + + The returned Channel is thread-safe. + + Args: + target: The server address + options: An optional list of key-value pairs (:term:`channel_arguments` + in gRPC Core runtime) to configure the channel. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + + Returns: + A Channel. + """ + from grpc import _channel # pylint: disable=cyclic-import + + return _channel.Channel( + target, () if options is None else options, None, compression + ) + + +def secure_channel(target, credentials, options=None, compression=None): + """Creates a secure Channel to a server. + + The returned Channel is thread-safe. + + Args: + target: The server address. + credentials: A ChannelCredentials instance. + options: An optional list of key-value pairs (:term:`channel_arguments` + in gRPC Core runtime) to configure the channel. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + + Returns: + A Channel. + """ + from grpc import _channel # pylint: disable=cyclic-import + from grpc.experimental import _insecure_channel_credentials + + if credentials._credentials is _insecure_channel_credentials: + raise ValueError( + "secure_channel cannot be called with insecure credentials." + + " Call insecure_channel instead." + ) + return _channel.Channel( + target, + () if options is None else options, + credentials._credentials, + compression, + ) + + +def intercept_channel(channel, *interceptors): + """Intercepts a channel through a set of interceptors. + + Args: + channel: A Channel. + interceptors: Zero or more objects of type + UnaryUnaryClientInterceptor, + UnaryStreamClientInterceptor, + StreamUnaryClientInterceptor, or + StreamStreamClientInterceptor. + Interceptors are given control in the order they are listed. + + Returns: + A Channel that intercepts each invocation via the provided interceptors. + + Raises: + TypeError: If interceptor does not derive from any of + UnaryUnaryClientInterceptor, + UnaryStreamClientInterceptor, + StreamUnaryClientInterceptor, or + StreamStreamClientInterceptor. + """ + from grpc import _interceptor # pylint: disable=cyclic-import + + return _interceptor.intercept_channel(channel, *interceptors) + + +def server( + thread_pool, + handlers=None, + interceptors=None, + options=None, + maximum_concurrent_rpcs=None, + compression=None, + xds=False, +): + """Creates a Server with which RPCs can be serviced. + + Args: + thread_pool: A futures.ThreadPoolExecutor to be used by the Server + to execute RPC handlers. + handlers: An optional list of GenericRpcHandlers used for executing RPCs. + More handlers may be added by calling add_generic_rpc_handlers any time + before the server is started. 
+ interceptors: An optional list of ServerInterceptor objects that observe + and optionally manipulate the incoming RPCs before handing them over to + handlers. The interceptors are given control in the order they are + specified. This is an EXPERIMENTAL API. + options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime) + to configure the channel. + maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server + will service before returning RESOURCE_EXHAUSTED status, or None to + indicate no limit. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. This compression algorithm will be used for the + lifetime of the server unless overridden. + xds: If set to true, retrieves server configuration via xDS. This is an + EXPERIMENTAL option. + + Returns: + A Server object. + """ + from grpc import _server # pylint: disable=cyclic-import + + return _server.create_server( + thread_pool, + () if handlers is None else handlers, + () if interceptors is None else interceptors, + () if options is None else options, + maximum_concurrent_rpcs, + compression, + xds, + ) + + +@contextlib.contextmanager +def _create_servicer_context(rpc_event, state, request_deserializer): + from grpc import _server # pylint: disable=cyclic-import + + context = _server._Context(rpc_event, state, request_deserializer) + yield context + context._finalize_state() # pylint: disable=protected-access + + +@enum.unique +class Compression(enum.IntEnum): + """Indicates the compression method to be used for an RPC. + + Attributes: + NoCompression: Do not use compression algorithm. + Deflate: Use "Deflate" compression algorithm. + Gzip: Use "Gzip" compression algorithm. + """ + + NoCompression = _compression.NoCompression + Deflate = _compression.Deflate + Gzip = _compression.Gzip + + +################################### __all__ ################################# + +__all__ = ( + "FutureTimeoutError", + "FutureCancelledError", + "Future", + "ChannelConnectivity", + "StatusCode", + "Status", + "RpcError", + "RpcContext", + "Call", + "ChannelCredentials", + "CallCredentials", + "AuthMetadataContext", + "AuthMetadataPluginCallback", + "AuthMetadataPlugin", + "Compression", + "ClientCallDetails", + "ServerCertificateConfiguration", + "ServerCredentials", + "LocalConnectionType", + "UnaryUnaryMultiCallable", + "UnaryStreamMultiCallable", + "StreamUnaryMultiCallable", + "StreamStreamMultiCallable", + "UnaryUnaryClientInterceptor", + "UnaryStreamClientInterceptor", + "StreamUnaryClientInterceptor", + "StreamStreamClientInterceptor", + "Channel", + "ServicerContext", + "RpcMethodHandler", + "HandlerCallDetails", + "GenericRpcHandler", + "ServiceRpcHandler", + "Server", + "ServerInterceptor", + "unary_unary_rpc_method_handler", + "unary_stream_rpc_method_handler", + "stream_unary_rpc_method_handler", + "stream_stream_rpc_method_handler", + "method_handlers_generic_handler", + "ssl_channel_credentials", + "metadata_call_credentials", + "access_token_call_credentials", + "composite_call_credentials", + "composite_channel_credentials", + "compute_engine_channel_credentials", + "local_channel_credentials", + "local_server_credentials", + "alts_channel_credentials", + "alts_server_credentials", + "ssl_server_credentials", + "ssl_server_certificate_configuration", + "dynamic_ssl_server_credentials", + "channel_ready_future", + "insecure_channel", + "secure_channel", + "intercept_channel", + "server", + "protos", + "services", + "protos_and_services", + "xds_channel_credentials", + 
"xds_server_credentials", + "insecure_server_credentials", +) + +############################### Extension Shims ################################ + +# Here to maintain backwards compatibility; avoid using these in new code! +try: + import grpc_tools + + sys.modules.update({"grpc.tools": grpc_tools}) +except ImportError: + pass +try: + import grpc_health + + sys.modules.update({"grpc.health": grpc_health}) +except ImportError: + pass +try: + import grpc_reflection + + sys.modules.update({"grpc.reflection": grpc_reflection}) +except ImportError: + pass + +# Prevents import order issue in the case of renamed path. +if sys.version_info >= (3, 6) and __name__ == "grpc": + from grpc import aio # pylint: disable=ungrouped-imports + + sys.modules.update({"grpc.aio": aio}) diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6394418c9fcccd550f33b7997d19f213537477aa Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_auth.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_auth.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40c65f4d0d284b246da90236ed73a8d93c493bec Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_auth.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_channel.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_channel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13b386a08f175f08c27014175dc64c63f81eaa66 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_channel.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_common.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91d4054a586ab997d25aba0862cef83083301017 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_common.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_compression.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_compression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ac5f9ca70e714dd90c2d845e0379f26132d34ce Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_compression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_grpcio_metadata.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_grpcio_metadata.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b56d459e971bfc24c4ad4892b2abed0acb99bd7 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_grpcio_metadata.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_interceptor.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_interceptor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f91968e4f311d4707638fb8b831c60bd2728456f Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_interceptor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_observability.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_observability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1efff48597f3aa12adec2a562ae9ea71392cf732 Binary 
files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_observability.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_plugin_wrapping.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_plugin_wrapping.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b33198045d4b0a89589b4adc305b50ca369a91b Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_plugin_wrapping.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_runtime_protos.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_runtime_protos.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44348acbe1c0a867e924ab1e19bff8979c4fb81b Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_runtime_protos.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_server.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_server.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18274257dab91a47942e34db3ea7eb4a469ea7d0 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_server.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_simple_stubs.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_simple_stubs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d46e0f5bfd2ca7434faf822eaa26d07efb2fc814 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_simple_stubs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_typing.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_typing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b02cfed99258661fa7ff2b5a7147b9aae130f2 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_typing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/__pycache__/_utilities.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/__pycache__/_utilities.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d264e5983695b607976b451e62f900e4fe4673ee Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/__pycache__/_utilities.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/_auth.py b/MLPY/Lib/site-packages/grpc/_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..0af206f7f56a0b9faf8f2f4506b99b1f0fed647c --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_auth.py @@ -0,0 +1,80 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""GRPCAuthMetadataPlugins for standard authentication.""" + +import inspect +from typing import Any, Optional + +import grpc + + +def _sign_request( + callback: grpc.AuthMetadataPluginCallback, + token: Optional[str], + error: Optional[Exception], +): + metadata = (("authorization", "Bearer {}".format(token)),) + callback(metadata, error) + + +class GoogleCallCredentials(grpc.AuthMetadataPlugin): + """Metadata wrapper for GoogleCredentials from the oauth2client library.""" + + _is_jwt: bool + _credentials: Any + + # TODO(xuanwn): Give credentials an actual type. + def __init__(self, credentials: Any): + self._credentials = credentials + # Hack to determine if these are JWT creds and we need to pass + # additional_claims when getting a token + self._is_jwt = ( + "additional_claims" + in inspect.getfullargspec(credentials.get_access_token).args + ) + + def __call__( + self, + context: grpc.AuthMetadataContext, + callback: grpc.AuthMetadataPluginCallback, + ): + try: + if self._is_jwt: + access_token = self._credentials.get_access_token( + additional_claims={ + "aud": context.service_url # pytype: disable=attribute-error + } + ).access_token + else: + access_token = self._credentials.get_access_token().access_token + except Exception as exception: # pylint: disable=broad-except + _sign_request(callback, None, exception) + else: + _sign_request(callback, access_token, None) + + +class AccessTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin): + """Metadata wrapper for raw access token credentials.""" + + _access_token: str + + def __init__(self, access_token: str): + self._access_token = access_token + + def __call__( + self, + context: grpc.AuthMetadataContext, + callback: grpc.AuthMetadataPluginCallback, + ): + _sign_request(callback, self._access_token, None) diff --git a/MLPY/Lib/site-packages/grpc/_channel.py b/MLPY/Lib/site-packages/grpc/_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..30ba1361f0da821fb2706ad3e4cbae245b66bd68 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_channel.py @@ -0,0 +1,2267 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Invocation-side implementation of gRPC Python.""" + +import copy +import functools +import logging +import os +import sys +import threading +import time +import types +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import grpc # pytype: disable=pyi-error +from grpc import _common # pytype: disable=pyi-error +from grpc import _compression # pytype: disable=pyi-error +from grpc import _grpcio_metadata # pytype: disable=pyi-error +from grpc import _observability # pytype: disable=pyi-error +from grpc._cython import cygrpc +from grpc._typing import ChannelArgumentType +from grpc._typing import DeserializingFunction +from grpc._typing import IntegratedCallFactory +from grpc._typing import MetadataType +from grpc._typing import NullaryCallbackType +from grpc._typing import ResponseType +from grpc._typing import SerializingFunction +from grpc._typing import UserTag +import grpc.experimental # pytype: disable=pyi-error + +_LOGGER = logging.getLogger(__name__) + +_USER_AGENT = "grpc-python/{}".format(_grpcio_metadata.__version__) + +_EMPTY_FLAGS = 0 + +# NOTE(rbellevi): No guarantees are given about the maintenance of this +# environment variable. +_DEFAULT_SINGLE_THREADED_UNARY_STREAM = ( + os.getenv("GRPC_SINGLE_THREADED_UNARY_STREAM") is not None +) + +_UNARY_UNARY_INITIAL_DUE = ( + cygrpc.OperationType.send_initial_metadata, + cygrpc.OperationType.send_message, + cygrpc.OperationType.send_close_from_client, + cygrpc.OperationType.receive_initial_metadata, + cygrpc.OperationType.receive_message, + cygrpc.OperationType.receive_status_on_client, +) +_UNARY_STREAM_INITIAL_DUE = ( + cygrpc.OperationType.send_initial_metadata, + cygrpc.OperationType.send_message, + cygrpc.OperationType.send_close_from_client, + cygrpc.OperationType.receive_initial_metadata, + cygrpc.OperationType.receive_status_on_client, +) +_STREAM_UNARY_INITIAL_DUE = ( + cygrpc.OperationType.send_initial_metadata, + cygrpc.OperationType.receive_initial_metadata, + cygrpc.OperationType.receive_message, + cygrpc.OperationType.receive_status_on_client, +) +_STREAM_STREAM_INITIAL_DUE = ( + cygrpc.OperationType.send_initial_metadata, + cygrpc.OperationType.receive_initial_metadata, + cygrpc.OperationType.receive_status_on_client, +) + +_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = ( + "Exception calling channel subscription callback!" 
+) + +_OK_RENDEZVOUS_REPR_FORMAT = ( + '<{} of RPC that terminated with:\n\tstatus = {}\n\tdetails = "{}"\n>' +) + +_NON_OK_RENDEZVOUS_REPR_FORMAT = ( + "<{} of RPC that terminated with:\n" + "\tstatus = {}\n" + '\tdetails = "{}"\n' + '\tdebug_error_string = "{}"\n' + ">" +) + + +def _deadline(timeout: Optional[float]) -> Optional[float]: + return None if timeout is None else time.time() + timeout + + +def _unknown_code_details( + unknown_cygrpc_code: Optional[grpc.StatusCode], details: Optional[str] +) -> str: + return 'Server sent unknown code {} and details "{}"'.format( + unknown_cygrpc_code, details + ) + + +class _RPCState(object): + condition: threading.Condition + due: Set[cygrpc.OperationType] + initial_metadata: Optional[MetadataType] + response: Any + trailing_metadata: Optional[MetadataType] + code: Optional[grpc.StatusCode] + details: Optional[str] + debug_error_string: Optional[str] + cancelled: bool + callbacks: List[NullaryCallbackType] + fork_epoch: Optional[int] + rpc_start_time: Optional[float] # In relative seconds + rpc_end_time: Optional[float] # In relative seconds + method: Optional[str] + target: Optional[str] + + def __init__( + self, + due: Sequence[cygrpc.OperationType], + initial_metadata: Optional[MetadataType], + trailing_metadata: Optional[MetadataType], + code: Optional[grpc.StatusCode], + details: Optional[str], + ): + # `condition` guards all members of _RPCState. `notify_all` is called on + # `condition` when the state of the RPC has changed. + self.condition = threading.Condition() + + # The cygrpc.OperationType objects representing events due from the RPC's + # completion queue. If an operation is in `due`, it is guaranteed that + # `operate()` has been called on a corresponding operation. But the + # converse is not true. That is, in the case of failed `operate()` + # calls, there may briefly be events in `due` that do not correspond to + # operations submitted to Core. + self.due = set(due) + self.initial_metadata = initial_metadata + self.response = None + self.trailing_metadata = trailing_metadata + self.code = code + self.details = details + self.debug_error_string = None + # The following three fields are used for observability. + # Updates to those fields do not trigger self.condition. + self.rpc_start_time = None + self.rpc_end_time = None + self.method = None + self.target = None + + # The semantics of grpc.Future.cancel and grpc.Future.cancelled are + # slightly wonky, so they have to be tracked separately from the rest of the + # result of the RPC. This field tracks whether cancellation was requested + # prior to termination of the RPC. 
+ self.cancelled = False + self.callbacks = [] + self.fork_epoch = cygrpc.get_fork_epoch() + + def reset_postfork_child(self): + self.condition = threading.Condition() + + +def _abort(state: _RPCState, code: grpc.StatusCode, details: str) -> None: + if state.code is None: + state.code = code + state.details = details + if state.initial_metadata is None: + state.initial_metadata = () + state.trailing_metadata = () + + +def _handle_event( + event: cygrpc.BaseEvent, + state: _RPCState, + response_deserializer: Optional[DeserializingFunction], +) -> List[NullaryCallbackType]: + callbacks = [] + for batch_operation in event.batch_operations: + operation_type = batch_operation.type() + state.due.remove(operation_type) + if operation_type == cygrpc.OperationType.receive_initial_metadata: + state.initial_metadata = batch_operation.initial_metadata() + elif operation_type == cygrpc.OperationType.receive_message: + serialized_response = batch_operation.message() + if serialized_response is not None: + response = _common.deserialize( + serialized_response, response_deserializer + ) + if response is None: + details = "Exception deserializing response!" + _abort(state, grpc.StatusCode.INTERNAL, details) + else: + state.response = response + elif operation_type == cygrpc.OperationType.receive_status_on_client: + state.trailing_metadata = batch_operation.trailing_metadata() + if state.code is None: + code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get( + batch_operation.code() + ) + if code is None: + state.code = grpc.StatusCode.UNKNOWN + state.details = _unknown_code_details( + code, batch_operation.details() + ) + else: + state.code = code + state.details = batch_operation.details() + state.debug_error_string = batch_operation.error_string() + state.rpc_end_time = time.perf_counter() + _observability.maybe_record_rpc_latency(state) + callbacks.extend(state.callbacks) + state.callbacks = None + return callbacks + + +def _event_handler( + state: _RPCState, response_deserializer: Optional[DeserializingFunction] +) -> UserTag: + def handle_event(event): + with state.condition: + callbacks = _handle_event(event, state, response_deserializer) + state.condition.notify_all() + done = not state.due + for callback in callbacks: + try: + callback() + except Exception as e: # pylint: disable=broad-except + # NOTE(rbellevi): We suppress but log errors here so as not to + # kill the channel spin thread. + logging.error( + "Exception in callback %s: %s", repr(callback.func), repr(e) + ) + return done and state.fork_epoch >= cygrpc.get_fork_epoch() + + return handle_event + + +# TODO(xuanwn): Create a base class for IntegratedCall and SegregatedCall. +# pylint: disable=too-many-statements +def _consume_request_iterator( + request_iterator: Iterator, + state: _RPCState, + call: Union[cygrpc.IntegratedCall, cygrpc.SegregatedCall], + request_serializer: SerializingFunction, + event_handler: Optional[UserTag], +) -> None: + """Consume a request supplied by the user.""" + + def consume_request_iterator(): # pylint: disable=too-many-branches + # Iterate over the request iterator until it is exhausted or an error + # condition is encountered. + while True: + return_from_user_request_generator_invoked = False + try: + # The thread may die in user-code. Do not block fork for this. 
+ cygrpc.enter_user_request_generator() + request = next(request_iterator) + except StopIteration: + break + except Exception: # pylint: disable=broad-except + cygrpc.return_from_user_request_generator() + return_from_user_request_generator_invoked = True + code = grpc.StatusCode.UNKNOWN + details = "Exception iterating requests!" + _LOGGER.exception(details) + call.cancel( + _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details + ) + _abort(state, code, details) + return + finally: + if not return_from_user_request_generator_invoked: + cygrpc.return_from_user_request_generator() + serialized_request = _common.serialize(request, request_serializer) + with state.condition: + if state.code is None and not state.cancelled: + if serialized_request is None: + code = grpc.StatusCode.INTERNAL + details = "Exception serializing request!" + call.cancel( + _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], + details, + ) + _abort(state, code, details) + return + else: + state.due.add(cygrpc.OperationType.send_message) + operations = ( + cygrpc.SendMessageOperation( + serialized_request, _EMPTY_FLAGS + ), + ) + operating = call.operate(operations, event_handler) + if not operating: + state.due.remove(cygrpc.OperationType.send_message) + return + + def _done(): + return ( + state.code is not None + or cygrpc.OperationType.send_message + not in state.due + ) + + _common.wait( + state.condition.wait, + _done, + spin_cb=functools.partial( + cygrpc.block_if_fork_in_progress, state + ), + ) + if state.code is not None: + return + else: + return + with state.condition: + if state.code is None: + state.due.add(cygrpc.OperationType.send_close_from_client) + operations = ( + cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS), + ) + operating = call.operate(operations, event_handler) + if not operating: + state.due.remove( + cygrpc.OperationType.send_close_from_client + ) + + consumption_thread = cygrpc.ForkManagedThread( + target=consume_request_iterator + ) + consumption_thread.setDaemon(True) + consumption_thread.start() + + +def _rpc_state_string(class_name: str, rpc_state: _RPCState) -> str: + """Calculates error string for RPC.""" + with rpc_state.condition: + if rpc_state.code is None: + return "<{} object>".format(class_name) + elif rpc_state.code is grpc.StatusCode.OK: + return _OK_RENDEZVOUS_REPR_FORMAT.format( + class_name, rpc_state.code, rpc_state.details + ) + else: + return _NON_OK_RENDEZVOUS_REPR_FORMAT.format( + class_name, + rpc_state.code, + rpc_state.details, + rpc_state.debug_error_string, + ) + + +class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future): + """An RPC error not tied to the execution of a particular RPC. + + The RPC represented by the state object must not be in-progress or + cancelled. + + Attributes: + _state: An instance of _RPCState. 
+ """ + + _state: _RPCState + + def __init__(self, state: _RPCState): + with state.condition: + self._state = _RPCState( + (), + copy.deepcopy(state.initial_metadata), + copy.deepcopy(state.trailing_metadata), + state.code, + copy.deepcopy(state.details), + ) + self._state.response = copy.copy(state.response) + self._state.debug_error_string = copy.copy(state.debug_error_string) + + def initial_metadata(self) -> Optional[MetadataType]: + return self._state.initial_metadata + + def trailing_metadata(self) -> Optional[MetadataType]: + return self._state.trailing_metadata + + def code(self) -> Optional[grpc.StatusCode]: + return self._state.code + + def details(self) -> Optional[str]: + return _common.decode(self._state.details) + + def debug_error_string(self) -> Optional[str]: + return _common.decode(self._state.debug_error_string) + + def _repr(self) -> str: + return _rpc_state_string(self.__class__.__name__, self._state) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + def cancel(self) -> bool: + """See grpc.Future.cancel.""" + return False + + def cancelled(self) -> bool: + """See grpc.Future.cancelled.""" + return False + + def running(self) -> bool: + """See grpc.Future.running.""" + return False + + def done(self) -> bool: + """See grpc.Future.done.""" + return True + + def result( + self, timeout: Optional[float] = None + ) -> Any: # pylint: disable=unused-argument + """See grpc.Future.result.""" + raise self + + def exception( + self, timeout: Optional[float] = None # pylint: disable=unused-argument + ) -> Optional[Exception]: + """See grpc.Future.exception.""" + return self + + def traceback( + self, timeout: Optional[float] = None # pylint: disable=unused-argument + ) -> Optional[types.TracebackType]: + """See grpc.Future.traceback.""" + try: + raise self + except grpc.RpcError: + return sys.exc_info()[2] + + def add_done_callback( + self, + fn: Callable[[grpc.Future], None], + timeout: Optional[float] = None, # pylint: disable=unused-argument + ) -> None: + """See grpc.Future.add_done_callback.""" + fn(self) + + +class _Rendezvous(grpc.RpcError, grpc.RpcContext): + """An RPC iterator. + + Attributes: + _state: An instance of _RPCState. + _call: An instance of SegregatedCall or IntegratedCall. + In either case, the _call object is expected to have operate, cancel, + and next_event methods. + _response_deserializer: A callable taking bytes and return a Python + object. + _deadline: A float representing the deadline of the RPC in seconds. Or + possibly None, to represent an RPC with no deadline at all. 
+ """ + + _state: _RPCState + _call: Union[cygrpc.SegregatedCall, cygrpc.IntegratedCall] + _response_deserializer: Optional[DeserializingFunction] + _deadline: Optional[float] + + def __init__( + self, + state: _RPCState, + call: Union[cygrpc.SegregatedCall, cygrpc.IntegratedCall], + response_deserializer: Optional[DeserializingFunction], + deadline: Optional[float], + ): + super(_Rendezvous, self).__init__() + self._state = state + self._call = call + self._response_deserializer = response_deserializer + self._deadline = deadline + + def is_active(self) -> bool: + """See grpc.RpcContext.is_active""" + with self._state.condition: + return self._state.code is None + + def time_remaining(self) -> Optional[float]: + """See grpc.RpcContext.time_remaining""" + with self._state.condition: + if self._deadline is None: + return None + else: + return max(self._deadline - time.time(), 0) + + def cancel(self) -> bool: + """See grpc.RpcContext.cancel""" + with self._state.condition: + if self._state.code is None: + code = grpc.StatusCode.CANCELLED + details = "Locally cancelled by application!" + self._call.cancel( + _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details + ) + self._state.cancelled = True + _abort(self._state, code, details) + self._state.condition.notify_all() + return True + else: + return False + + def add_callback(self, callback: NullaryCallbackType) -> bool: + """See grpc.RpcContext.add_callback""" + with self._state.condition: + if self._state.callbacks is None: + return False + else: + self._state.callbacks.append(callback) + return True + + def __iter__(self): + return self + + def next(self): + return self._next() + + def __next__(self): + return self._next() + + def _next(self): + raise NotImplementedError() + + def debug_error_string(self) -> Optional[str]: + raise NotImplementedError() + + def _repr(self) -> str: + return _rpc_state_string(self.__class__.__name__, self._state) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + def __del__(self) -> None: + with self._state.condition: + if self._state.code is None: + self._state.code = grpc.StatusCode.CANCELLED + self._state.details = "Cancelled upon garbage collection!" + self._state.cancelled = True + self._call.cancel( + _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code], + self._state.details, + ) + self._state.condition.notify_all() + + +class _SingleThreadedRendezvous( + _Rendezvous, grpc.Call, grpc.Future +): # pylint: disable=too-many-ancestors + """An RPC iterator operating entirely on a single thread. + + The __next__ method of _SingleThreadedRendezvous does not depend on the + existence of any other thread, including the "channel spin thread". + However, this means that its interface is entirely synchronous. So this + class cannot completely fulfill the grpc.Future interface. The result, + exception, and traceback methods will never block and will instead raise + an exception if calling the method would result in blocking. + + This means that these methods are safe to call from add_done_callback + handlers. 
+ """ + + _state: _RPCState + + def _is_complete(self) -> bool: + return self._state.code is not None + + def cancelled(self) -> bool: + with self._state.condition: + return self._state.cancelled + + def running(self) -> bool: + with self._state.condition: + return self._state.code is None + + def done(self) -> bool: + with self._state.condition: + return self._state.code is not None + + def result(self, timeout: Optional[float] = None) -> Any: + """Returns the result of the computation or raises its exception. + + This method will never block. Instead, it will raise an exception + if calling this method would otherwise result in blocking. + + Since this method will never block, any `timeout` argument passed will + be ignored. + """ + del timeout + with self._state.condition: + if not self._is_complete(): + raise grpc.experimental.UsageError( + "_SingleThreadedRendezvous only supports result() when the" + " RPC is complete." + ) + if self._state.code is grpc.StatusCode.OK: + return self._state.response + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + raise self + + def exception(self, timeout: Optional[float] = None) -> Optional[Exception]: + """Return the exception raised by the computation. + + This method will never block. Instead, it will raise an exception + if calling this method would otherwise result in blocking. + + Since this method will never block, any `timeout` argument passed will + be ignored. + """ + del timeout + with self._state.condition: + if not self._is_complete(): + raise grpc.experimental.UsageError( + "_SingleThreadedRendezvous only supports exception() when" + " the RPC is complete." + ) + if self._state.code is grpc.StatusCode.OK: + return None + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + return self + + def traceback( + self, timeout: Optional[float] = None + ) -> Optional[types.TracebackType]: + """Access the traceback of the exception raised by the computation. + + This method will never block. Instead, it will raise an exception + if calling this method would otherwise result in blocking. + + Since this method will never block, any `timeout` argument passed will + be ignored. + """ + del timeout + with self._state.condition: + if not self._is_complete(): + raise grpc.experimental.UsageError( + "_SingleThreadedRendezvous only supports traceback() when" + " the RPC is complete." + ) + if self._state.code is grpc.StatusCode.OK: + return None + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + try: + raise self + except grpc.RpcError: + return sys.exc_info()[2] + + def add_done_callback(self, fn: Callable[[grpc.Future], None]) -> None: + with self._state.condition: + if self._state.code is None: + self._state.callbacks.append(functools.partial(fn, self)) + return + + fn(self) + + def initial_metadata(self) -> Optional[MetadataType]: + """See grpc.Call.initial_metadata""" + with self._state.condition: + # NOTE(gnossen): Based on our initial call batch, we are guaranteed + # to receive initial metadata before any messages. + while self._state.initial_metadata is None: + self._consume_next_event() + return self._state.initial_metadata + + def trailing_metadata(self) -> Optional[MetadataType]: + """See grpc.Call.trailing_metadata""" + with self._state.condition: + if self._state.trailing_metadata is None: + raise grpc.experimental.UsageError( + "Cannot get trailing metadata until RPC is completed." 
+ ) + return self._state.trailing_metadata + + def code(self) -> Optional[grpc.StatusCode]: + """See grpc.Call.code""" + with self._state.condition: + if self._state.code is None: + raise grpc.experimental.UsageError( + "Cannot get code until RPC is completed." + ) + return self._state.code + + def details(self) -> Optional[str]: + """See grpc.Call.details""" + with self._state.condition: + if self._state.details is None: + raise grpc.experimental.UsageError( + "Cannot get details until RPC is completed." + ) + return _common.decode(self._state.details) + + def _consume_next_event(self) -> Optional[cygrpc.BaseEvent]: + event = self._call.next_event() + with self._state.condition: + callbacks = _handle_event( + event, self._state, self._response_deserializer + ) + for callback in callbacks: + # NOTE(gnossen): We intentionally allow exceptions to bubble up + # to the user when running on a single thread. + callback() + return event + + def _next_response(self) -> Any: + while True: + self._consume_next_event() + with self._state.condition: + if self._state.response is not None: + response = self._state.response + self._state.response = None + return response + elif ( + cygrpc.OperationType.receive_message not in self._state.due + ): + if self._state.code is grpc.StatusCode.OK: + raise StopIteration() + elif self._state.code is not None: + raise self + + def _next(self) -> Any: + with self._state.condition: + if self._state.code is None: + # We tentatively add the operation as expected and remove + # it if the enqueue operation fails. This allows us to guarantee that + # if an event has been submitted to the core completion queue, + # it is in `due`. If we waited until after a successful + # enqueue operation then a signal could interrupt this + # thread between the enqueue operation and the addition of the + # operation to `due`. This would cause an exception on the + # channel spin thread when the operation completes and no + # corresponding operation would be present in state.due. + # Note that, since `condition` is held through this block, there is + # no data race on `due`. + self._state.due.add(cygrpc.OperationType.receive_message) + operating = self._call.operate( + (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None + ) + if not operating: + self._state.due.remove(cygrpc.OperationType.receive_message) + elif self._state.code is grpc.StatusCode.OK: + raise StopIteration() + else: + raise self + return self._next_response() + + def debug_error_string(self) -> Optional[str]: + with self._state.condition: + if self._state.debug_error_string is None: + raise grpc.experimental.UsageError( + "Cannot get debug error string until RPC is completed." + ) + return _common.decode(self._state.debug_error_string) + + +class _MultiThreadedRendezvous( + _Rendezvous, grpc.Call, grpc.Future +): # pylint: disable=too-many-ancestors + """An RPC iterator that depends on a channel spin thread. + + This iterator relies upon a per-channel thread running in the background, + dequeueing events from the completion queue, and notifying threads waiting + on the threading.Condition object in the _RPCState object. + + This extra thread allows _MultiThreadedRendezvous to fulfill the grpc.Future interface + and to mediate a bidirection streaming RPC. 
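+
+    For example (illustrative only, hypothetical names), the object returned
+    by a streaming call supports both interfaces at once:
+
+        call = stub.SomeServerStreamingMethod(request)
+        for response in call:    # iterator interface
+            handle(response)
+        status = call.code()     # grpc.Call / grpc.Future interface; blocks
+                                 # until the channel spin thread delivers the
+                                 # terminal event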
+ """ + + _state: _RPCState + + def initial_metadata(self) -> Optional[MetadataType]: + """See grpc.Call.initial_metadata""" + with self._state.condition: + + def _done(): + return self._state.initial_metadata is not None + + _common.wait(self._state.condition.wait, _done) + return self._state.initial_metadata + + def trailing_metadata(self) -> Optional[MetadataType]: + """See grpc.Call.trailing_metadata""" + with self._state.condition: + + def _done(): + return self._state.trailing_metadata is not None + + _common.wait(self._state.condition.wait, _done) + return self._state.trailing_metadata + + def code(self) -> Optional[grpc.StatusCode]: + """See grpc.Call.code""" + with self._state.condition: + + def _done(): + return self._state.code is not None + + _common.wait(self._state.condition.wait, _done) + return self._state.code + + def details(self) -> Optional[str]: + """See grpc.Call.details""" + with self._state.condition: + + def _done(): + return self._state.details is not None + + _common.wait(self._state.condition.wait, _done) + return _common.decode(self._state.details) + + def debug_error_string(self) -> Optional[str]: + with self._state.condition: + + def _done(): + return self._state.debug_error_string is not None + + _common.wait(self._state.condition.wait, _done) + return _common.decode(self._state.debug_error_string) + + def cancelled(self) -> bool: + with self._state.condition: + return self._state.cancelled + + def running(self) -> bool: + with self._state.condition: + return self._state.code is None + + def done(self) -> bool: + with self._state.condition: + return self._state.code is not None + + def _is_complete(self) -> bool: + return self._state.code is not None + + def result(self, timeout: Optional[float] = None) -> Any: + """Returns the result of the computation or raises its exception. + + See grpc.Future.result for the full API contract. + """ + with self._state.condition: + timed_out = _common.wait( + self._state.condition.wait, self._is_complete, timeout=timeout + ) + if timed_out: + raise grpc.FutureTimeoutError() + else: + if self._state.code is grpc.StatusCode.OK: + return self._state.response + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + raise self + + def exception(self, timeout: Optional[float] = None) -> Optional[Exception]: + """Return the exception raised by the computation. + + See grpc.Future.exception for the full API contract. + """ + with self._state.condition: + timed_out = _common.wait( + self._state.condition.wait, self._is_complete, timeout=timeout + ) + if timed_out: + raise grpc.FutureTimeoutError() + else: + if self._state.code is grpc.StatusCode.OK: + return None + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + return self + + def traceback( + self, timeout: Optional[float] = None + ) -> Optional[types.TracebackType]: + """Access the traceback of the exception raised by the computation. + + See grpc.future.traceback for the full API contract. 
+ """ + with self._state.condition: + timed_out = _common.wait( + self._state.condition.wait, self._is_complete, timeout=timeout + ) + if timed_out: + raise grpc.FutureTimeoutError() + else: + if self._state.code is grpc.StatusCode.OK: + return None + elif self._state.cancelled: + raise grpc.FutureCancelledError() + else: + try: + raise self + except grpc.RpcError: + return sys.exc_info()[2] + + def add_done_callback(self, fn: Callable[[grpc.Future], None]) -> None: + with self._state.condition: + if self._state.code is None: + self._state.callbacks.append(functools.partial(fn, self)) + return + + fn(self) + + def _next(self) -> Any: + with self._state.condition: + if self._state.code is None: + event_handler = _event_handler( + self._state, self._response_deserializer + ) + self._state.due.add(cygrpc.OperationType.receive_message) + operating = self._call.operate( + (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), + event_handler, + ) + if not operating: + self._state.due.remove(cygrpc.OperationType.receive_message) + elif self._state.code is grpc.StatusCode.OK: + raise StopIteration() + else: + raise self + + def _response_ready(): + return self._state.response is not None or ( + cygrpc.OperationType.receive_message not in self._state.due + and self._state.code is not None + ) + + _common.wait(self._state.condition.wait, _response_ready) + if self._state.response is not None: + response = self._state.response + self._state.response = None + return response + elif cygrpc.OperationType.receive_message not in self._state.due: + if self._state.code is grpc.StatusCode.OK: + raise StopIteration() + elif self._state.code is not None: + raise self + + +def _start_unary_request( + request: Any, + timeout: Optional[float], + request_serializer: SerializingFunction, +) -> Tuple[Optional[float], Optional[bytes], Optional[grpc.RpcError]]: + deadline = _deadline(timeout) + serialized_request = _common.serialize(request, request_serializer) + if serialized_request is None: + state = _RPCState( + (), + (), + (), + grpc.StatusCode.INTERNAL, + "Exception serializing request!", + ) + error = _InactiveRpcError(state) + return deadline, None, error + else: + return deadline, serialized_request, None + + +def _end_unary_response_blocking( + state: _RPCState, + call: cygrpc.SegregatedCall, + with_call: bool, + deadline: Optional[float], +) -> Union[ResponseType, Tuple[ResponseType, grpc.Call]]: + if state.code is grpc.StatusCode.OK: + if with_call: + rendezvous = _MultiThreadedRendezvous(state, call, None, deadline) + return state.response, rendezvous + else: + return state.response + else: + raise _InactiveRpcError(state) # pytype: disable=not-instantiable + + +def _stream_unary_invocation_operations( + metadata: Optional[MetadataType], initial_metadata_flags: int +) -> Sequence[Sequence[cygrpc.Operation]]: + return ( + ( + cygrpc.SendInitialMetadataOperation( + metadata, initial_metadata_flags + ), + cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS), + cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS), + ), + (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), + ) + + +def _stream_unary_invocation_operations_and_tags( + metadata: Optional[MetadataType], initial_metadata_flags: int +) -> Sequence[Tuple[Sequence[cygrpc.Operation], Optional[UserTag]]]: + return tuple( + ( + operations, + None, + ) + for operations in _stream_unary_invocation_operations( + metadata, initial_metadata_flags + ) + ) + + +def _determine_deadline(user_deadline: Optional[float]) -> Optional[float]: + parent_deadline = 
cygrpc.get_deadline_from_context() + if parent_deadline is None and user_deadline is None: + return None + elif parent_deadline is not None and user_deadline is None: + return parent_deadline + elif user_deadline is not None and parent_deadline is None: + return user_deadline + else: + return min(parent_deadline, user_deadline) + + +class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): + _channel: cygrpc.Channel + _managed_call: IntegratedCallFactory + _method: bytes + _target: bytes + _request_serializer: Optional[SerializingFunction] + _response_deserializer: Optional[DeserializingFunction] + _context: Any + _registered_call_handle: Optional[int] + + __slots__ = [ + "_channel", + "_managed_call", + "_method", + "_target", + "_request_serializer", + "_response_deserializer", + "_context", + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.Channel, + managed_call: IntegratedCallFactory, + method: bytes, + target: bytes, + request_serializer: Optional[SerializingFunction], + response_deserializer: Optional[DeserializingFunction], + _registered_call_handle: Optional[int], + ): + self._channel = channel + self._managed_call = managed_call + self._method = method + self._target = target + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._context = cygrpc.build_census_context() + self._registered_call_handle = _registered_call_handle + + def _prepare( + self, + request: Any, + timeout: Optional[float], + metadata: Optional[MetadataType], + wait_for_ready: Optional[bool], + compression: Optional[grpc.Compression], + ) -> Tuple[ + Optional[_RPCState], + Optional[Sequence[cygrpc.Operation]], + Optional[float], + Optional[grpc.RpcError], + ]: + deadline, serialized_request, rendezvous = _start_unary_request( + request, timeout, self._request_serializer + ) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + if serialized_request is None: + return None, None, None, rendezvous + else: + state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None) + operations = ( + cygrpc.SendInitialMetadataOperation( + augmented_metadata, initial_metadata_flags + ), + cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS), + cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS), + cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS), + cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS), + cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS), + ) + return state, operations, deadline, None + + def _blocking( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[_RPCState, cygrpc.SegregatedCall]: + state, operations, deadline, rendezvous = self._prepare( + request, timeout, metadata, wait_for_ready, compression + ) + if state is None: + raise rendezvous # pylint: disable-msg=raising-bad-type + else: + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._channel.segregated_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + _determine_deadline(deadline), + metadata, + None if credentials is None else credentials._credentials, + ( + ( + operations, + None, + ), + 
), + self._context, + self._registered_call_handle, + ) + event = call.next_event() + _handle_event(event, state, self._response_deserializer) + return state, call + + def __call__( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + ( + state, + call, + ) = self._blocking( + request, timeout, metadata, credentials, wait_for_ready, compression + ) + return _end_unary_response_blocking(state, call, False, None) + + def with_call( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + ( + state, + call, + ) = self._blocking( + request, timeout, metadata, credentials, wait_for_ready, compression + ) + return _end_unary_response_blocking(state, call, True, None) + + def future( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _MultiThreadedRendezvous: + state, operations, deadline, rendezvous = self._prepare( + request, timeout, metadata, wait_for_ready, compression + ) + if state is None: + raise rendezvous # pylint: disable-msg=raising-bad-type + else: + event_handler = _event_handler(state, self._response_deserializer) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._managed_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + deadline, + metadata, + None if credentials is None else credentials._credentials, + (operations,), + event_handler, + self._context, + self._registered_call_handle, + ) + return _MultiThreadedRendezvous( + state, call, self._response_deserializer, deadline + ) + + +class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): + _channel: cygrpc.Channel + _method: bytes + _target: bytes + _request_serializer: Optional[SerializingFunction] + _response_deserializer: Optional[DeserializingFunction] + _context: Any + _registered_call_handle: Optional[int] + + __slots__ = [ + "_channel", + "_method", + "_target", + "_request_serializer", + "_response_deserializer", + "_context", + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.Channel, + method: bytes, + target: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + _registered_call_handle: Optional[int], + ): + self._channel = channel + self._method = method + self._target = target + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._context = cygrpc.build_census_context() + self._registered_call_handle = _registered_call_handle + + def __call__( # pylint: disable=too-many-locals + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _SingleThreadedRendezvous: + deadline = _deadline(timeout) + serialized_request = 
_common.serialize( + request, self._request_serializer + ) + if serialized_request is None: + state = _RPCState( + (), + (), + (), + grpc.StatusCode.INTERNAL, + "Exception serializing request!", + ) + raise _InactiveRpcError(state) + + state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None) + call_credentials = ( + None if credentials is None else credentials._credentials + ) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + operations = ( + ( + cygrpc.SendInitialMetadataOperation( + augmented_metadata, initial_metadata_flags + ), + cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS), + cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS), + ), + (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),), + (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), + ) + operations_and_tags = tuple((ops, None) for ops in operations) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._channel.segregated_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + _determine_deadline(deadline), + metadata, + call_credentials, + operations_and_tags, + self._context, + self._registered_call_handle, + ) + return _SingleThreadedRendezvous( + state, call, self._response_deserializer, deadline + ) + + +class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): + _channel: cygrpc.Channel + _managed_call: IntegratedCallFactory + _method: bytes + _target: bytes + _request_serializer: Optional[SerializingFunction] + _response_deserializer: Optional[DeserializingFunction] + _context: Any + _registered_call_handle: Optional[int] + + __slots__ = [ + "_channel", + "_managed_call", + "_method", + "_target", + "_request_serializer", + "_response_deserializer", + "_context", + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.Channel, + managed_call: IntegratedCallFactory, + method: bytes, + target: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + _registered_call_handle: Optional[int], + ): + self._channel = channel + self._managed_call = managed_call + self._method = method + self._target = target + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._context = cygrpc.build_census_context() + self._registered_call_handle = _registered_call_handle + + def __call__( # pylint: disable=too-many-locals + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _MultiThreadedRendezvous: + deadline, serialized_request, rendezvous = _start_unary_request( + request, timeout, self._request_serializer + ) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + if serialized_request is None: + raise rendezvous # pylint: disable-msg=raising-bad-type + else: + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None) + operations = ( + ( + cygrpc.SendInitialMetadataOperation( + augmented_metadata, initial_metadata_flags + ), + cygrpc.SendMessageOperation( + serialized_request, _EMPTY_FLAGS + ), 
+ cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS), + cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS), + ), + (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), + ) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._managed_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + _determine_deadline(deadline), + metadata, + None if credentials is None else credentials._credentials, + operations, + _event_handler(state, self._response_deserializer), + self._context, + self._registered_call_handle, + ) + return _MultiThreadedRendezvous( + state, call, self._response_deserializer, deadline + ) + + +class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): + _channel: cygrpc.Channel + _managed_call: IntegratedCallFactory + _method: bytes + _target: bytes + _request_serializer: Optional[SerializingFunction] + _response_deserializer: Optional[DeserializingFunction] + _context: Any + _registered_call_handle: Optional[int] + + __slots__ = [ + "_channel", + "_managed_call", + "_method", + "_target", + "_request_serializer", + "_response_deserializer", + "_context", + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.Channel, + managed_call: IntegratedCallFactory, + method: bytes, + target: bytes, + request_serializer: Optional[SerializingFunction], + response_deserializer: Optional[DeserializingFunction], + _registered_call_handle: Optional[int], + ): + self._channel = channel + self._managed_call = managed_call + self._method = method + self._target = target + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._context = cygrpc.build_census_context() + self._registered_call_handle = _registered_call_handle + + def _blocking( + self, + request_iterator: Iterator, + timeout: Optional[float], + metadata: Optional[MetadataType], + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + compression: Optional[grpc.Compression], + ) -> Tuple[_RPCState, cygrpc.SegregatedCall]: + deadline = _deadline(timeout) + state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._channel.segregated_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + _determine_deadline(deadline), + augmented_metadata, + None if credentials is None else credentials._credentials, + _stream_unary_invocation_operations_and_tags( + augmented_metadata, initial_metadata_flags + ), + self._context, + self._registered_call_handle, + ) + _consume_request_iterator( + request_iterator, state, call, self._request_serializer, None + ) + while True: + event = call.next_event() + with state.condition: + _handle_event(event, state, self._response_deserializer) + state.condition.notify_all() + if not state.due: + break + return state, call + + def __call__( + self, + request_iterator: Iterator, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + 
( + state, + call, + ) = self._blocking( + request_iterator, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + return _end_unary_response_blocking(state, call, False, None) + + def with_call( + self, + request_iterator: Iterator, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + ( + state, + call, + ) = self._blocking( + request_iterator, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + return _end_unary_response_blocking(state, call, True, None) + + def future( + self, + request_iterator: Iterator, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _MultiThreadedRendezvous: + deadline = _deadline(timeout) + state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None) + event_handler = _event_handler(state, self._response_deserializer) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._managed_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + deadline, + augmented_metadata, + None if credentials is None else credentials._credentials, + _stream_unary_invocation_operations( + metadata, initial_metadata_flags + ), + event_handler, + self._context, + self._registered_call_handle, + ) + _consume_request_iterator( + request_iterator, + state, + call, + self._request_serializer, + event_handler, + ) + return _MultiThreadedRendezvous( + state, call, self._response_deserializer, deadline + ) + + +class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): + _channel: cygrpc.Channel + _managed_call: IntegratedCallFactory + _method: bytes + _target: bytes + _request_serializer: Optional[SerializingFunction] + _response_deserializer: Optional[DeserializingFunction] + _context: Any + _registered_call_handle: Optional[int] + + __slots__ = [ + "_channel", + "_managed_call", + "_method", + "_target", + "_request_serializer", + "_response_deserializer", + "_context", + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.Channel, + managed_call: IntegratedCallFactory, + method: bytes, + target: bytes, + request_serializer: Optional[SerializingFunction], + response_deserializer: Optional[DeserializingFunction], + _registered_call_handle: Optional[int], + ): + self._channel = channel + self._managed_call = managed_call + self._method = method + self._target = target + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._context = cygrpc.build_census_context() + self._registered_call_handle = _registered_call_handle + + def __call__( + self, + request_iterator: Iterator, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _MultiThreadedRendezvous: + deadline = _deadline(timeout) + state = 
_RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None) + initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready( + wait_for_ready + ) + augmented_metadata = _compression.augment_metadata( + metadata, compression + ) + operations = ( + ( + cygrpc.SendInitialMetadataOperation( + augmented_metadata, initial_metadata_flags + ), + cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS), + ), + (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),), + ) + event_handler = _event_handler(state, self._response_deserializer) + state.rpc_start_time = time.perf_counter() + state.method = _common.decode(self._method) + state.target = _common.decode(self._target) + call = self._managed_call( + cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, + self._method, + None, + _determine_deadline(deadline), + augmented_metadata, + None if credentials is None else credentials._credentials, + operations, + event_handler, + self._context, + self._registered_call_handle, + ) + _consume_request_iterator( + request_iterator, + state, + call, + self._request_serializer, + event_handler, + ) + return _MultiThreadedRendezvous( + state, call, self._response_deserializer, deadline + ) + + +class _InitialMetadataFlags(int): + """Stores immutable initial metadata flags""" + + def __new__(cls, value: int = _EMPTY_FLAGS): + value &= cygrpc.InitialMetadataFlags.used_mask + return super(_InitialMetadataFlags, cls).__new__(cls, value) + + def with_wait_for_ready(self, wait_for_ready: Optional[bool]) -> int: + if wait_for_ready is not None: + if wait_for_ready: + return self.__class__( + self + | cygrpc.InitialMetadataFlags.wait_for_ready + | cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set + ) + elif not wait_for_ready: + return self.__class__( + self & ~cygrpc.InitialMetadataFlags.wait_for_ready + | cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set + ) + return self + + +class _ChannelCallState(object): + channel: cygrpc.Channel + managed_calls: int + threading: bool + + def __init__(self, channel: cygrpc.Channel): + self.lock = threading.Lock() + self.channel = channel + self.managed_calls = 0 + self.threading = False + + def reset_postfork_child(self) -> None: + self.managed_calls = 0 + + def __del__(self): + try: + self.channel.close( + cygrpc.StatusCode.cancelled, "Channel deallocated!" + ) + except (TypeError, AttributeError): + pass + + +def _run_channel_spin_thread(state: _ChannelCallState) -> None: + def channel_spin(): + while True: + cygrpc.block_if_fork_in_progress(state) + event = state.channel.next_call_event() + if event.completion_type == cygrpc.CompletionType.queue_timeout: + continue + call_completed = event.tag(event) + if call_completed: + with state.lock: + state.managed_calls -= 1 + if state.managed_calls == 0: + return + + channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin) + channel_spin_thread.setDaemon(True) + channel_spin_thread.start() + + +def _channel_managed_call_management(state: _ChannelCallState): + # pylint: disable=too-many-arguments + def create( + flags: int, + method: bytes, + host: Optional[str], + deadline: Optional[float], + metadata: Optional[MetadataType], + credentials: Optional[cygrpc.CallCredentials], + operations: Sequence[Sequence[cygrpc.Operation]], + event_handler: UserTag, + context: Any, + _registered_call_handle: Optional[int], + ) -> cygrpc.IntegratedCall: + """Creates a cygrpc.IntegratedCall. + + Args: + flags: An integer bitfield of call flags. + method: The RPC method. + host: A host string for the created call. 
+ deadline: A float to be the deadline of the created call or None if + the call is to have an infinite deadline. + metadata: The metadata for the call or None. + credentials: A cygrpc.CallCredentials or None. + operations: A sequence of sequences of cygrpc.Operations to be + started on the call. + event_handler: A behavior to call to handle the events resultant from + the operations on the call. + context: Context object for distributed tracing. + _registered_call_handle: An int representing the call handle of the + method, or None if the method is not registered. + Returns: + A cygrpc.IntegratedCall with which to conduct an RPC. + """ + operations_and_tags = tuple( + ( + operation, + event_handler, + ) + for operation in operations + ) + with state.lock: + call = state.channel.integrated_call( + flags, + method, + host, + deadline, + metadata, + credentials, + operations_and_tags, + context, + _registered_call_handle, + ) + if state.managed_calls == 0: + state.managed_calls = 1 + _run_channel_spin_thread(state) + else: + state.managed_calls += 1 + return call + + return create + + +class _ChannelConnectivityState(object): + lock: threading.RLock + channel: grpc.Channel + polling: bool + connectivity: grpc.ChannelConnectivity + try_to_connect: bool + # TODO(xuanwn): Refactor this: https://github.com/grpc/grpc/issues/31704 + callbacks_and_connectivities: List[ + Sequence[ + Union[ + Callable[[grpc.ChannelConnectivity], None], + Optional[grpc.ChannelConnectivity], + ] + ] + ] + delivering: bool + + def __init__(self, channel: grpc.Channel): + self.lock = threading.RLock() + self.channel = channel + self.polling = False + self.connectivity = None + self.try_to_connect = False + self.callbacks_and_connectivities = [] + self.delivering = False + + def reset_postfork_child(self) -> None: + self.polling = False + self.connectivity = None + self.try_to_connect = False + self.callbacks_and_connectivities = [] + self.delivering = False + + +def _deliveries( + state: _ChannelConnectivityState, +) -> List[Callable[[grpc.ChannelConnectivity], None]]: + callbacks_needing_update = [] + for callback_and_connectivity in state.callbacks_and_connectivities: + ( + callback, + callback_connectivity, + ) = callback_and_connectivity + if callback_connectivity is not state.connectivity: + callbacks_needing_update.append(callback) + callback_and_connectivity[1] = state.connectivity + return callbacks_needing_update + + +def _deliver( + state: _ChannelConnectivityState, + initial_connectivity: grpc.ChannelConnectivity, + initial_callbacks: Sequence[Callable[[grpc.ChannelConnectivity], None]], +) -> None: + connectivity = initial_connectivity + callbacks = initial_callbacks + while True: + for callback in callbacks: + cygrpc.block_if_fork_in_progress(state) + try: + callback(connectivity) + except Exception: # pylint: disable=broad-except + _LOGGER.exception( + _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE + ) + with state.lock: + callbacks = _deliveries(state) + if callbacks: + connectivity = state.connectivity + else: + state.delivering = False + return + + +def _spawn_delivery( + state: _ChannelConnectivityState, + callbacks: Sequence[Callable[[grpc.ChannelConnectivity], None]], +) -> None: + delivering_thread = cygrpc.ForkManagedThread( + target=_deliver, + args=( + state, + state.connectivity, + callbacks, + ), + ) + delivering_thread.setDaemon(True) + delivering_thread.start() + state.delivering = True + + +# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll. 
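+# Sketch of the mechanism implemented below (descriptive only): a single
+# fork-managed thread repeatedly calls
+# channel.watch_connectivity_state(last_state, time.time() + 0.2), maps the
+# resulting cygrpc state to grpc.ChannelConnectivity, and hands callbacks
+# whose last-observed state is stale to a separate delivery thread spawned by
+# _spawn_delivery. Polling stops once every callback has been unsubscribed
+# and no connect attempt is pending. A typical subscriber (hypothetical
+# callback name) looks like:
+#
+#     def _on_state_change(connectivity):  # a grpc.ChannelConnectivity value
+#         print("channel is now", connectivity)
+#
+#     channel.subscribe(_on_state_change, try_to_connect=True)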
+def _poll_connectivity( + state: _ChannelConnectivityState, + channel: grpc.Channel, + initial_try_to_connect: bool, +) -> None: + try_to_connect = initial_try_to_connect + connectivity = channel.check_connectivity_state(try_to_connect) + with state.lock: + state.connectivity = ( + _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[ + connectivity + ] + ) + callbacks = tuple( + callback for callback, _ in state.callbacks_and_connectivities + ) + for callback_and_connectivity in state.callbacks_and_connectivities: + callback_and_connectivity[1] = state.connectivity + if callbacks: + _spawn_delivery(state, callbacks) + while True: + event = channel.watch_connectivity_state( + connectivity, time.time() + 0.2 + ) + cygrpc.block_if_fork_in_progress(state) + with state.lock: + if ( + not state.callbacks_and_connectivities + and not state.try_to_connect + ): + state.polling = False + state.connectivity = None + break + try_to_connect = state.try_to_connect + state.try_to_connect = False + if event.success or try_to_connect: + connectivity = channel.check_connectivity_state(try_to_connect) + with state.lock: + state.connectivity = ( + _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[ + connectivity + ] + ) + if not state.delivering: + callbacks = _deliveries(state) + if callbacks: + _spawn_delivery(state, callbacks) + + +def _subscribe( + state: _ChannelConnectivityState, + callback: Callable[[grpc.ChannelConnectivity], None], + try_to_connect: bool, +) -> None: + with state.lock: + if not state.callbacks_and_connectivities and not state.polling: + polling_thread = cygrpc.ForkManagedThread( + target=_poll_connectivity, + args=(state, state.channel, bool(try_to_connect)), + ) + polling_thread.setDaemon(True) + polling_thread.start() + state.polling = True + state.callbacks_and_connectivities.append([callback, None]) + elif not state.delivering and state.connectivity is not None: + _spawn_delivery(state, (callback,)) + state.try_to_connect |= bool(try_to_connect) + state.callbacks_and_connectivities.append( + [callback, state.connectivity] + ) + else: + state.try_to_connect |= bool(try_to_connect) + state.callbacks_and_connectivities.append([callback, None]) + + +def _unsubscribe( + state: _ChannelConnectivityState, + callback: Callable[[grpc.ChannelConnectivity], None], +) -> None: + with state.lock: + for index, (subscribed_callback, unused_connectivity) in enumerate( + state.callbacks_and_connectivities + ): + if callback == subscribed_callback: + state.callbacks_and_connectivities.pop(index) + break + + +def _augment_options( + base_options: Sequence[ChannelArgumentType], + compression: Optional[grpc.Compression], +) -> Sequence[ChannelArgumentType]: + compression_option = _compression.create_channel_option(compression) + return ( + tuple(base_options) + + compression_option + + ( + ( + cygrpc.ChannelArgKey.primary_user_agent_string, + _USER_AGENT, + ), + ) + ) + + +def _separate_channel_options( + options: Sequence[ChannelArgumentType], +) -> Tuple[Sequence[ChannelArgumentType], Sequence[ChannelArgumentType]]: + """Separates core channel options from Python channel options.""" + core_options = [] + python_options = [] + for pair in options: + if ( + pair[0] + == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream + ): + python_options.append(pair) + else: + core_options.append(pair) + return python_options, core_options + + +class Channel(grpc.Channel): + """A cygrpc.Channel-backed implementation of grpc.Channel.""" + + _single_threaded_unary_stream: bool + _channel: 
cygrpc.Channel + _call_state: _ChannelCallState + _connectivity_state: _ChannelConnectivityState + _target: str + _registered_call_handles: Dict[str, int] + + def __init__( + self, + target: str, + options: Sequence[ChannelArgumentType], + credentials: Optional[grpc.ChannelCredentials], + compression: Optional[grpc.Compression], + ): + """Constructor. + + Args: + target: The target to which to connect. + options: Configuration options for the channel. + credentials: A cygrpc.ChannelCredentials or None. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + """ + python_options, core_options = _separate_channel_options(options) + self._single_threaded_unary_stream = ( + _DEFAULT_SINGLE_THREADED_UNARY_STREAM + ) + self._process_python_options(python_options) + self._channel = cygrpc.Channel( + _common.encode(target), + _augment_options(core_options, compression), + credentials, + ) + self._target = target + self._call_state = _ChannelCallState(self._channel) + self._connectivity_state = _ChannelConnectivityState(self._channel) + cygrpc.fork_register_channel(self) + if cygrpc.g_gevent_activated: + cygrpc.gevent_increment_channel_count() + + def _get_registered_call_handle(self, method: str) -> int: + """ + Get the registered call handle for a method. + + This is a semi-private method. It is intended for use only by gRPC generated code. + + This method is not thread-safe. + + Args: + method: Required, the method name for the RPC. + + Returns: + The registered call handle pointer in the form of a Python Long. + """ + return self._channel.get_registered_call_handle(_common.encode(method)) + + def _process_python_options( + self, python_options: Sequence[ChannelArgumentType] + ) -> None: + """Sets channel attributes according to python-only channel options.""" + for pair in python_options: + if ( + pair[0] + == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream + ): + self._single_threaded_unary_stream = True + + def subscribe( + self, + callback: Callable[[grpc.ChannelConnectivity], None], + try_to_connect: Optional[bool] = None, + ) -> None: + _subscribe(self._connectivity_state, callback, try_to_connect) + + def unsubscribe( + self, callback: Callable[[grpc.ChannelConnectivity], None] + ) -> None: + _unsubscribe(self._connectivity_state, callback) + + # pylint: disable=arguments-differ + def unary_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.UnaryUnaryMultiCallable: + _registered_call_handle = None + if _registered_method: + _registered_call_handle = self._get_registered_call_handle(method) + return _UnaryUnaryMultiCallable( + self._channel, + _channel_managed_call_management(self._call_state), + _common.encode(method), + _common.encode(self._target), + request_serializer, + response_deserializer, + _registered_call_handle, + ) + + # pylint: disable=arguments-differ + def unary_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.UnaryStreamMultiCallable: + _registered_call_handle = None + if _registered_method: + _registered_call_handle = self._get_registered_call_handle(method) + # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC + # on a single Python thread results in an 
appreciable speed-up. However, + # due to slight differences in capability, the multi-threaded variant + # remains the default. + if self._single_threaded_unary_stream: + return _SingleThreadedUnaryStreamMultiCallable( + self._channel, + _common.encode(method), + _common.encode(self._target), + request_serializer, + response_deserializer, + _registered_call_handle, + ) + else: + return _UnaryStreamMultiCallable( + self._channel, + _channel_managed_call_management(self._call_state), + _common.encode(method), + _common.encode(self._target), + request_serializer, + response_deserializer, + _registered_call_handle, + ) + + # pylint: disable=arguments-differ + def stream_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.StreamUnaryMultiCallable: + _registered_call_handle = None + if _registered_method: + _registered_call_handle = self._get_registered_call_handle(method) + return _StreamUnaryMultiCallable( + self._channel, + _channel_managed_call_management(self._call_state), + _common.encode(method), + _common.encode(self._target), + request_serializer, + response_deserializer, + _registered_call_handle, + ) + + # pylint: disable=arguments-differ + def stream_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.StreamStreamMultiCallable: + _registered_call_handle = None + if _registered_method: + _registered_call_handle = self._get_registered_call_handle(method) + return _StreamStreamMultiCallable( + self._channel, + _channel_managed_call_management(self._call_state), + _common.encode(method), + _common.encode(self._target), + request_serializer, + response_deserializer, + _registered_call_handle, + ) + + def _unsubscribe_all(self) -> None: + state = self._connectivity_state + if state: + with state.lock: + del state.callbacks_and_connectivities[:] + + def _close(self) -> None: + self._unsubscribe_all() + self._channel.close(cygrpc.StatusCode.cancelled, "Channel closed!") + cygrpc.fork_unregister_channel(self) + if cygrpc.g_gevent_activated: + cygrpc.gevent_decrement_channel_count() + + def _close_on_fork(self) -> None: + self._unsubscribe_all() + self._channel.close_on_fork( + cygrpc.StatusCode.cancelled, "Channel closed due to fork" + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._close() + return False + + def close(self) -> None: + self._close() + + def __del__(self): + # TODO(https://github.com/grpc/grpc/issues/12531): Several releases + # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call + # here (or more likely, call self._close() here). We don't do this today + # because many valid use cases today allow the channel to be deleted + # immediately after stubs are created. After a sufficient period of time + # has passed for all users to be trusted to freeze out to their channels + # for as long as they are in use and to close them after using them, + # then deletion of this grpc._channel.Channel instance can be made to + # effect closure of the underlying cygrpc.Channel instance. + try: + self._unsubscribe_all() + except: # pylint: disable=bare-except + # Exceptions in __del__ are ignored by Python anyway, but they can + # keep spamming logs. Just silence them. 
+ pass diff --git a/MLPY/Lib/site-packages/grpc/_common.py b/MLPY/Lib/site-packages/grpc/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..be4d9754ec638f641b56f017a9ae09f60e3ad1cb --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_common.py @@ -0,0 +1,183 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Shared implementation.""" + +import logging +import time +from typing import Any, AnyStr, Callable, Optional, Union + +import grpc +from grpc._cython import cygrpc +from grpc._typing import DeserializingFunction +from grpc._typing import SerializingFunction + +_LOGGER = logging.getLogger(__name__) + +CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = { + cygrpc.ConnectivityState.idle: grpc.ChannelConnectivity.IDLE, + cygrpc.ConnectivityState.connecting: grpc.ChannelConnectivity.CONNECTING, + cygrpc.ConnectivityState.ready: grpc.ChannelConnectivity.READY, + cygrpc.ConnectivityState.transient_failure: grpc.ChannelConnectivity.TRANSIENT_FAILURE, + cygrpc.ConnectivityState.shutdown: grpc.ChannelConnectivity.SHUTDOWN, +} + +CYGRPC_STATUS_CODE_TO_STATUS_CODE = { + cygrpc.StatusCode.ok: grpc.StatusCode.OK, + cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED, + cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN, + cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT, + cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED, + cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND, + cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS, + cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED, + cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED, + cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED, + cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION, + cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED, + cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE, + cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED, + cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL, + cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE, + cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS, +} +STATUS_CODE_TO_CYGRPC_STATUS_CODE = { + grpc_code: cygrpc_code + for cygrpc_code, grpc_code in CYGRPC_STATUS_CODE_TO_STATUS_CODE.items() +} + +MAXIMUM_WAIT_TIMEOUT = 0.1 + +_ERROR_MESSAGE_PORT_BINDING_FAILED = ( + "Failed to bind to address %s; set " + "GRPC_VERBOSITY=debug environment variable to see detailed error message." 
+) + + +def encode(s: AnyStr) -> bytes: + if isinstance(s, bytes): + return s + else: + return s.encode("utf8") + + +def decode(b: AnyStr) -> str: + if isinstance(b, bytes): + return b.decode("utf-8", "replace") + return b + + +def _transform( + message: Any, + transformer: Union[SerializingFunction, DeserializingFunction, None], + exception_message: str, +) -> Any: + if transformer is None: + return message + else: + try: + return transformer(message) + except Exception: # pylint: disable=broad-except + _LOGGER.exception(exception_message) + return None + + +def serialize(message: Any, serializer: Optional[SerializingFunction]) -> bytes: + return _transform(message, serializer, "Exception serializing message!") + + +def deserialize( + serialized_message: bytes, deserializer: Optional[DeserializingFunction] +) -> Any: + return _transform( + serialized_message, deserializer, "Exception deserializing message!" + ) + + +def fully_qualified_method(group: str, method: str) -> str: + return "/{}/{}".format(group, method) + + +def _wait_once( + wait_fn: Callable[..., bool], + timeout: float, + spin_cb: Optional[Callable[[], None]], +): + wait_fn(timeout=timeout) + if spin_cb is not None: + spin_cb() + + +def wait( + wait_fn: Callable[..., bool], + wait_complete_fn: Callable[[], bool], + timeout: Optional[float] = None, + spin_cb: Optional[Callable[[], None]] = None, +) -> bool: + """Blocks waiting for an event without blocking the thread indefinitely. + + See https://github.com/grpc/grpc/issues/19464 for full context. CPython's + `threading.Event.wait` and `threading.Condition.wait` methods, if invoked + without a timeout kwarg, may block the calling thread indefinitely. If the + call is made from the main thread, this means that signal handlers may not + run for an arbitrarily long period of time. + + This wrapper calls the supplied wait function with an arbitrary short + timeout to ensure that no signal handler has to wait longer than + MAXIMUM_WAIT_TIMEOUT before executing. + + Args: + wait_fn: A callable acceptable a single float-valued kwarg named + `timeout`. This function is expected to be one of `threading.Event.wait` + or `threading.Condition.wait`. + wait_complete_fn: A callable taking no arguments and returning a bool. + When this function returns true, it indicates that waiting should cease. + timeout: An optional float-valued number of seconds after which the wait + should cease. + spin_cb: An optional Callable taking no arguments and returning nothing. + This callback will be called on each iteration of the spin. This may be + used for, e.g. work related to forking. + + Returns: + True if a timeout was supplied and it was reached. False otherwise. + """ + if timeout is None: + while not wait_complete_fn(): + _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) + else: + end = time.time() + timeout + while not wait_complete_fn(): + remaining = min(end - time.time(), MAXIMUM_WAIT_TIMEOUT) + if remaining < 0: + return True + _wait_once(wait_fn, remaining, spin_cb) + return False + + +def validate_port_binding_result(address: str, port: int) -> int: + """Validates if the port binding succeed. + + If the port returned by Core is 0, the binding is failed. However, in that + case, the Core API doesn't return a detailed failing reason. The best we + can do is raising an exception to prevent further confusion. + + Args: + address: The address string to be bound. + port: An int returned by core + """ + if port == 0: + # The Core API doesn't return a failure message. 
The best we can do + # is raising an exception to prevent further confusion. + raise RuntimeError(_ERROR_MESSAGE_PORT_BINDING_FAILED % address) + else: + return port diff --git a/MLPY/Lib/site-packages/grpc/_compression.py b/MLPY/Lib/site-packages/grpc/_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa9f328ba9497df867a8cc0fd9c50daa6b1fdea --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_compression.py @@ -0,0 +1,71 @@ +# Copyright 2019 The gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional + +import grpc +from grpc._cython import cygrpc +from grpc._typing import MetadataType + +NoCompression = cygrpc.CompressionAlgorithm.none +Deflate = cygrpc.CompressionAlgorithm.deflate +Gzip = cygrpc.CompressionAlgorithm.gzip + +_METADATA_STRING_MAPPING = { + NoCompression: "identity", + Deflate: "deflate", + Gzip: "gzip", +} + + +def _compression_algorithm_to_metadata_value( + compression: grpc.Compression, +) -> str: + return _METADATA_STRING_MAPPING[compression] + + +def compression_algorithm_to_metadata(compression: grpc.Compression): + return ( + cygrpc.GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, + _compression_algorithm_to_metadata_value(compression), + ) + + +def create_channel_option(compression: Optional[grpc.Compression]): + return ( + ((cygrpc.GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, int(compression)),) + if compression + else () + ) + + +def augment_metadata( + metadata: Optional[MetadataType], compression: Optional[grpc.Compression] +): + if not metadata and not compression: + return None + base_metadata = tuple(metadata) if metadata else () + compression_metadata = ( + (compression_algorithm_to_metadata(compression),) if compression else () + ) + return base_metadata + compression_metadata + + +__all__ = ( + "NoCompression", + "Deflate", + "Gzip", +) diff --git a/MLPY/Lib/site-packages/grpc/_cython/__init__.py b/MLPY/Lib/site-packages/grpc/_cython/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_cython/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/MLPY/Lib/site-packages/grpc/_cython/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/_cython/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c88897169e6a795a52208ddf7a460d528e4cb4 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/_cython/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/_cython/_credentials/roots.pem b/MLPY/Lib/site-packages/grpc/_cython/_credentials/roots.pem new file mode 100644 index 0000000000000000000000000000000000000000..948e3b9c65d0697a07dbea62bf68c81f147201e7 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_cython/_credentials/roots.pem @@ -0,0 +1,4337 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI 
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP 
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH 
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x 
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi 
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm 
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF 
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- 
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ 
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy 
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE +ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ 
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +# Issuer: O=certSIGN OU=certSIGN ROOT CA +# Subject: O=certSIGN OU=certSIGN ROOT CA +# Label: "certSIGN ROOT CA" +# Serial: 35210227249154 +# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17 +# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b +# SHA256 Fingerprint: 
ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services) +# Label: "NetLock Arany (Class Gold) Főtanúsítvány" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 
Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. 
+# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Label: "EC-ACC" +# Serial: -23701579247955709139626555126524820479 +# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09 +# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8 +# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99 +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB +8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy +dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 +YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 +dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh +IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD +LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG +EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g +KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD +ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu +bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg +ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R +85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm +4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV +HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd +QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t 
+lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB +o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 +opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo +dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW +ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN +AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y +/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k +SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy +Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS +Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl +nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2011" +# Serial: 0 +# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 +# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d +# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE 
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP 
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: 
ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. 
+# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm 
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. 
OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy 
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM 
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga 
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- 
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO 
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ 
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 
4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 14367148294922964480859022125800977897474 +# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e +# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb +# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO 
+xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp 
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df 
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy 
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr 
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 
2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# 
Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 146587175971765017618439757810265552097 +# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 +# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 +# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX +mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 +zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P +fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc +vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 +Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp +zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO +Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW +k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ +DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF +lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW +Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 +d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z +XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR +gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 +d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv +J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg +DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM ++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy +F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 +SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws +E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 146587176055767053814479386953112547951 +# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b +# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d +# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg +GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu +XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd +re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu 
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 +mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K +8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj +x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR +nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 +kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok +twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp +8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT +vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT +z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA +pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb +pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB +R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R +RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk +0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC +5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF +izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn +yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 146587176140553309517047991083707763997 +# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 +# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 +# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 +-----BEGIN CERTIFICATE----- +MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A +DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk +fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA +njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 146587176229350439916519468929765261721 +# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 +# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb +# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd +-----BEGIN CERTIFICATE----- +MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l +xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud 
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 +CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx +sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy +YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS 
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO +ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L 
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby +RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: 
"Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa +LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G4" +# Serial: 289383649854506086828220374796556676440 +# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88 +# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01 +# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88 +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw +gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL +Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg +MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw +BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0 +MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1 +c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ +bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ +2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E +T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j +5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM +C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T +DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX +wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A +2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm +nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl +N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj +c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS +5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS +Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr +hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/ +B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI +AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw +H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+ +b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk +2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol +IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk +5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY +n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV 
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC +gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB +RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. 
+# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ +d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk +PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + 
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- 
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- diff --git a/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__init__.py 
b/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10beb285354d7f50e551c9d7e15d7fd4a4b16212 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/_cython/_cygrpc/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..01824a4532d93b2bb5ea7a623ffc871bd99b9e5a --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9d3bfa55ae1c02b150ee0f4929c9649b5e9fb8219b2727b24861ed9a257d4a2 +size 8973312 diff --git a/MLPY/Lib/site-packages/grpc/_grpcio_metadata.py b/MLPY/Lib/site-packages/grpc/_grpcio_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..7cf59f33c1a5f281fb683caff6a14bf812a03215 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_grpcio_metadata.py @@ -0,0 +1 @@ +__version__ = """1.65.1""" \ No newline at end of file diff --git a/MLPY/Lib/site-packages/grpc/_interceptor.py b/MLPY/Lib/site-packages/grpc/_interceptor.py new file mode 100644 index 0000000000000000000000000000000000000000..21a4e5ee073d8b9a03ae97993b905c78dfd2ed17 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_interceptor.py @@ -0,0 +1,813 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
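(Editorial aside, not part of the vendored file: the file added next, MLPY/Lib/site-packages/grpc/_interceptor.py, is the upstream grpcio 1.65.1 implementation of client- and server-side interceptor plumbing: the service pipeline, call-detail unwrapping, and the grpc.intercept_channel wrapper defined at the bottom of the file. A hedged sketch of how that machinery is used from application code follows; the header name and target are hypothetical.)

    # Sketch: a unary-unary client interceptor that adds one metadata pair,
    # wired in via grpc.intercept_channel (implemented in _interceptor.py below).
    import collections
    import grpc

    class _CallDetails(
        collections.namedtuple(
            "_CallDetails",
            ("method", "timeout", "metadata", "credentials",
             "wait_for_ready", "compression"),
        ),
        grpc.ClientCallDetails,
    ):
        pass

    class AddHeaderInterceptor(grpc.UnaryUnaryClientInterceptor):
        def intercept_unary_unary(self, continuation, client_call_details, request):
            # Copy the outgoing metadata, append one pair, and delegate onward.
            metadata = list(client_call_details.metadata or []) + [("x-example", "1")]
            details = _CallDetails(
                client_call_details.method,
                client_call_details.timeout,
                metadata,
                client_call_details.credentials,
                getattr(client_call_details, "wait_for_ready", None),
                getattr(client_call_details, "compression", None),
            )
            return continuation(details, request)

    # Usage: wrap an existing channel so every unary-unary call is intercepted.
    channel = grpc.intercept_channel(
        grpc.insecure_channel("localhost:50051"), AddHeaderInterceptor()
    )
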
+"""Implementation of gRPC Python interceptors.""" + +import collections +import sys +import types +from typing import Any, Callable, Optional, Sequence, Tuple, Union + +import grpc + +from ._typing import DeserializingFunction +from ._typing import DoneCallbackType +from ._typing import MetadataType +from ._typing import RequestIterableType +from ._typing import SerializingFunction + + +class _ServicePipeline(object): + interceptors: Tuple[grpc.ServerInterceptor] + + def __init__(self, interceptors: Sequence[grpc.ServerInterceptor]): + self.interceptors = tuple(interceptors) + + def _continuation(self, thunk: Callable, index: int) -> Callable: + return lambda context: self._intercept_at(thunk, index, context) + + def _intercept_at( + self, thunk: Callable, index: int, context: grpc.HandlerCallDetails + ) -> grpc.RpcMethodHandler: + if index < len(self.interceptors): + interceptor = self.interceptors[index] + thunk = self._continuation(thunk, index + 1) + return interceptor.intercept_service(thunk, context) + else: + return thunk(context) + + def execute( + self, thunk: Callable, context: grpc.HandlerCallDetails + ) -> grpc.RpcMethodHandler: + return self._intercept_at(thunk, 0, context) + + +def service_pipeline( + interceptors: Optional[Sequence[grpc.ServerInterceptor]], +) -> Optional[_ServicePipeline]: + return _ServicePipeline(interceptors) if interceptors else None + + +class _ClientCallDetails( + collections.namedtuple( + "_ClientCallDetails", + ( + "method", + "timeout", + "metadata", + "credentials", + "wait_for_ready", + "compression", + ), + ), + grpc.ClientCallDetails, +): + pass + + +def _unwrap_client_call_details( + call_details: grpc.ClientCallDetails, + default_details: grpc.ClientCallDetails, +) -> Tuple[ + str, float, MetadataType, grpc.CallCredentials, bool, grpc.Compression +]: + try: + method = call_details.method # pytype: disable=attribute-error + except AttributeError: + method = default_details.method # pytype: disable=attribute-error + + try: + timeout = call_details.timeout # pytype: disable=attribute-error + except AttributeError: + timeout = default_details.timeout # pytype: disable=attribute-error + + try: + metadata = call_details.metadata # pytype: disable=attribute-error + except AttributeError: + metadata = default_details.metadata # pytype: disable=attribute-error + + try: + credentials = ( + call_details.credentials + ) # pytype: disable=attribute-error + except AttributeError: + credentials = ( + default_details.credentials + ) # pytype: disable=attribute-error + + try: + wait_for_ready = ( + call_details.wait_for_ready + ) # pytype: disable=attribute-error + except AttributeError: + wait_for_ready = ( + default_details.wait_for_ready + ) # pytype: disable=attribute-error + + try: + compression = ( + call_details.compression + ) # pytype: disable=attribute-error + except AttributeError: + compression = ( + default_details.compression + ) # pytype: disable=attribute-error + + return method, timeout, metadata, credentials, wait_for_ready, compression + + +class _FailureOutcome( + grpc.RpcError, grpc.Future, grpc.Call +): # pylint: disable=too-many-ancestors + _exception: Exception + _traceback: types.TracebackType + + def __init__(self, exception: Exception, traceback: types.TracebackType): + super(_FailureOutcome, self).__init__() + self._exception = exception + self._traceback = traceback + + def initial_metadata(self) -> Optional[MetadataType]: + return None + + def trailing_metadata(self) -> Optional[MetadataType]: + return None + + def code(self) -> 
Optional[grpc.StatusCode]: + return grpc.StatusCode.INTERNAL + + def details(self) -> Optional[str]: + return "Exception raised while intercepting the RPC" + + def cancel(self) -> bool: + return False + + def cancelled(self) -> bool: + return False + + def is_active(self) -> bool: + return False + + def time_remaining(self) -> Optional[float]: + return None + + def running(self) -> bool: + return False + + def done(self) -> bool: + return True + + def result(self, ignored_timeout: Optional[float] = None): + raise self._exception + + def exception( + self, ignored_timeout: Optional[float] = None + ) -> Optional[Exception]: + return self._exception + + def traceback( + self, ignored_timeout: Optional[float] = None + ) -> Optional[types.TracebackType]: + return self._traceback + + def add_callback(self, unused_callback) -> bool: + return False + + def add_done_callback(self, fn: DoneCallbackType) -> None: + fn(self) + + def __iter__(self): + return self + + def __next__(self): + raise self._exception + + def next(self): + return self.__next__() + + +class _UnaryOutcome(grpc.Call, grpc.Future): + _response: Any + _call: grpc.Call + + def __init__(self, response: Any, call: grpc.Call): + self._response = response + self._call = call + + def initial_metadata(self) -> Optional[MetadataType]: + return self._call.initial_metadata() + + def trailing_metadata(self) -> Optional[MetadataType]: + return self._call.trailing_metadata() + + def code(self) -> Optional[grpc.StatusCode]: + return self._call.code() + + def details(self) -> Optional[str]: + return self._call.details() + + def is_active(self) -> bool: + return self._call.is_active() + + def time_remaining(self) -> Optional[float]: + return self._call.time_remaining() + + def cancel(self) -> bool: + return self._call.cancel() + + def add_callback(self, callback) -> bool: + return self._call.add_callback(callback) + + def cancelled(self) -> bool: + return False + + def running(self) -> bool: + return False + + def done(self) -> bool: + return True + + def result(self, ignored_timeout: Optional[float] = None): + return self._response + + def exception(self, ignored_timeout: Optional[float] = None): + return None + + def traceback(self, ignored_timeout: Optional[float] = None): + return None + + def add_done_callback(self, fn: DoneCallbackType) -> None: + fn(self) + + +class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): + _thunk: Callable + _method: str + _interceptor: grpc.UnaryUnaryClientInterceptor + + def __init__( + self, + thunk: Callable, + method: str, + interceptor: grpc.UnaryUnaryClientInterceptor, + ): + self._thunk = thunk + self._method = method + self._interceptor = interceptor + + def __call__( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + response, ignored_call = self._with_call( + request, + timeout=timeout, + metadata=metadata, + credentials=credentials, + wait_for_ready=wait_for_ready, + compression=compression, + ) + return response + + def _with_call( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + 
credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + try: + response, call = self._thunk(new_method).with_call( + request, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + return _UnaryOutcome(response, call) + except grpc.RpcError as rpc_error: + return rpc_error + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + call = self._interceptor.intercept_unary_unary( + continuation, client_call_details, request + ) + return call.result(), call + + def with_call( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + return self._with_call( + request, + timeout=timeout, + metadata=metadata, + credentials=credentials, + wait_for_ready=wait_for_ready, + compression=compression, + ) + + def future( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + return self._thunk(new_method).future( + request, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + + try: + return self._interceptor.intercept_unary_unary( + continuation, client_call_details, request + ) + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + +class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): + _thunk: Callable + _method: str + _interceptor: grpc.UnaryStreamClientInterceptor + + def __init__( + self, + thunk: Callable, + method: str, + interceptor: grpc.UnaryStreamClientInterceptor, + ): + self._thunk = thunk + self._method = method + self._interceptor = interceptor + + def __call__( + self, + request: Any, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ): + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + return self._thunk(new_method)( + request, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + + try: + return 
self._interceptor.intercept_unary_stream( + continuation, client_call_details, request + ) + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + +class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): + _thunk: Callable + _method: str + _interceptor: grpc.StreamUnaryClientInterceptor + + def __init__( + self, + thunk: Callable, + method: str, + interceptor: grpc.StreamUnaryClientInterceptor, + ): + self._thunk = thunk + self._method = method + self._interceptor = interceptor + + def __call__( + self, + request_iterator: RequestIterableType, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + response, ignored_call = self._with_call( + request_iterator, + timeout=timeout, + metadata=metadata, + credentials=credentials, + wait_for_ready=wait_for_ready, + compression=compression, + ) + return response + + def _with_call( + self, + request_iterator: RequestIterableType, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request_iterator): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + try: + response, call = self._thunk(new_method).with_call( + request_iterator, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + return _UnaryOutcome(response, call) + except grpc.RpcError as rpc_error: + return rpc_error + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + call = self._interceptor.intercept_stream_unary( + continuation, client_call_details, request_iterator + ) + return call.result(), call + + def with_call( + self, + request_iterator: RequestIterableType, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Tuple[Any, grpc.Call]: + return self._with_call( + request_iterator, + timeout=timeout, + metadata=metadata, + credentials=credentials, + wait_for_ready=wait_for_ready, + compression=compression, + ) + + def future( + self, + request_iterator: RequestIterableType, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> Any: + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request_iterator): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + return self._thunk(new_method).future( + 
request_iterator, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + + try: + return self._interceptor.intercept_stream_unary( + continuation, client_call_details, request_iterator + ) + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + +class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): + _thunk: Callable + _method: str + _interceptor: grpc.StreamStreamClientInterceptor + + def __init__( + self, + thunk: Callable, + method: str, + interceptor: grpc.StreamStreamClientInterceptor, + ): + self._thunk = thunk + self._method = method + self._interceptor = interceptor + + def __call__( + self, + request_iterator: RequestIterableType, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ): + client_call_details = _ClientCallDetails( + self._method, + timeout, + metadata, + credentials, + wait_for_ready, + compression, + ) + + def continuation(new_details, request_iterator): + ( + new_method, + new_timeout, + new_metadata, + new_credentials, + new_wait_for_ready, + new_compression, + ) = _unwrap_client_call_details(new_details, client_call_details) + return self._thunk(new_method)( + request_iterator, + timeout=new_timeout, + metadata=new_metadata, + credentials=new_credentials, + wait_for_ready=new_wait_for_ready, + compression=new_compression, + ) + + try: + return self._interceptor.intercept_stream_stream( + continuation, client_call_details, request_iterator + ) + except Exception as exception: # pylint:disable=broad-except + return _FailureOutcome(exception, sys.exc_info()[2]) + + +class _Channel(grpc.Channel): + _channel: grpc.Channel + _interceptor: Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + ] + + def __init__( + self, + channel: grpc.Channel, + interceptor: Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + ], + ): + self._channel = channel + self._interceptor = interceptor + + def subscribe( + self, callback: Callable, try_to_connect: Optional[bool] = False + ): + self._channel.subscribe(callback, try_to_connect=try_to_connect) + + def unsubscribe(self, callback: Callable): + self._channel.unsubscribe(callback) + + # pylint: disable=arguments-differ + def unary_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.UnaryUnaryMultiCallable: + # pytype: disable=wrong-arg-count + thunk = lambda m: self._channel.unary_unary( + m, + request_serializer, + response_deserializer, + _registered_method, + ) + # pytype: enable=wrong-arg-count + if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor): + return _UnaryUnaryMultiCallable(thunk, method, self._interceptor) + else: + return thunk(method) + + # pylint: disable=arguments-differ + def unary_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> 
grpc.UnaryStreamMultiCallable: + # pytype: disable=wrong-arg-count + thunk = lambda m: self._channel.unary_stream( + m, + request_serializer, + response_deserializer, + _registered_method, + ) + # pytype: enable=wrong-arg-count + if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor): + return _UnaryStreamMultiCallable(thunk, method, self._interceptor) + else: + return thunk(method) + + # pylint: disable=arguments-differ + def stream_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.StreamUnaryMultiCallable: + # pytype: disable=wrong-arg-count + thunk = lambda m: self._channel.stream_unary( + m, + request_serializer, + response_deserializer, + _registered_method, + ) + # pytype: enable=wrong-arg-count + if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor): + return _StreamUnaryMultiCallable(thunk, method, self._interceptor) + else: + return thunk(method) + + # pylint: disable=arguments-differ + def stream_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> grpc.StreamStreamMultiCallable: + # pytype: disable=wrong-arg-count + thunk = lambda m: self._channel.stream_stream( + m, + request_serializer, + response_deserializer, + _registered_method, + ) + # pytype: enable=wrong-arg-count + if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor): + return _StreamStreamMultiCallable(thunk, method, self._interceptor) + else: + return thunk(method) + + def _close(self): + self._channel.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._close() + return False + + def close(self): + self._channel.close() + + +def intercept_channel( + channel: grpc.Channel, + *interceptors: Optional[ + Sequence[ + Union[ + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + ] + ] + ], +) -> grpc.Channel: + for interceptor in reversed(list(interceptors)): + if ( + not isinstance(interceptor, grpc.UnaryUnaryClientInterceptor) + and not isinstance(interceptor, grpc.UnaryStreamClientInterceptor) + and not isinstance(interceptor, grpc.StreamUnaryClientInterceptor) + and not isinstance(interceptor, grpc.StreamStreamClientInterceptor) + ): + raise TypeError( + "interceptor must be " + "grpc.UnaryUnaryClientInterceptor or " + "grpc.UnaryStreamClientInterceptor or " + "grpc.StreamUnaryClientInterceptor or " + "grpc.StreamStreamClientInterceptor or " + ) + channel = _Channel(channel, interceptor) + return channel diff --git a/MLPY/Lib/site-packages/grpc/_observability.py b/MLPY/Lib/site-packages/grpc/_observability.py new file mode 100644 index 0000000000000000000000000000000000000000..5a23e3bdc8cac9822fd0fdc22ccc9f5dec1d1642 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_observability.py @@ -0,0 +1,332 @@ +# Copyright 2023 The gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import abc +import contextlib +import logging +import threading +from typing import Any, Generator, Generic, List, Optional, TypeVar + +from grpc._cython import cygrpc as _cygrpc +from grpc._typing import ChannelArgumentType + +_LOGGER = logging.getLogger(__name__) + +_channel = Any # _channel.py imports this module. +ClientCallTracerCapsule = TypeVar("ClientCallTracerCapsule") +ServerCallTracerFactoryCapsule = TypeVar("ServerCallTracerFactoryCapsule") + +_plugin_lock: threading.RLock = threading.RLock() +_OBSERVABILITY_PLUGIN: Optional["ObservabilityPlugin"] = None +_SERVICES_TO_EXCLUDE: List[bytes] = [ + b"google.monitoring.v3.MetricService", + b"google.devtools.cloudtrace.v2.TraceService", +] + + +class ServerCallTracerFactory: + """An encapsulation of a ServerCallTracerFactory. + + Instances of this class can be passed to a Channel as values for the + grpc.experimental.server_call_tracer_factory option + """ + + def __init__(self, address): + self._address = address + + def __int__(self): + return self._address + + +class ObservabilityPlugin( + Generic[ClientCallTracerCapsule, ServerCallTracerFactoryCapsule], + metaclass=abc.ABCMeta, +): + """Abstract base class for observability plugin. + + *This is a semi-private class that was intended for the exclusive use of + the gRPC team.* + + The ClientCallTracerCapsule and ClientCallTracerCapsule created by this + plugin should be inject to gRPC core using observability_init at the + start of a program, before any channels/servers are built. + + Any future methods added to this interface cannot have the + @abc.abstractmethod annotation. + + Attributes: + _stats_enabled: A bool indicates whether tracing is enabled. + _tracing_enabled: A bool indicates whether stats(metrics) is enabled. + _registered_methods: A set which stores the registered method names in + bytes. + """ + + _tracing_enabled: bool = False + _stats_enabled: bool = False + + @abc.abstractmethod + def create_client_call_tracer( + self, method_name: bytes, target: bytes + ) -> ClientCallTracerCapsule: + """Creates a ClientCallTracerCapsule. + + After register the plugin, if tracing or stats is enabled, this method + will be called after a call was created, the ClientCallTracer created + by this method will be saved to call context. + + The ClientCallTracer is an object which implements `grpc_core::ClientCallTracer` + interface and wrapped in a PyCapsule using `client_call_tracer` as name. + + Args: + method_name: The method name of the call in byte format. + target: The channel target of the call in byte format. + registered_method: Wether this method is pre-registered. + + Returns: + A PyCapsule which stores a ClientCallTracer object. + """ + raise NotImplementedError() + + @abc.abstractmethod + def delete_client_call_tracer( + self, client_call_tracer: ClientCallTracerCapsule + ) -> None: + """Deletes the ClientCallTracer stored in ClientCallTracerCapsule. + + After register the plugin, if tracing or stats is enabled, this method + will be called at the end of the call to destroy the ClientCallTracer. 
+ + The ClientCallTracer is an object which implements `grpc_core::ClientCallTracer` + interface and wrapped in a PyCapsule using `client_call_tracer` as name. + + Args: + client_call_tracer: A PyCapsule which stores a ClientCallTracer object. + """ + raise NotImplementedError() + + @abc.abstractmethod + def save_trace_context( + self, trace_id: str, span_id: str, is_sampled: bool + ) -> None: + """Saves the trace_id and span_id related to the current span. + + After register the plugin, if tracing is enabled, this method will be + called after the server finished sending response. + + This method can be used to propagate census context. + + Args: + trace_id: The identifier for the trace associated with the span as a + 32-character hexadecimal encoded string, + e.g. 26ed0036f2eff2b7317bccce3e28d01f + span_id: The identifier for the span as a 16-character hexadecimal encoded + string. e.g. 113ec879e62583bc + is_sampled: A bool indicates whether the span is sampled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def create_server_call_tracer_factory( + self, + *, + xds: bool = False, + ) -> Optional[ServerCallTracerFactoryCapsule]: + """Creates a ServerCallTracerFactoryCapsule. + + This method will be called at server initialization time to create a + ServerCallTracerFactory, which will be registered to gRPC core. + + The ServerCallTracerFactory is an object which implements + `grpc_core::ServerCallTracerFactory` interface and wrapped in a PyCapsule + using `server_call_tracer_factory` as name. + + Args: + xds: Whether the server is xds server. + Returns: + A PyCapsule which stores a ServerCallTracerFactory object. Or None if + plugin decides not to create ServerCallTracerFactory. + """ + raise NotImplementedError() + + @abc.abstractmethod + def record_rpc_latency( + self, method: str, target: str, rpc_latency: float, status_code: Any + ) -> None: + """Record the latency of the RPC. + + After register the plugin, if stats is enabled, this method will be + called at the end of each RPC. + + Args: + method: The fully-qualified name of the RPC method being invoked. + target: The target name of the RPC method being invoked. + rpc_latency: The latency for the RPC in seconds, equals to the time between + when the client invokes the RPC and when the client receives the status. + status_code: An element of grpc.StatusCode in string format representing the + final status for the RPC. + """ + raise NotImplementedError() + + def set_tracing(self, enable: bool) -> None: + """Enable or disable tracing. + + Args: + enable: A bool indicates whether tracing should be enabled. + """ + self._tracing_enabled = enable + + def set_stats(self, enable: bool) -> None: + """Enable or disable stats(metrics). + + Args: + enable: A bool indicates whether stats should be enabled. + """ + self._stats_enabled = enable + + def save_registered_method(self, method_name: bytes) -> None: + """Saves the method name to registered_method list. + + When exporting metrics, method name for unregistered methods will be replaced + with 'other' by default. + + Args: + method_name: The method name in bytes. 
+        """
+        raise NotImplementedError()
+
+    @property
+    def tracing_enabled(self) -> bool:
+        return self._tracing_enabled
+
+    @property
+    def stats_enabled(self) -> bool:
+        return self._stats_enabled
+
+    @property
+    def observability_enabled(self) -> bool:
+        return self.tracing_enabled or self.stats_enabled
+
+
+@contextlib.contextmanager
+def get_plugin() -> Generator[Optional[ObservabilityPlugin], None, None]:
+    """Get the ObservabilityPlugin registered with the _observability module.
+
+    Returns:
+        The ObservabilityPlugin currently registered with the _observability
+        module, or None if no plugin exists at the time of calling this method.
+    """
+    with _plugin_lock:
+        yield _OBSERVABILITY_PLUGIN
+
+
+def set_plugin(observability_plugin: Optional[ObservabilityPlugin]) -> None:
+    """Save the ObservabilityPlugin to the _observability module.
+
+    Args:
+        observability_plugin: The ObservabilityPlugin to save.
+
+    Raises:
+        ValueError: If an ObservabilityPlugin was already registered at the
+            time of calling this method.
+    """
+    global _OBSERVABILITY_PLUGIN  # pylint: disable=global-statement
+    with _plugin_lock:
+        if observability_plugin and _OBSERVABILITY_PLUGIN:
+            raise ValueError("observability_plugin was already set!")
+        _OBSERVABILITY_PLUGIN = observability_plugin
+
+
+def observability_init(observability_plugin: ObservabilityPlugin) -> None:
+    """Initialize observability with the provided ObservabilityPlugin.
+
+    This method has to be called at the start of a program, before any
+    channels/servers are built.
+
+    Args:
+        observability_plugin: The ObservabilityPlugin to use.
+
+    Raises:
+        ValueError: If an ObservabilityPlugin was already registered at the
+            time of calling this method.
+    """
+    set_plugin(observability_plugin)
+
+
+def observability_deinit() -> None:
+    """Clear the observability context, including the ObservabilityPlugin and
+    the ServerCallTracerFactory.
+
+    This method has to be called after exiting the observability context so
+    that observability can be re-initialized later.
+    """
+    set_plugin(None)
+    _cygrpc.clear_server_call_tracer_factory()
+
+
+def delete_call_tracer(client_call_tracer_capsule: Any) -> None:
+    """Deletes the ClientCallTracer stored in ClientCallTracerCapsule.
+
+    This method will be called at the end of the call to destroy the
+    ClientCallTracer.
+
+    The ClientCallTracer is an object which implements the
+    `grpc_core::ClientCallTracer` interface and is wrapped in a PyCapsule
+    using `client_call_tracer` as the name.
+
+    Args:
+        client_call_tracer_capsule: A PyCapsule which stores a ClientCallTracer
+            object.
+    """
+    with get_plugin() as plugin:
+        if plugin and plugin.observability_enabled:
+            plugin.delete_client_call_tracer(client_call_tracer_capsule)
+
+
+def maybe_record_rpc_latency(state: "_channel._RPCState") -> None:
+    """Record the latency of the RPC, if the plugin is registered and stats is enabled.
+
+    This method will be called at the end of each RPC.
+
+    Args:
+      state: A grpc._channel._RPCState object which contains the stats related
+        to the RPC.
+    """
+    # TODO(xuanwn): use channel args to exclude those metrics.
+ for exclude_prefix in _SERVICES_TO_EXCLUDE: + if exclude_prefix in state.method.encode("utf8"): + return + with get_plugin() as plugin: + if plugin and plugin.stats_enabled: + rpc_latency_s = state.rpc_end_time - state.rpc_start_time + rpc_latency_ms = rpc_latency_s * 1000 + plugin.record_rpc_latency( + state.method, state.target, rpc_latency_ms, state.code + ) + + +def create_server_call_tracer_factory_option(xds: bool) -> ChannelArgumentType: + with get_plugin() as plugin: + if plugin and plugin.stats_enabled: + server_call_tracer_factory_address = ( + _cygrpc.get_server_call_tracer_factory_address(plugin, xds) + ) + if server_call_tracer_factory_address: + return ( + ( + "grpc.experimental.server_call_tracer_factory", + ServerCallTracerFactory( + server_call_tracer_factory_address + ), + ), + ) + return () diff --git a/MLPY/Lib/site-packages/grpc/_plugin_wrapping.py b/MLPY/Lib/site-packages/grpc/_plugin_wrapping.py new file mode 100644 index 0000000000000000000000000000000000000000..3522a60f7d86fac6cebdc134be13f713b7bc7cc6 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_plugin_wrapping.py @@ -0,0 +1,136 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import logging +import threading +from typing import Callable, Optional, Type + +import grpc +from grpc import _common +from grpc._cython import cygrpc +from grpc._typing import MetadataType + +_LOGGER = logging.getLogger(__name__) + + +class _AuthMetadataContext( + collections.namedtuple( + "AuthMetadataContext", + ( + "service_url", + "method_name", + ), + ), + grpc.AuthMetadataContext, +): + pass + + +class _CallbackState(object): + def __init__(self): + self.lock = threading.Lock() + self.called = False + self.exception = None + + +class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback): + _state: _CallbackState + _callback: Callable + + def __init__(self, state: _CallbackState, callback: Callable): + self._state = state + self._callback = callback + + def __call__( + self, metadata: MetadataType, error: Optional[Type[BaseException]] + ): + with self._state.lock: + if self._state.exception is None: + if self._state.called: + raise RuntimeError( + "AuthMetadataPluginCallback invoked more than once!" + ) + else: + self._state.called = True + else: + raise RuntimeError( + 'AuthMetadataPluginCallback raised exception "{}"!'.format( + self._state.exception + ) + ) + if error is None: + self._callback(metadata, cygrpc.StatusCode.ok, None) + else: + self._callback( + None, cygrpc.StatusCode.internal, _common.encode(str(error)) + ) + + +class _Plugin(object): + _metadata_plugin: grpc.AuthMetadataPlugin + + def __init__(self, metadata_plugin: grpc.AuthMetadataPlugin): + self._metadata_plugin = metadata_plugin + self._stored_ctx = None + + try: + import contextvars # pylint: disable=wrong-import-position + + # The plugin may be invoked on a thread created by Core, which will not + # have the context propagated. 
This context is stored and installed in + # the thread invoking the plugin. + self._stored_ctx = contextvars.copy_context() + except ImportError: + # Support versions predating contextvars. + pass + + def __call__(self, service_url: str, method_name: str, callback: Callable): + context = _AuthMetadataContext( + _common.decode(service_url), _common.decode(method_name) + ) + callback_state = _CallbackState() + try: + self._metadata_plugin( + context, _AuthMetadataPluginCallback(callback_state, callback) + ) + except Exception as exception: # pylint: disable=broad-except + _LOGGER.exception( + 'AuthMetadataPluginCallback "%s" raised exception!', + self._metadata_plugin, + ) + with callback_state.lock: + callback_state.exception = exception + if callback_state.called: + return + callback( + None, cygrpc.StatusCode.internal, _common.encode(str(exception)) + ) + + +def metadata_plugin_call_credentials( + metadata_plugin: grpc.AuthMetadataPlugin, name: Optional[str] +) -> grpc.CallCredentials: + if name is None: + try: + effective_name = metadata_plugin.__name__ + except AttributeError: + effective_name = metadata_plugin.__class__.__name__ + else: + effective_name = name + return grpc.CallCredentials( + cygrpc.MetadataPluginCallCredentials( + _Plugin(metadata_plugin), _common.encode(effective_name) + ) + ) diff --git a/MLPY/Lib/site-packages/grpc/_runtime_protos.py b/MLPY/Lib/site-packages/grpc/_runtime_protos.py new file mode 100644 index 0000000000000000000000000000000000000000..d32de5726eb068d252735445c475c502d2aa7526 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_runtime_protos.py @@ -0,0 +1,165 @@ +# Copyright 2020 The gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import types +from typing import Tuple, Union + +_REQUIRED_SYMBOLS = ("_protos", "_services", "_protos_and_services") +_MINIMUM_VERSION = (3, 5, 0) + +_UNINSTALLED_TEMPLATE = ( + "Install the grpcio-tools package (1.32.0+) to use the {} function." +) +_VERSION_ERROR_TEMPLATE = ( + "The {} function is only on available on Python 3.X interpreters." +) + + +def _has_runtime_proto_symbols(mod: types.ModuleType) -> bool: + return all(hasattr(mod, sym) for sym in _REQUIRED_SYMBOLS) + + +def _is_grpc_tools_importable() -> bool: + try: + import grpc_tools # pylint: disable=unused-import # pytype: disable=import-error + + return True + except ImportError as e: + # NOTE: It's possible that we're encountering a transitive ImportError, so + # we check for that and re-raise if so. + if "grpc_tools" not in e.args[0]: + raise + return False + + +def _call_with_lazy_import( + fn_name: str, protobuf_path: str +) -> Union[types.ModuleType, Tuple[types.ModuleType, types.ModuleType]]: + """Calls one of the three functions, lazily importing grpc_tools. + + Args: + fn_name: The name of the function to import from grpc_tools.protoc. + protobuf_path: The path to import. + + Returns: + The appropriate module object. 
+ """ + if sys.version_info < _MINIMUM_VERSION: + raise NotImplementedError(_VERSION_ERROR_TEMPLATE.format(fn_name)) + else: + if not _is_grpc_tools_importable(): + raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name)) + import grpc_tools.protoc # pytype: disable=import-error + + if _has_runtime_proto_symbols(grpc_tools.protoc): + fn = getattr(grpc_tools.protoc, "_" + fn_name) + return fn(protobuf_path) + else: + raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name)) + + +def protos(protobuf_path): # pylint: disable=unused-argument + """Returns a module generated by the indicated .proto file. + + THIS IS AN EXPERIMENTAL API. + + Use this function to retrieve classes corresponding to message + definitions in the .proto file. + + To inspect the contents of the returned module, use the dir function. + For example: + + ``` + protos = grpc.protos("foo.proto") + print(dir(protos)) + ``` + + The returned module object corresponds to the _pb2.py file generated + by protoc. The path is expected to be relative to an entry on sys.path + and all transitive dependencies of the file should also be resolveable + from an entry on sys.path. + + To completely disable the machinery behind this function, set the + GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true". + + Args: + protobuf_path: The path to the .proto file on the filesystem. This path + must be resolveable from an entry on sys.path and so must all of its + transitive dependencies. + + Returns: + A module object corresponding to the message code for the indicated + .proto file. Equivalent to a generated _pb2.py file. + """ + return _call_with_lazy_import("protos", protobuf_path) + + +def services(protobuf_path): # pylint: disable=unused-argument + """Returns a module generated by the indicated .proto file. + + THIS IS AN EXPERIMENTAL API. + + Use this function to retrieve classes and functions corresponding to + service definitions in the .proto file, including both stub and servicer + definitions. + + To inspect the contents of the returned module, use the dir function. + For example: + + ``` + services = grpc.services("foo.proto") + print(dir(services)) + ``` + + The returned module object corresponds to the _pb2_grpc.py file generated + by protoc. The path is expected to be relative to an entry on sys.path + and all transitive dependencies of the file should also be resolveable + from an entry on sys.path. + + To completely disable the machinery behind this function, set the + GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true". + + Args: + protobuf_path: The path to the .proto file on the filesystem. This path + must be resolveable from an entry on sys.path and so must all of its + transitive dependencies. + + Returns: + A module object corresponding to the stub/service code for the indicated + .proto file. Equivalent to a generated _pb2_grpc.py file. + """ + return _call_with_lazy_import("services", protobuf_path) + + +def protos_and_services(protobuf_path): # pylint: disable=unused-argument + """Returns a 2-tuple of modules corresponding to protos and services. + + THIS IS AN EXPERIMENTAL API. + + The return value of this function is equivalent to a call to protos and a + call to services. + + To completely disable the machinery behind this function, set the + GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true". + + Args: + protobuf_path: The path to the .proto file on the filesystem. 
This path + must be resolveable from an entry on sys.path and so must all of its + transitive dependencies. + + Returns: + A 2-tuple of module objects corresponding to (protos(path), services(path)). + """ + return _call_with_lazy_import("protos_and_services", protobuf_path) diff --git a/MLPY/Lib/site-packages/grpc/_server.py b/MLPY/Lib/site-packages/grpc/_server.py new file mode 100644 index 0000000000000000000000000000000000000000..8348751635cc967443490671e76675e4fc6308ae --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_server.py @@ -0,0 +1,1528 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Service-side implementation of gRPC Python.""" + +from __future__ import annotations + +import abc +import collections +from concurrent import futures +import contextvars +import enum +import logging +import threading +import time +import traceback +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import grpc # pytype: disable=pyi-error +from grpc import _common # pytype: disable=pyi-error +from grpc import _compression # pytype: disable=pyi-error +from grpc import _interceptor # pytype: disable=pyi-error +from grpc import _observability # pytype: disable=pyi-error +from grpc._cython import cygrpc +from grpc._typing import ArityAgnosticMethodHandler +from grpc._typing import ChannelArgumentType +from grpc._typing import DeserializingFunction +from grpc._typing import MetadataType +from grpc._typing import NullaryCallbackType +from grpc._typing import ResponseType +from grpc._typing import SerializingFunction +from grpc._typing import ServerCallbackTag +from grpc._typing import ServerTagCallbackType + +_LOGGER = logging.getLogger(__name__) + +_SHUTDOWN_TAG = "shutdown" +_REQUEST_CALL_TAG = "request_call" + +_RECEIVE_CLOSE_ON_SERVER_TOKEN = "receive_close_on_server" +_SEND_INITIAL_METADATA_TOKEN = "send_initial_metadata" +_RECEIVE_MESSAGE_TOKEN = "receive_message" +_SEND_MESSAGE_TOKEN = "send_message" +_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = ( + "send_initial_metadata * send_message" +) +_SEND_STATUS_FROM_SERVER_TOKEN = "send_status_from_server" +_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = ( + "send_initial_metadata * send_status_from_server" +) + +_OPEN = "open" +_CLOSED = "closed" +_CANCELLED = "cancelled" + +_EMPTY_FLAGS = 0 + +_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0 +_INF_TIMEOUT = 1e9 + + +def _serialized_request(request_event: cygrpc.BaseEvent) -> bytes: + return request_event.batch_operations[0].message() + + +def _application_code(code: grpc.StatusCode) -> cygrpc.StatusCode: + cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code) + return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code + + +def _completion_code(state: _RPCState) -> cygrpc.StatusCode: + if state.code is None: + return cygrpc.StatusCode.ok + else: + return _application_code(state.code) + + +def _abortion_code( + state: _RPCState, code: 
cygrpc.StatusCode +) -> cygrpc.StatusCode: + if state.code is None: + return code + else: + return _application_code(state.code) + + +def _details(state: _RPCState) -> bytes: + return b"" if state.details is None else state.details + + +class _HandlerCallDetails( + collections.namedtuple( + "_HandlerCallDetails", + ( + "method", + "invocation_metadata", + ), + ), + grpc.HandlerCallDetails, +): + pass + + +class _Method(abc.ABC): + @abc.abstractmethod + def name(self) -> Optional[str]: + raise NotImplementedError() + + @abc.abstractmethod + def handler( + self, handler_call_details: _HandlerCallDetails + ) -> Optional[grpc.RpcMethodHandler]: + raise NotImplementedError() + + +class _RegisteredMethod(_Method): + def __init__( + self, + name: str, + registered_handler: Optional[grpc.RpcMethodHandler], + ): + self._name = name + self._registered_handler = registered_handler + + def name(self) -> Optional[str]: + return self._name + + def handler( + self, handler_call_details: _HandlerCallDetails + ) -> Optional[grpc.RpcMethodHandler]: + return self._registered_handler + + +class _GenericMethod(_Method): + def __init__( + self, + generic_handlers: List[grpc.GenericRpcHandler], + ): + self._generic_handlers = generic_handlers + + def name(self) -> Optional[str]: + return None + + def handler( + self, handler_call_details: _HandlerCallDetails + ) -> Optional[grpc.RpcMethodHandler]: + # If the same method have both generic and registered handler, + # registered handler will take precedence. + for generic_handler in self._generic_handlers: + method_handler = generic_handler.service(handler_call_details) + if method_handler is not None: + return method_handler + return None + + +class _RPCState(object): + context: contextvars.Context + condition: threading.Condition + due = Set[str] + request: Any + client: str + initial_metadata_allowed: bool + compression_algorithm: Optional[grpc.Compression] + disable_next_compression: bool + trailing_metadata: Optional[MetadataType] + code: Optional[grpc.StatusCode] + details: Optional[bytes] + statused: bool + rpc_errors: List[Exception] + callbacks: Optional[List[NullaryCallbackType]] + aborted: bool + + def __init__(self): + self.context = contextvars.Context() + self.condition = threading.Condition() + self.due = set() + self.request = None + self.client = _OPEN + self.initial_metadata_allowed = True + self.compression_algorithm = None + self.disable_next_compression = False + self.trailing_metadata = None + self.code = None + self.details = None + self.statused = False + self.rpc_errors = [] + self.callbacks = [] + self.aborted = False + + +def _raise_rpc_error(state: _RPCState) -> None: + rpc_error = grpc.RpcError() + state.rpc_errors.append(rpc_error) + raise rpc_error + + +def _possibly_finish_call( + state: _RPCState, token: str +) -> ServerTagCallbackType: + state.due.remove(token) + if not _is_rpc_state_active(state) and not state.due: + callbacks = state.callbacks + state.callbacks = None + return state, callbacks + else: + return None, () + + +def _send_status_from_server(state: _RPCState, token: str) -> ServerCallbackTag: + def send_status_from_server(unused_send_status_from_server_event): + with state.condition: + return _possibly_finish_call(state, token) + + return send_status_from_server + + +def _get_initial_metadata( + state: _RPCState, metadata: Optional[MetadataType] +) -> Optional[MetadataType]: + with state.condition: + if state.compression_algorithm: + compression_metadata = ( + _compression.compression_algorithm_to_metadata( + 
state.compression_algorithm + ), + ) + if metadata is None: + return compression_metadata + else: + return compression_metadata + tuple(metadata) + else: + return metadata + + +def _get_initial_metadata_operation( + state: _RPCState, metadata: Optional[MetadataType] +) -> cygrpc.Operation: + operation = cygrpc.SendInitialMetadataOperation( + _get_initial_metadata(state, metadata), _EMPTY_FLAGS + ) + return operation + + +def _abort( + state: _RPCState, call: cygrpc.Call, code: cygrpc.StatusCode, details: bytes +) -> None: + if state.client is not _CANCELLED: + effective_code = _abortion_code(state, code) + effective_details = details if state.details is None else state.details + if state.initial_metadata_allowed: + operations = ( + _get_initial_metadata_operation(state, None), + cygrpc.SendStatusFromServerOperation( + state.trailing_metadata, + effective_code, + effective_details, + _EMPTY_FLAGS, + ), + ) + token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN + else: + operations = ( + cygrpc.SendStatusFromServerOperation( + state.trailing_metadata, + effective_code, + effective_details, + _EMPTY_FLAGS, + ), + ) + token = _SEND_STATUS_FROM_SERVER_TOKEN + call.start_server_batch( + operations, _send_status_from_server(state, token) + ) + state.statused = True + state.due.add(token) + + +def _receive_close_on_server(state: _RPCState) -> ServerCallbackTag: + def receive_close_on_server(receive_close_on_server_event): + with state.condition: + if receive_close_on_server_event.batch_operations[0].cancelled(): + state.client = _CANCELLED + elif state.client is _OPEN: + state.client = _CLOSED + state.condition.notify_all() + return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN) + + return receive_close_on_server + + +def _receive_message( + state: _RPCState, + call: cygrpc.Call, + request_deserializer: Optional[DeserializingFunction], +) -> ServerCallbackTag: + def receive_message(receive_message_event): + serialized_request = _serialized_request(receive_message_event) + if serialized_request is None: + with state.condition: + if state.client is _OPEN: + state.client = _CLOSED + state.condition.notify_all() + return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN) + else: + request = _common.deserialize( + serialized_request, request_deserializer + ) + with state.condition: + if request is None: + _abort( + state, + call, + cygrpc.StatusCode.internal, + b"Exception deserializing request!", + ) + else: + state.request = request + state.condition.notify_all() + return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN) + + return receive_message + + +def _send_initial_metadata(state: _RPCState) -> ServerCallbackTag: + def send_initial_metadata(unused_send_initial_metadata_event): + with state.condition: + return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN) + + return send_initial_metadata + + +def _send_message(state: _RPCState, token: str) -> ServerCallbackTag: + def send_message(unused_send_message_event): + with state.condition: + state.condition.notify_all() + return _possibly_finish_call(state, token) + + return send_message + + +class _Context(grpc.ServicerContext): + _rpc_event: cygrpc.BaseEvent + _state: _RPCState + request_deserializer: Optional[DeserializingFunction] + + def __init__( + self, + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + request_deserializer: Optional[DeserializingFunction], + ): + self._rpc_event = rpc_event + self._state = state + self._request_deserializer = request_deserializer + + def is_active(self) -> bool: + with 
self._state.condition: + return _is_rpc_state_active(self._state) + + def time_remaining(self) -> float: + return max(self._rpc_event.call_details.deadline - time.time(), 0) + + def cancel(self) -> None: + self._rpc_event.call.cancel() + + def add_callback(self, callback: NullaryCallbackType) -> bool: + with self._state.condition: + if self._state.callbacks is None: + return False + else: + self._state.callbacks.append(callback) + return True + + def disable_next_message_compression(self) -> None: + with self._state.condition: + self._state.disable_next_compression = True + + def invocation_metadata(self) -> Optional[MetadataType]: + return self._rpc_event.invocation_metadata + + def peer(self) -> str: + return _common.decode(self._rpc_event.call.peer()) + + def peer_identities(self) -> Optional[Sequence[bytes]]: + return cygrpc.peer_identities(self._rpc_event.call) + + def peer_identity_key(self) -> Optional[str]: + id_key = cygrpc.peer_identity_key(self._rpc_event.call) + return id_key if id_key is None else _common.decode(id_key) + + def auth_context(self) -> Mapping[str, Sequence[bytes]]: + auth_context = cygrpc.auth_context(self._rpc_event.call) + auth_context_dict = {} if auth_context is None else auth_context + return { + _common.decode(key): value + for key, value in auth_context_dict.items() + } + + def set_compression(self, compression: grpc.Compression) -> None: + with self._state.condition: + self._state.compression_algorithm = compression + + def send_initial_metadata(self, initial_metadata: MetadataType) -> None: + with self._state.condition: + if self._state.client is _CANCELLED: + _raise_rpc_error(self._state) + else: + if self._state.initial_metadata_allowed: + operation = _get_initial_metadata_operation( + self._state, initial_metadata + ) + self._rpc_event.call.start_server_batch( + (operation,), _send_initial_metadata(self._state) + ) + self._state.initial_metadata_allowed = False + self._state.due.add(_SEND_INITIAL_METADATA_TOKEN) + else: + raise ValueError("Initial metadata no longer allowed!") + + def set_trailing_metadata(self, trailing_metadata: MetadataType) -> None: + with self._state.condition: + self._state.trailing_metadata = trailing_metadata + + def trailing_metadata(self) -> Optional[MetadataType]: + return self._state.trailing_metadata + + def abort(self, code: grpc.StatusCode, details: str) -> None: + # treat OK like other invalid arguments: fail the RPC + if code == grpc.StatusCode.OK: + _LOGGER.error( + "abort() called with StatusCode.OK; returning UNKNOWN" + ) + code = grpc.StatusCode.UNKNOWN + details = "" + with self._state.condition: + self._state.code = code + self._state.details = _common.encode(details) + self._state.aborted = True + raise Exception() + + def abort_with_status(self, status: grpc.Status) -> None: + self._state.trailing_metadata = status.trailing_metadata + self.abort(status.code, status.details) + + def set_code(self, code: grpc.StatusCode) -> None: + with self._state.condition: + self._state.code = code + + def code(self) -> grpc.StatusCode: + return self._state.code + + def set_details(self, details: str) -> None: + with self._state.condition: + self._state.details = _common.encode(details) + + def details(self) -> bytes: + return self._state.details + + def _finalize_state(self) -> None: + pass + + +class _RequestIterator(object): + _state: _RPCState + _call: cygrpc.Call + _request_deserializer: Optional[DeserializingFunction] + + def __init__( + self, + state: _RPCState, + call: cygrpc.Call, + request_deserializer: 
Optional[DeserializingFunction], + ): + self._state = state + self._call = call + self._request_deserializer = request_deserializer + + def _raise_or_start_receive_message(self) -> None: + if self._state.client is _CANCELLED: + _raise_rpc_error(self._state) + elif not _is_rpc_state_active(self._state): + raise StopIteration() + else: + self._call.start_server_batch( + (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), + _receive_message( + self._state, self._call, self._request_deserializer + ), + ) + self._state.due.add(_RECEIVE_MESSAGE_TOKEN) + + def _look_for_request(self) -> Any: + if self._state.client is _CANCELLED: + _raise_rpc_error(self._state) + elif ( + self._state.request is None + and _RECEIVE_MESSAGE_TOKEN not in self._state.due + ): + raise StopIteration() + else: + request = self._state.request + self._state.request = None + return request + + raise AssertionError() # should never run + + def _next(self) -> Any: + with self._state.condition: + self._raise_or_start_receive_message() + while True: + self._state.condition.wait() + request = self._look_for_request() + if request is not None: + return request + + def __iter__(self) -> _RequestIterator: + return self + + def __next__(self) -> Any: + return self._next() + + def next(self) -> Any: + return self._next() + + +def _unary_request( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + request_deserializer: Optional[DeserializingFunction], +) -> Callable[[], Any]: + def unary_request(): + with state.condition: + if not _is_rpc_state_active(state): + return None + else: + rpc_event.call.start_server_batch( + (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), + _receive_message( + state, rpc_event.call, request_deserializer + ), + ) + state.due.add(_RECEIVE_MESSAGE_TOKEN) + while True: + state.condition.wait() + if state.request is None: + if state.client is _CLOSED: + details = '"{}" requires exactly one request message.'.format( + rpc_event.call_details.method + ) + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.unimplemented, + _common.encode(details), + ) + return None + elif state.client is _CANCELLED: + return None + else: + request = state.request + state.request = None + return request + + return unary_request + + +def _call_behavior( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + behavior: ArityAgnosticMethodHandler, + argument: Any, + request_deserializer: Optional[DeserializingFunction], + send_response_callback: Optional[Callable[[ResponseType], None]] = None, +) -> Tuple[Union[ResponseType, Iterator[ResponseType]], bool]: + from grpc import _create_servicer_context # pytype: disable=pyi-error + + with _create_servicer_context( + rpc_event, state, request_deserializer + ) as context: + try: + response_or_iterator = None + if send_response_callback is not None: + response_or_iterator = behavior( + argument, context, send_response_callback + ) + else: + response_or_iterator = behavior(argument, context) + return response_or_iterator, True + except Exception as exception: # pylint: disable=broad-except + with state.condition: + if state.aborted: + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.unknown, + b"RPC Aborted", + ) + elif exception not in state.rpc_errors: + try: + details = "Exception calling application: {}".format( + exception + ) + except Exception: # pylint: disable=broad-except + details = ( + "Calling application raised unprintable Exception!" 
+ ) + _LOGGER.exception( + traceback.format_exception( + type(exception), + exception, + exception.__traceback__, + ) + ) + traceback.print_exc() + _LOGGER.exception(details) + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.unknown, + _common.encode(details), + ) + return None, False + + +def _take_response_from_response_iterator( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + response_iterator: Iterator[ResponseType], +) -> Tuple[ResponseType, bool]: + try: + return next(response_iterator), True + except StopIteration: + return None, True + except Exception as exception: # pylint: disable=broad-except + with state.condition: + if state.aborted: + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.unknown, + b"RPC Aborted", + ) + elif exception not in state.rpc_errors: + details = "Exception iterating responses: {}".format(exception) + _LOGGER.exception(details) + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.unknown, + _common.encode(details), + ) + return None, False + + +def _serialize_response( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + response: Any, + response_serializer: Optional[SerializingFunction], +) -> Optional[bytes]: + serialized_response = _common.serialize(response, response_serializer) + if serialized_response is None: + with state.condition: + _abort( + state, + rpc_event.call, + cygrpc.StatusCode.internal, + b"Failed to serialize response!", + ) + return None + else: + return serialized_response + + +def _get_send_message_op_flags_from_state( + state: _RPCState, +) -> Union[int, cygrpc.WriteFlag]: + if state.disable_next_compression: + return cygrpc.WriteFlag.no_compress + else: + return _EMPTY_FLAGS + + +def _reset_per_message_state(state: _RPCState) -> None: + with state.condition: + state.disable_next_compression = False + + +def _send_response( + rpc_event: cygrpc.BaseEvent, state: _RPCState, serialized_response: bytes +) -> bool: + with state.condition: + if not _is_rpc_state_active(state): + return False + else: + if state.initial_metadata_allowed: + operations = ( + _get_initial_metadata_operation(state, None), + cygrpc.SendMessageOperation( + serialized_response, + _get_send_message_op_flags_from_state(state), + ), + ) + state.initial_metadata_allowed = False + token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN + else: + operations = ( + cygrpc.SendMessageOperation( + serialized_response, + _get_send_message_op_flags_from_state(state), + ), + ) + token = _SEND_MESSAGE_TOKEN + rpc_event.call.start_server_batch( + operations, _send_message(state, token) + ) + state.due.add(token) + _reset_per_message_state(state) + while True: + state.condition.wait() + if token not in state.due: + return _is_rpc_state_active(state) + + +def _status( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + serialized_response: Optional[bytes], +) -> None: + with state.condition: + if state.client is not _CANCELLED: + code = _completion_code(state) + details = _details(state) + operations = [ + cygrpc.SendStatusFromServerOperation( + state.trailing_metadata, code, details, _EMPTY_FLAGS + ), + ] + if state.initial_metadata_allowed: + operations.append(_get_initial_metadata_operation(state, None)) + if serialized_response is not None: + operations.append( + cygrpc.SendMessageOperation( + serialized_response, + _get_send_message_op_flags_from_state(state), + ) + ) + rpc_event.call.start_server_batch( + operations, + _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN), + ) + state.statused = True + _reset_per_message_state(state) + 
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN) + + +def _unary_response_in_pool( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + behavior: ArityAgnosticMethodHandler, + argument_thunk: Callable[[], Any], + request_deserializer: Optional[SerializingFunction], + response_serializer: Optional[SerializingFunction], +) -> None: + cygrpc.install_context_from_request_call_event(rpc_event) + + try: + argument = argument_thunk() + if argument is not None: + response, proceed = _call_behavior( + rpc_event, state, behavior, argument, request_deserializer + ) + if proceed: + serialized_response = _serialize_response( + rpc_event, state, response, response_serializer + ) + if serialized_response is not None: + _status(rpc_event, state, serialized_response) + except Exception: # pylint: disable=broad-except + traceback.print_exc() + finally: + cygrpc.uninstall_context() + + +def _stream_response_in_pool( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + behavior: ArityAgnosticMethodHandler, + argument_thunk: Callable[[], Any], + request_deserializer: Optional[DeserializingFunction], + response_serializer: Optional[SerializingFunction], +) -> None: + cygrpc.install_context_from_request_call_event(rpc_event) + + def send_response(response: Any) -> None: + if response is None: + _status(rpc_event, state, None) + else: + serialized_response = _serialize_response( + rpc_event, state, response, response_serializer + ) + if serialized_response is not None: + _send_response(rpc_event, state, serialized_response) + + try: + argument = argument_thunk() + if argument is not None: + if ( + hasattr(behavior, "experimental_non_blocking") + and behavior.experimental_non_blocking + ): + _call_behavior( + rpc_event, + state, + behavior, + argument, + request_deserializer, + send_response_callback=send_response, + ) + else: + response_iterator, proceed = _call_behavior( + rpc_event, state, behavior, argument, request_deserializer + ) + if proceed: + _send_message_callback_to_blocking_iterator_adapter( + rpc_event, state, send_response, response_iterator + ) + except Exception: # pylint: disable=broad-except + traceback.print_exc() + finally: + cygrpc.uninstall_context() + + +def _is_rpc_state_active(state: _RPCState) -> bool: + return state.client is not _CANCELLED and not state.statused + + +def _send_message_callback_to_blocking_iterator_adapter( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + send_response_callback: Callable[[ResponseType], None], + response_iterator: Iterator[ResponseType], +) -> None: + while True: + response, proceed = _take_response_from_response_iterator( + rpc_event, state, response_iterator + ) + if proceed: + send_response_callback(response) + if not _is_rpc_state_active(state): + break + else: + break + + +def _select_thread_pool_for_behavior( + behavior: ArityAgnosticMethodHandler, + default_thread_pool: futures.ThreadPoolExecutor, +) -> futures.ThreadPoolExecutor: + if hasattr(behavior, "experimental_thread_pool") and isinstance( + behavior.experimental_thread_pool, futures.ThreadPoolExecutor + ): + return behavior.experimental_thread_pool + else: + return default_thread_pool + + +def _handle_unary_unary( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_handler: grpc.RpcMethodHandler, + default_thread_pool: futures.ThreadPoolExecutor, +) -> futures.Future: + unary_request = _unary_request( + rpc_event, state, method_handler.request_deserializer + ) + thread_pool = _select_thread_pool_for_behavior( + method_handler.unary_unary, default_thread_pool + ) + return 
thread_pool.submit( + state.context.run, + _unary_response_in_pool, + rpc_event, + state, + method_handler.unary_unary, + unary_request, + method_handler.request_deserializer, + method_handler.response_serializer, + ) + + +def _handle_unary_stream( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_handler: grpc.RpcMethodHandler, + default_thread_pool: futures.ThreadPoolExecutor, +) -> futures.Future: + unary_request = _unary_request( + rpc_event, state, method_handler.request_deserializer + ) + thread_pool = _select_thread_pool_for_behavior( + method_handler.unary_stream, default_thread_pool + ) + return thread_pool.submit( + state.context.run, + _stream_response_in_pool, + rpc_event, + state, + method_handler.unary_stream, + unary_request, + method_handler.request_deserializer, + method_handler.response_serializer, + ) + + +def _handle_stream_unary( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_handler: grpc.RpcMethodHandler, + default_thread_pool: futures.ThreadPoolExecutor, +) -> futures.Future: + request_iterator = _RequestIterator( + state, rpc_event.call, method_handler.request_deserializer + ) + thread_pool = _select_thread_pool_for_behavior( + method_handler.stream_unary, default_thread_pool + ) + return thread_pool.submit( + state.context.run, + _unary_response_in_pool, + rpc_event, + state, + method_handler.stream_unary, + lambda: request_iterator, + method_handler.request_deserializer, + method_handler.response_serializer, + ) + + +def _handle_stream_stream( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_handler: grpc.RpcMethodHandler, + default_thread_pool: futures.ThreadPoolExecutor, +) -> futures.Future: + request_iterator = _RequestIterator( + state, rpc_event.call, method_handler.request_deserializer + ) + thread_pool = _select_thread_pool_for_behavior( + method_handler.stream_stream, default_thread_pool + ) + return thread_pool.submit( + state.context.run, + _stream_response_in_pool, + rpc_event, + state, + method_handler.stream_stream, + lambda: request_iterator, + method_handler.request_deserializer, + method_handler.response_serializer, + ) + + +def _find_method_handler( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_with_handler: _Method, + interceptor_pipeline: Optional[_interceptor._ServicePipeline], +) -> Optional[grpc.RpcMethodHandler]: + def query_handlers( + handler_call_details: _HandlerCallDetails, + ) -> Optional[grpc.RpcMethodHandler]: + return method_with_handler.handler(handler_call_details) + + method_name = method_with_handler.name() + if not method_name: + method_name = _common.decode(rpc_event.call_details.method) + + handler_call_details = _HandlerCallDetails( + method_name, + rpc_event.invocation_metadata, + ) + + if interceptor_pipeline is not None: + return state.context.run( + interceptor_pipeline.execute, query_handlers, handler_call_details + ) + else: + return state.context.run(query_handlers, handler_call_details) + + +def _reject_rpc( + rpc_event: cygrpc.BaseEvent, + rpc_state: _RPCState, + status: cygrpc.StatusCode, + details: bytes, +): + operations = ( + _get_initial_metadata_operation(rpc_state, None), + cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS), + cygrpc.SendStatusFromServerOperation( + None, status, details, _EMPTY_FLAGS + ), + ) + rpc_event.call.start_server_batch( + operations, + lambda ignored_event: ( + rpc_state, + (), + ), + ) + + +def _handle_with_method_handler( + rpc_event: cygrpc.BaseEvent, + state: _RPCState, + method_handler: grpc.RpcMethodHandler, + thread_pool: 
futures.ThreadPoolExecutor, +) -> futures.Future: + with state.condition: + rpc_event.call.start_server_batch( + (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),), + _receive_close_on_server(state), + ) + state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN) + if method_handler.request_streaming: + if method_handler.response_streaming: + return _handle_stream_stream( + rpc_event, state, method_handler, thread_pool + ) + else: + return _handle_stream_unary( + rpc_event, state, method_handler, thread_pool + ) + else: + if method_handler.response_streaming: + return _handle_unary_stream( + rpc_event, state, method_handler, thread_pool + ) + else: + return _handle_unary_unary( + rpc_event, state, method_handler, thread_pool + ) + + +def _handle_call( + rpc_event: cygrpc.BaseEvent, + method_with_handler: _Method, + interceptor_pipeline: Optional[_interceptor._ServicePipeline], + thread_pool: futures.ThreadPoolExecutor, + concurrency_exceeded: bool, +) -> Tuple[Optional[_RPCState], Optional[futures.Future]]: + """Handles RPC based on provided handlers. + + When receiving a call event from Core, registered method will have it's + name as tag, we pass the tag as registered_method_name to this method, + then we can find the handler in registered_method_handlers based on + the method name. + + For call event with unregistered method, the method name will be included + in rpc_event.call_details.method and we need to query the generics handlers + to find the actual handler. + """ + if not rpc_event.success: + return None, None + if rpc_event.call_details.method or method_with_handler.name(): + rpc_state = _RPCState() + try: + method_handler = _find_method_handler( + rpc_event, + rpc_state, + method_with_handler, + interceptor_pipeline, + ) + except Exception as exception: # pylint: disable=broad-except + details = "Exception servicing handler: {}".format(exception) + _LOGGER.exception(details) + _reject_rpc( + rpc_event, + rpc_state, + cygrpc.StatusCode.unknown, + b"Error in service handler!", + ) + return rpc_state, None + if method_handler is None: + _reject_rpc( + rpc_event, + rpc_state, + cygrpc.StatusCode.unimplemented, + b"Method not found!", + ) + return rpc_state, None + elif concurrency_exceeded: + _reject_rpc( + rpc_event, + rpc_state, + cygrpc.StatusCode.resource_exhausted, + b"Concurrent RPC limit exceeded!", + ) + return rpc_state, None + else: + return ( + rpc_state, + _handle_with_method_handler( + rpc_event, rpc_state, method_handler, thread_pool + ), + ) + else: + return None, None + + +@enum.unique +class _ServerStage(enum.Enum): + STOPPED = "stopped" + STARTED = "started" + GRACE = "grace" + + +class _ServerState(object): + lock: threading.RLock + completion_queue: cygrpc.CompletionQueue + server: cygrpc.Server + generic_handlers: List[grpc.GenericRpcHandler] + registered_method_handlers: Dict[str, grpc.RpcMethodHandler] + interceptor_pipeline: Optional[_interceptor._ServicePipeline] + thread_pool: futures.ThreadPoolExecutor + stage: _ServerStage + termination_event: threading.Event + shutdown_events: List[threading.Event] + maximum_concurrent_rpcs: Optional[int] + active_rpc_count: int + rpc_states: Set[_RPCState] + due: Set[str] + server_deallocated: bool + + # pylint: disable=too-many-arguments + def __init__( + self, + completion_queue: cygrpc.CompletionQueue, + server: cygrpc.Server, + generic_handlers: Sequence[grpc.GenericRpcHandler], + interceptor_pipeline: Optional[_interceptor._ServicePipeline], + thread_pool: futures.ThreadPoolExecutor, + maximum_concurrent_rpcs: 
Optional[int], + ): + self.lock = threading.RLock() + self.completion_queue = completion_queue + self.server = server + self.generic_handlers = list(generic_handlers) + self.interceptor_pipeline = interceptor_pipeline + self.thread_pool = thread_pool + self.stage = _ServerStage.STOPPED + self.termination_event = threading.Event() + self.shutdown_events = [self.termination_event] + self.maximum_concurrent_rpcs = maximum_concurrent_rpcs + self.active_rpc_count = 0 + self.registered_method_handlers = {} + + # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields. + self.rpc_states = set() + self.due = set() + + # A "volatile" flag to interrupt the daemon serving thread + self.server_deallocated = False + + +def _add_generic_handlers( + state: _ServerState, generic_handlers: Iterable[grpc.GenericRpcHandler] +) -> None: + with state.lock: + state.generic_handlers.extend(generic_handlers) + + +def _add_registered_method_handlers( + state: _ServerState, method_handlers: Dict[str, grpc.RpcMethodHandler] +) -> None: + with state.lock: + state.registered_method_handlers.update(method_handlers) + + +def _add_insecure_port(state: _ServerState, address: bytes) -> int: + with state.lock: + return state.server.add_http2_port(address) + + +def _add_secure_port( + state: _ServerState, + address: bytes, + server_credentials: grpc.ServerCredentials, +) -> int: + with state.lock: + return state.server.add_http2_port( + address, server_credentials._credentials + ) + + +def _request_call(state: _ServerState) -> None: + state.server.request_call( + state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG + ) + state.due.add(_REQUEST_CALL_TAG) + + +def _request_registered_call(state: _ServerState, method: str) -> None: + registered_call_tag = method + state.server.request_registered_call( + state.completion_queue, + state.completion_queue, + method, + registered_call_tag, + ) + state.due.add(registered_call_tag) + + +# TODO(https://github.com/grpc/grpc/issues/6597): delete this function. 
+def _stop_serving(state: _ServerState) -> bool: + if not state.rpc_states and not state.due: + state.server.destroy() + for shutdown_event in state.shutdown_events: + shutdown_event.set() + state.stage = _ServerStage.STOPPED + return True + else: + return False + + +def _on_call_completed(state: _ServerState) -> None: + with state.lock: + state.active_rpc_count -= 1 + + +# pylint: disable=too-many-branches +def _process_event_and_continue( + state: _ServerState, event: cygrpc.BaseEvent +) -> bool: + should_continue = True + if event.tag is _SHUTDOWN_TAG: + with state.lock: + state.due.remove(_SHUTDOWN_TAG) + if _stop_serving(state): + should_continue = False + elif ( + event.tag is _REQUEST_CALL_TAG + or event.tag in state.registered_method_handlers.keys() + ): + registered_method_name = None + if event.tag in state.registered_method_handlers.keys(): + registered_method_name = event.tag + method_with_handler = _RegisteredMethod( + registered_method_name, + state.registered_method_handlers.get( + registered_method_name, None + ), + ) + else: + method_with_handler = _GenericMethod( + state.generic_handlers, + ) + with state.lock: + state.due.remove(event.tag) + concurrency_exceeded = ( + state.maximum_concurrent_rpcs is not None + and state.active_rpc_count >= state.maximum_concurrent_rpcs + ) + rpc_state, rpc_future = _handle_call( + event, + method_with_handler, + state.interceptor_pipeline, + state.thread_pool, + concurrency_exceeded, + ) + if rpc_state is not None: + state.rpc_states.add(rpc_state) + if rpc_future is not None: + state.active_rpc_count += 1 + rpc_future.add_done_callback( + lambda unused_future: _on_call_completed(state) + ) + if state.stage is _ServerStage.STARTED: + if ( + registered_method_name + in state.registered_method_handlers.keys() + ): + _request_registered_call(state, registered_method_name) + else: + _request_call(state) + elif _stop_serving(state): + should_continue = False + else: + rpc_state, callbacks = event.tag(event) + for callback in callbacks: + try: + callback() + except Exception: # pylint: disable=broad-except + _LOGGER.exception("Exception calling callback!") + if rpc_state is not None: + with state.lock: + state.rpc_states.remove(rpc_state) + if _stop_serving(state): + should_continue = False + return should_continue + + +def _serve(state: _ServerState) -> None: + while True: + timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S + event = state.completion_queue.poll(timeout) + if state.server_deallocated: + _begin_shutdown_once(state) + if event.completion_type != cygrpc.CompletionType.queue_timeout: + if not _process_event_and_continue(state, event): + return + # We want to force the deletion of the previous event + # ~before~ we poll again; if the event has a reference + # to a shutdown Call object, this can induce spinlock. 
+ event = None + + +def _begin_shutdown_once(state: _ServerState) -> None: + with state.lock: + if state.stage is _ServerStage.STARTED: + state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG) + state.stage = _ServerStage.GRACE + state.due.add(_SHUTDOWN_TAG) + + +def _stop(state: _ServerState, grace: Optional[float]) -> threading.Event: + with state.lock: + if state.stage is _ServerStage.STOPPED: + shutdown_event = threading.Event() + shutdown_event.set() + return shutdown_event + else: + _begin_shutdown_once(state) + shutdown_event = threading.Event() + state.shutdown_events.append(shutdown_event) + if grace is None: + state.server.cancel_all_calls() + else: + + def cancel_all_calls_after_grace(): + shutdown_event.wait(timeout=grace) + with state.lock: + state.server.cancel_all_calls() + + thread = threading.Thread(target=cancel_all_calls_after_grace) + thread.start() + return shutdown_event + shutdown_event.wait() + return shutdown_event + + +def _start(state: _ServerState) -> None: + with state.lock: + if state.stage is not _ServerStage.STOPPED: + raise ValueError("Cannot start already-started server!") + state.server.start() + state.stage = _ServerStage.STARTED + # Request a call for each registered method so we can handle any of them. + for method in state.registered_method_handlers.keys(): + _request_registered_call(state, method) + # Also request a call for non-registered method. + _request_call(state) + thread = threading.Thread(target=_serve, args=(state,)) + thread.daemon = True + thread.start() + + +def _validate_generic_rpc_handlers( + generic_rpc_handlers: Iterable[grpc.GenericRpcHandler], +) -> None: + for generic_rpc_handler in generic_rpc_handlers: + service_attribute = getattr(generic_rpc_handler, "service", None) + if service_attribute is None: + raise AttributeError( + '"{}" must conform to grpc.GenericRpcHandler type but does ' + 'not have "service" method!'.format(generic_rpc_handler) + ) + + +def _augment_options( + base_options: Sequence[ChannelArgumentType], + compression: Optional[grpc.Compression], + xds: bool, +) -> Sequence[ChannelArgumentType]: + compression_option = _compression.create_channel_option(compression) + maybe_server_call_tracer_factory_option = ( + _observability.create_server_call_tracer_factory_option(xds) + ) + return ( + tuple(base_options) + + compression_option + + maybe_server_call_tracer_factory_option + ) + + +class _Server(grpc.Server): + _state: _ServerState + + # pylint: disable=too-many-arguments + def __init__( + self, + thread_pool: futures.ThreadPoolExecutor, + generic_handlers: Sequence[grpc.GenericRpcHandler], + interceptors: Sequence[grpc.ServerInterceptor], + options: Sequence[ChannelArgumentType], + maximum_concurrent_rpcs: Optional[int], + compression: Optional[grpc.Compression], + xds: bool, + ): + completion_queue = cygrpc.CompletionQueue() + server = cygrpc.Server(_augment_options(options, compression, xds), xds) + server.register_completion_queue(completion_queue) + self._state = _ServerState( + completion_queue, + server, + generic_handlers, + _interceptor.service_pipeline(interceptors), + thread_pool, + maximum_concurrent_rpcs, + ) + self._cy_server = server + + def add_generic_rpc_handlers( + self, generic_rpc_handlers: Iterable[grpc.GenericRpcHandler] + ) -> None: + _validate_generic_rpc_handlers(generic_rpc_handlers) + _add_generic_handlers(self._state, generic_rpc_handlers) + + def add_registered_method_handlers( + self, + service_name: str, + method_handlers: Dict[str, grpc.RpcMethodHandler], + ) -> None: + # 
Can't register method once server started. + with self._state.lock: + if self._state.stage is _ServerStage.STARTED: + return + + # TODO(xuanwn): We should validate method_handlers first. + method_to_handlers = { + _common.fully_qualified_method(service_name, method): method_handler + for method, method_handler in method_handlers.items() + } + for fully_qualified_method in method_to_handlers.keys(): + self._cy_server.register_method(fully_qualified_method) + _add_registered_method_handlers(self._state, method_to_handlers) + + def add_insecure_port(self, address: str) -> int: + return _common.validate_port_binding_result( + address, _add_insecure_port(self._state, _common.encode(address)) + ) + + def add_secure_port( + self, address: str, server_credentials: grpc.ServerCredentials + ) -> int: + return _common.validate_port_binding_result( + address, + _add_secure_port( + self._state, _common.encode(address), server_credentials + ), + ) + + def start(self) -> None: + _start(self._state) + + def wait_for_termination(self, timeout: Optional[float] = None) -> bool: + # NOTE(https://bugs.python.org/issue35935) + # Remove this workaround once threading.Event.wait() is working with + # CTRL+C across platforms. + return _common.wait( + self._state.termination_event.wait, + self._state.termination_event.is_set, + timeout=timeout, + ) + + def stop(self, grace: Optional[float]) -> threading.Event: + return _stop(self._state, grace) + + def __del__(self): + if hasattr(self, "_state"): + # We can not grab a lock in __del__(), so set a flag to signal the + # serving daemon thread (if it exists) to initiate shutdown. + self._state.server_deallocated = True + + +def create_server( + thread_pool: futures.ThreadPoolExecutor, + generic_rpc_handlers: Sequence[grpc.GenericRpcHandler], + interceptors: Sequence[grpc.ServerInterceptor], + options: Sequence[ChannelArgumentType], + maximum_concurrent_rpcs: Optional[int], + compression: Optional[grpc.Compression], + xds: bool, +) -> _Server: + _validate_generic_rpc_handlers(generic_rpc_handlers) + return _Server( + thread_pool, + generic_rpc_handlers, + interceptors, + options, + maximum_concurrent_rpcs, + compression, + xds, + ) diff --git a/MLPY/Lib/site-packages/grpc/_simple_stubs.py b/MLPY/Lib/site-packages/grpc/_simple_stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..f2d288d6e9e9210d0dacf0934c5bc15c114db464 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_simple_stubs.py @@ -0,0 +1,588 @@ +# Copyright 2020 The gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Functions that obviate explicit stubs and explicit channels.""" + +import collections +import datetime +import logging +import os +import threading +from typing import ( + Any, + AnyStr, + Callable, + Dict, + Iterator, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +import grpc +from grpc.experimental import experimental_api + +RequestType = TypeVar("RequestType") +ResponseType = TypeVar("ResponseType") + +OptionsType = Sequence[Tuple[str, str]] +CacheKey = Tuple[ + str, + OptionsType, + Optional[grpc.ChannelCredentials], + Optional[grpc.Compression], +] + +_LOGGER = logging.getLogger(__name__) + +_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" +if _EVICTION_PERIOD_KEY in os.environ: + _EVICTION_PERIOD = datetime.timedelta( + seconds=float(os.environ[_EVICTION_PERIOD_KEY]) + ) + _LOGGER.debug( + "Setting managed channel eviction period to %s", _EVICTION_PERIOD + ) +else: + _EVICTION_PERIOD = datetime.timedelta(minutes=10) + +_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" +if _MAXIMUM_CHANNELS_KEY in os.environ: + _MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY]) + _LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS) +else: + _MAXIMUM_CHANNELS = 2**8 + +_DEFAULT_TIMEOUT_KEY = "GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS" +if _DEFAULT_TIMEOUT_KEY in os.environ: + _DEFAULT_TIMEOUT = float(os.environ[_DEFAULT_TIMEOUT_KEY]) + _LOGGER.debug("Setting default timeout seconds to %f", _DEFAULT_TIMEOUT) +else: + _DEFAULT_TIMEOUT = 60.0 + + +def _create_channel( + target: str, + options: Sequence[Tuple[str, str]], + channel_credentials: Optional[grpc.ChannelCredentials], + compression: Optional[grpc.Compression], +) -> grpc.Channel: + _LOGGER.debug( + f"Creating secure channel with credentials '{channel_credentials}', " + + f"options '{options}' and compression '{compression}'" + ) + return grpc.secure_channel( + target, + credentials=channel_credentials, + options=options, + compression=compression, + ) + + +class ChannelCache: + # NOTE(rbellevi): Untyped due to reference cycle. + _singleton = None + _lock: threading.RLock = threading.RLock() + _condition: threading.Condition = threading.Condition(lock=_lock) + _eviction_ready: threading.Event = threading.Event() + + _mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]] + _eviction_thread: threading.Thread + + def __init__(self): + self._mapping = collections.OrderedDict() + self._eviction_thread = threading.Thread( + target=ChannelCache._perform_evictions, daemon=True + ) + self._eviction_thread.start() + + @staticmethod + def get(): + with ChannelCache._lock: + if ChannelCache._singleton is None: + ChannelCache._singleton = ChannelCache() + ChannelCache._eviction_ready.wait() + return ChannelCache._singleton + + def _evict_locked(self, key: CacheKey): + channel, _ = self._mapping.pop(key) + _LOGGER.debug( + "Evicting channel %s with configuration %s.", channel, key + ) + channel.close() + del channel + + @staticmethod + def _perform_evictions(): + while True: + with ChannelCache._lock: + ChannelCache._eviction_ready.set() + if not ChannelCache._singleton._mapping: + ChannelCache._condition.wait() + elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS: + key = next(iter(ChannelCache._singleton._mapping.keys())) + ChannelCache._singleton._evict_locked(key) + # And immediately reevaluate. 
+                else:
+                    key, (_, eviction_time) = next(
+                        iter(ChannelCache._singleton._mapping.items())
+                    )
+                    now = datetime.datetime.now()
+                    if eviction_time <= now:
+                        ChannelCache._singleton._evict_locked(key)
+                        continue
+                    else:
+                        time_to_eviction = (eviction_time - now).total_seconds()
+                        # NOTE: We aim to *eventually* coalesce to a state in
+                        # which no overdue channels are in the cache and the
+                        # length of the cache is no longer than
+                        # _MAXIMUM_CHANNELS. We tolerate momentary states in
+                        # which these two criteria are not met.
+                        ChannelCache._condition.wait(timeout=time_to_eviction)
+
+    def get_channel(
+        self,
+        target: str,
+        options: Sequence[Tuple[str, str]],
+        channel_credentials: Optional[grpc.ChannelCredentials],
+        insecure: bool,
+        compression: Optional[grpc.Compression],
+        method: str,
+        _registered_method: bool,
+    ) -> Tuple[grpc.Channel, Optional[int]]:
+        """Gets a channel from the cache or creates a new one.
+
+        This method also takes care of method registration for the channel,
+        which means we'll register a new call handle if we're calling a
+        non-registered method on an existing channel.
+
+        Returns:
+            A tuple with two items. The first item is the channel, the second
+            item is the call handle if the method is registered, or None if it
+            is not registered.
+        """
+        if insecure and channel_credentials:
+            raise ValueError(
+                "The insecure option is mutually exclusive with "
+                + "the channel_credentials option. Please use one "
+                + "or the other."
+            )
+        if insecure:
+            channel_credentials = (
+                grpc.experimental.insecure_channel_credentials()
+            )
+        elif channel_credentials is None:
+            _LOGGER.debug("Defaulting to SSL channel credentials.")
+            channel_credentials = grpc.ssl_channel_credentials()
+        key = (target, options, channel_credentials, compression)
+        with self._lock:
+            channel_data = self._mapping.get(key, None)
+            call_handle = None
+            if channel_data is not None:
+                channel = channel_data[0]
+                # Register a new call handle if this is a registered-method
+                # call on an existing channel that has not yet registered
+                # this method.
+                if _registered_method:
+                    call_handle = channel._get_registered_call_handle(method)
+                self._mapping.pop(key)
+                self._mapping[key] = (
+                    channel,
+                    datetime.datetime.now() + _EVICTION_PERIOD,
+                )
+                return channel, call_handle
+            else:
+                channel = _create_channel(
+                    target, options, channel_credentials, compression
+                )
+                if _registered_method:
+                    call_handle = channel._get_registered_call_handle(method)
+                self._mapping[key] = (
+                    channel,
+                    datetime.datetime.now() + _EVICTION_PERIOD,
+                )
+                if (
+                    len(self._mapping) == 1
+                    or len(self._mapping) >= _MAXIMUM_CHANNELS
+                ):
+                    self._condition.notify()
+                return channel, call_handle
+
+    def _test_only_channel_count(self) -> int:
+        with self._lock:
+            return len(self._mapping)
+
+
+@experimental_api
+# pylint: disable=too-many-locals
+def unary_unary(
+    request: RequestType,
+    target: str,
+    method: str,
+    request_serializer: Optional[Callable[[Any], bytes]] = None,
+    response_deserializer: Optional[Callable[[bytes], Any]] = None,
+    options: Sequence[Tuple[AnyStr, AnyStr]] = (),
+    channel_credentials: Optional[grpc.ChannelCredentials] = None,
+    insecure: bool = False,
+    call_credentials: Optional[grpc.CallCredentials] = None,
+    compression: Optional[grpc.Compression] = None,
+    wait_for_ready: Optional[bool] = None,
+    timeout: Optional[float] = _DEFAULT_TIMEOUT,
+    metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None,
+    _registered_method: Optional[bool] = False,
+) -> ResponseType:
+    """Invokes a unary-unary RPC without an explicitly specified channel.
+
+    THIS IS AN EXPERIMENTAL API.
+
+    This is backed by a per-process cache of channels. Channels are evicted
+    from the cache after a fixed period by a background thread. Channels will
+    also be evicted if more than a configured maximum accumulate.
+
+    The default eviction period is 10 minutes. One may set the environment
+    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
+
+    The default maximum number of channels is 256. One may set the
+    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
+    this.
+
+    Args:
+      request: The request value for the RPC.
+      target: The server address.
+      method: The name of the RPC method.
+      request_serializer: Optional :term:`serializer` for serializing the request
+        message. Request goes unserialized in case None is passed.
+      response_deserializer: Optional :term:`deserializer` for deserializing the response
+        message. Response goes undeserialized in case None is passed.
+      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
+        runtime) to configure the channel.
+      channel_credentials: A credential applied to the whole channel, e.g. the
+        return value of grpc.ssl_channel_credentials() or
+        grpc.insecure_channel_credentials().
+      insecure: If True, specifies channel_credentials as
+        :term:`grpc.insecure_channel_credentials()`. This option is mutually
+        exclusive with the `channel_credentials` option.
+      call_credentials: A call credential applied to each call individually,
+        e.g. the output of grpc.metadata_call_credentials() or
+        grpc.access_token_call_credentials().
+      compression: An optional value indicating the compression method to be
+        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
+      wait_for_ready: An optional flag indicating whether the RPC should fail
+        immediately if the connection is not ready at the time the RPC is
+        invoked, or if it should wait until the connection to the server
+        becomes ready.
When using this option, the user will likely also want + to set a timeout. Defaults to True. + timeout: An optional duration of time in seconds to allow for the RPC, + after which an exception will be raised. If timeout is unspecified, + defaults to a timeout controlled by the + GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is + unset, defaults to 60 seconds. Supply a value of None to indicate that + no timeout should be enforced. + metadata: Optional metadata to send to the server. + + Returns: + The response to the RPC. + """ + channel, method_handle = ChannelCache.get().get_channel( + target, + options, + channel_credentials, + insecure, + compression, + method, + _registered_method, + ) + multicallable = channel.unary_unary( + method, request_serializer, response_deserializer, method_handle + ) + wait_for_ready = wait_for_ready if wait_for_ready is not None else True + return multicallable( + request, + metadata=metadata, + wait_for_ready=wait_for_ready, + credentials=call_credentials, + timeout=timeout, + ) + + +@experimental_api +# pylint: disable=too-many-locals +def unary_stream( + request: RequestType, + target: str, + method: str, + request_serializer: Optional[Callable[[Any], bytes]] = None, + response_deserializer: Optional[Callable[[bytes], Any]] = None, + options: Sequence[Tuple[AnyStr, AnyStr]] = (), + channel_credentials: Optional[grpc.ChannelCredentials] = None, + insecure: bool = False, + call_credentials: Optional[grpc.CallCredentials] = None, + compression: Optional[grpc.Compression] = None, + wait_for_ready: Optional[bool] = None, + timeout: Optional[float] = _DEFAULT_TIMEOUT, + metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None, + _registered_method: Optional[bool] = False, +) -> Iterator[ResponseType]: + """Invokes a unary-stream RPC without an explicitly specified channel. + + THIS IS AN EXPERIMENTAL API. + + This is backed by a per-process cache of channels. Channels are evicted + from the cache after a fixed period by a background. Channels will also be + evicted if more than a configured maximum accumulate. + + The default eviction period is 10 minutes. One may set the environment + variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this. + + The default maximum number of channels is 256. One may set the + environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure + this. + + Args: + request: An iterator that yields request values for the RPC. + target: The server address. + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the response + message. Response goes undeserialized in case None is passed. + options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core + runtime) to configure the channel. + channel_credentials: A credential applied to the whole channel, e.g. the + return value of grpc.ssl_channel_credentials(). + insecure: If True, specifies channel_credentials as + :term:`grpc.insecure_channel_credentials()`. This option is mutually + exclusive with the `channel_credentials` option. + call_credentials: A call credential applied to each call individually, + e.g. the output of grpc.metadata_call_credentials() or + grpc.access_token_call_credentials(). + compression: An optional value indicating the compression method to be + used over the lifetime of the channel, e.g. 
grpc.Compression.Gzip. + wait_for_ready: An optional flag indicating whether the RPC should fail + immediately if the connection is not ready at the time the RPC is + invoked, or if it should wait until the connection to the server + becomes ready. When using this option, the user will likely also want + to set a timeout. Defaults to True. + timeout: An optional duration of time in seconds to allow for the RPC, + after which an exception will be raised. If timeout is unspecified, + defaults to a timeout controlled by the + GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is + unset, defaults to 60 seconds. Supply a value of None to indicate that + no timeout should be enforced. + metadata: Optional metadata to send to the server. + + Returns: + An iterator of responses. + """ + channel, method_handle = ChannelCache.get().get_channel( + target, + options, + channel_credentials, + insecure, + compression, + method, + _registered_method, + ) + multicallable = channel.unary_stream( + method, request_serializer, response_deserializer, method_handle + ) + wait_for_ready = wait_for_ready if wait_for_ready is not None else True + return multicallable( + request, + metadata=metadata, + wait_for_ready=wait_for_ready, + credentials=call_credentials, + timeout=timeout, + ) + + +@experimental_api +# pylint: disable=too-many-locals +def stream_unary( + request_iterator: Iterator[RequestType], + target: str, + method: str, + request_serializer: Optional[Callable[[Any], bytes]] = None, + response_deserializer: Optional[Callable[[bytes], Any]] = None, + options: Sequence[Tuple[AnyStr, AnyStr]] = (), + channel_credentials: Optional[grpc.ChannelCredentials] = None, + insecure: bool = False, + call_credentials: Optional[grpc.CallCredentials] = None, + compression: Optional[grpc.Compression] = None, + wait_for_ready: Optional[bool] = None, + timeout: Optional[float] = _DEFAULT_TIMEOUT, + metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None, + _registered_method: Optional[bool] = False, +) -> ResponseType: + """Invokes a stream-unary RPC without an explicitly specified channel. + + THIS IS AN EXPERIMENTAL API. + + This is backed by a per-process cache of channels. Channels are evicted + from the cache after a fixed period by a background. Channels will also be + evicted if more than a configured maximum accumulate. + + The default eviction period is 10 minutes. One may set the environment + variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this. + + The default maximum number of channels is 256. One may set the + environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure + this. + + Args: + request_iterator: An iterator that yields request values for the RPC. + target: The server address. + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the response + message. Response goes undeserialized in case None is passed. + options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core + runtime) to configure the channel. + channel_credentials: A credential applied to the whole channel, e.g. the + return value of grpc.ssl_channel_credentials(). + call_credentials: A call credential applied to each call individually, + e.g. the output of grpc.metadata_call_credentials() or + grpc.access_token_call_credentials(). 
+ insecure: If True, specifies channel_credentials as + :term:`grpc.insecure_channel_credentials()`. This option is mutually + exclusive with the `channel_credentials` option. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel, e.g. grpc.Compression.Gzip. + wait_for_ready: An optional flag indicating whether the RPC should fail + immediately if the connection is not ready at the time the RPC is + invoked, or if it should wait until the connection to the server + becomes ready. When using this option, the user will likely also want + to set a timeout. Defaults to True. + timeout: An optional duration of time in seconds to allow for the RPC, + after which an exception will be raised. If timeout is unspecified, + defaults to a timeout controlled by the + GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is + unset, defaults to 60 seconds. Supply a value of None to indicate that + no timeout should be enforced. + metadata: Optional metadata to send to the server. + + Returns: + The response to the RPC. + """ + channel, method_handle = ChannelCache.get().get_channel( + target, + options, + channel_credentials, + insecure, + compression, + method, + _registered_method, + ) + multicallable = channel.stream_unary( + method, request_serializer, response_deserializer, method_handle + ) + wait_for_ready = wait_for_ready if wait_for_ready is not None else True + return multicallable( + request_iterator, + metadata=metadata, + wait_for_ready=wait_for_ready, + credentials=call_credentials, + timeout=timeout, + ) + + +@experimental_api +# pylint: disable=too-many-locals +def stream_stream( + request_iterator: Iterator[RequestType], + target: str, + method: str, + request_serializer: Optional[Callable[[Any], bytes]] = None, + response_deserializer: Optional[Callable[[bytes], Any]] = None, + options: Sequence[Tuple[AnyStr, AnyStr]] = (), + channel_credentials: Optional[grpc.ChannelCredentials] = None, + insecure: bool = False, + call_credentials: Optional[grpc.CallCredentials] = None, + compression: Optional[grpc.Compression] = None, + wait_for_ready: Optional[bool] = None, + timeout: Optional[float] = _DEFAULT_TIMEOUT, + metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None, + _registered_method: Optional[bool] = False, +) -> Iterator[ResponseType]: + """Invokes a stream-stream RPC without an explicitly specified channel. + + THIS IS AN EXPERIMENTAL API. + + This is backed by a per-process cache of channels. Channels are evicted + from the cache after a fixed period by a background. Channels will also be + evicted if more than a configured maximum accumulate. + + The default eviction period is 10 minutes. One may set the environment + variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this. + + The default maximum number of channels is 256. One may set the + environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure + this. + + Args: + request_iterator: An iterator that yields request values for the RPC. + target: The server address. + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the response + message. Response goes undeserialized in case None is passed. + options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core + runtime) to configure the channel. 
+ channel_credentials: A credential applied to the whole channel, e.g. the + return value of grpc.ssl_channel_credentials(). + call_credentials: A call credential applied to each call individually, + e.g. the output of grpc.metadata_call_credentials() or + grpc.access_token_call_credentials(). + insecure: If True, specifies channel_credentials as + :term:`grpc.insecure_channel_credentials()`. This option is mutually + exclusive with the `channel_credentials` option. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel, e.g. grpc.Compression.Gzip. + wait_for_ready: An optional flag indicating whether the RPC should fail + immediately if the connection is not ready at the time the RPC is + invoked, or if it should wait until the connection to the server + becomes ready. When using this option, the user will likely also want + to set a timeout. Defaults to True. + timeout: An optional duration of time in seconds to allow for the RPC, + after which an exception will be raised. If timeout is unspecified, + defaults to a timeout controlled by the + GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is + unset, defaults to 60 seconds. Supply a value of None to indicate that + no timeout should be enforced. + metadata: Optional metadata to send to the server. + + Returns: + An iterator of responses. + """ + channel, method_handle = ChannelCache.get().get_channel( + target, + options, + channel_credentials, + insecure, + compression, + method, + _registered_method, + ) + multicallable = channel.stream_stream( + method, request_serializer, response_deserializer, method_handle + ) + wait_for_ready = wait_for_ready if wait_for_ready is not None else True + return multicallable( + request_iterator, + metadata=metadata, + wait_for_ready=wait_for_ready, + credentials=call_credentials, + timeout=timeout, + ) diff --git a/MLPY/Lib/site-packages/grpc/_typing.py b/MLPY/Lib/site-packages/grpc/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b559b75eb3205443207217810a44967712037b --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_typing.py @@ -0,0 +1,95 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
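# [Editor's note: illustrative sketch, not part of the upstream diff.] The
# aliases defined in this internal module describe the shapes of user-supplied
# callables and metadata: MetadataType is a sequence of (key, value) pairs and
# SerializingFunction maps a message object to bytes. A hedged example of
# annotating application code with the same shapes (log_metadata and
# serialize_request are hypothetical names):
#
#     from typing import Sequence, Tuple, Union
#
#     Metadata = Sequence[Tuple[str, Union[str, bytes]]]  # mirrors MetadataType
#
#     def log_metadata(metadata: Metadata) -> None:
#         for key, value in metadata:
#             print(key, value)
#
#     def serialize_request(message) -> bytes:  # mirrors SerializingFunction
#         return message.SerializeToString()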
+"""Common types for gRPC Sync API""" + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + Iterator, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +from grpc._cython import cygrpc + +if TYPE_CHECKING: + from grpc import ServicerContext + from grpc._server import _RPCState + +RequestType = TypeVar("RequestType") +ResponseType = TypeVar("ResponseType") +SerializingFunction = Callable[[Any], bytes] +DeserializingFunction = Callable[[bytes], Any] +MetadataType = Sequence[Tuple[str, Union[str, bytes]]] +ChannelArgumentType = Tuple[str, Any] +DoneCallbackType = Callable[[Any], None] +NullaryCallbackType = Callable[[], None] +RequestIterableType = Iterable[Any] +ResponseIterableType = Iterable[Any] +UserTag = Callable[[cygrpc.BaseEvent], bool] +IntegratedCallFactory = Callable[ + [ + int, + bytes, + None, + Optional[float], + Optional[MetadataType], + Optional[cygrpc.CallCredentials], + Sequence[Sequence[cygrpc.Operation]], + UserTag, + Any, + ], + cygrpc.IntegratedCall, +] +ServerTagCallbackType = Tuple[ + Optional["_RPCState"], Sequence[NullaryCallbackType] +] +ServerCallbackTag = Callable[[cygrpc.BaseEvent], ServerTagCallbackType] +ArityAgnosticMethodHandler = Union[ + Callable[ + [RequestType, "ServicerContext", Callable[[ResponseType], None]], + ResponseType, + ], + Callable[ + [RequestType, "ServicerContext", Callable[[ResponseType], None]], + Iterator[ResponseType], + ], + Callable[ + [ + Iterator[RequestType], + "ServicerContext", + Callable[[ResponseType], None], + ], + ResponseType, + ], + Callable[ + [ + Iterator[RequestType], + "ServicerContext", + Callable[[ResponseType], None], + ], + Iterator[ResponseType], + ], + Callable[[RequestType, "ServicerContext"], ResponseType], + Callable[[RequestType, "ServicerContext"], Iterator[ResponseType]], + Callable[[Iterator[RequestType], "ServicerContext"], ResponseType], + Callable[ + [Iterator[RequestType], "ServicerContext"], Iterator[ResponseType] + ], +] diff --git a/MLPY/Lib/site-packages/grpc/_utilities.py b/MLPY/Lib/site-packages/grpc/_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0ae499d782b0bc4567bc635afd2da03e81780f --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/_utilities.py @@ -0,0 +1,222 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Internal utilities for gRPC Python.""" + +import collections +import logging +import threading +import time +from typing import Callable, Dict, Optional, Sequence + +import grpc # pytype: disable=pyi-error +from grpc import _common # pytype: disable=pyi-error +from grpc._typing import DoneCallbackType + +_LOGGER = logging.getLogger(__name__) + +_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = ( + 'Exception calling connectivity future "done" callback!' 
+) + + +class RpcMethodHandler( + collections.namedtuple( + "_RpcMethodHandler", + ( + "request_streaming", + "response_streaming", + "request_deserializer", + "response_serializer", + "unary_unary", + "unary_stream", + "stream_unary", + "stream_stream", + ), + ), + grpc.RpcMethodHandler, +): + pass + + +class DictionaryGenericHandler(grpc.ServiceRpcHandler): + _name: str + _method_handlers: Dict[str, grpc.RpcMethodHandler] + + def __init__( + self, service: str, method_handlers: Dict[str, grpc.RpcMethodHandler] + ): + self._name = service + self._method_handlers = { + _common.fully_qualified_method(service, method): method_handler + for method, method_handler in method_handlers.items() + } + + def service_name(self) -> str: + return self._name + + def service( + self, handler_call_details: grpc.HandlerCallDetails + ) -> Optional[grpc.RpcMethodHandler]: + details_method = handler_call_details.method + return self._method_handlers.get( + details_method + ) # pytype: disable=attribute-error + + +class _ChannelReadyFuture(grpc.Future): + _condition: threading.Condition + _channel: grpc.Channel + _matured: bool + _cancelled: bool + _done_callbacks: Sequence[Callable] + + def __init__(self, channel: grpc.Channel): + self._condition = threading.Condition() + self._channel = channel + + self._matured = False + self._cancelled = False + self._done_callbacks = [] + + def _block(self, timeout: Optional[float]) -> None: + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._cancelled: + raise grpc.FutureCancelledError() + elif self._matured: + return + else: + if until is None: + self._condition.wait() + else: + remaining = until - time.time() + if remaining < 0: + raise grpc.FutureTimeoutError() + else: + self._condition.wait(timeout=remaining) + + def _update(self, connectivity: Optional[grpc.ChannelConnectivity]) -> None: + with self._condition: + if ( + not self._cancelled + and connectivity is grpc.ChannelConnectivity.READY + ): + self._matured = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return + + for done_callback in done_callbacks: + try: + done_callback(self) + except Exception: # pylint: disable=broad-except + _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE) + + def cancel(self) -> bool: + with self._condition: + if not self._matured: + self._cancelled = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return False + + for done_callback in done_callbacks: + try: + done_callback(self) + except Exception: # pylint: disable=broad-except + _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE) + + return True + + def cancelled(self) -> bool: + with self._condition: + return self._cancelled + + def running(self) -> bool: + with self._condition: + return not self._cancelled and not self._matured + + def done(self) -> bool: + with self._condition: + return self._cancelled or self._matured + + def result(self, timeout: Optional[float] = None) -> None: + self._block(timeout) + + def exception(self, timeout: Optional[float] = None) -> None: + self._block(timeout) + + def traceback(self, timeout: Optional[float] = None) -> None: + self._block(timeout) + + def add_done_callback(self, fn: DoneCallbackType): + with self._condition: + if not self._cancelled and not self._matured: + 
self._done_callbacks.append(fn) + return + + fn(self) + + def start(self): + with self._condition: + self._channel.subscribe(self._update, try_to_connect=True) + + def __del__(self): + with self._condition: + if not self._cancelled and not self._matured: + self._channel.unsubscribe(self._update) + + +def channel_ready_future(channel: grpc.Channel) -> _ChannelReadyFuture: + ready_future = _ChannelReadyFuture(channel) + ready_future.start() + return ready_future + + +def first_version_is_lower(version1: str, version2: str) -> bool: + """ + Compares two versions in the format '1.60.1' or '1.60.1.dev0'. + + This method will be used in all stubs generated by grpcio-tools to check whether + the stub version is compatible with the runtime grpcio. + + Args: + version1: The first version string. + version2: The second version string. + + Returns: + True if version1 is lower, False otherwise. + """ + version1_list = version1.split(".") + version2_list = version2.split(".") + + try: + for i in range(3): + if int(version1_list[i]) < int(version2_list[i]): + return True + elif int(version1_list[i]) > int(version2_list[i]): + return False + except ValueError: + # Return false in case we can't convert version to int. + return False + + # The version without dev0 will be considered lower. + return len(version1_list) < len(version2_list) diff --git a/MLPY/Lib/site-packages/grpc/aio/__init__.py b/MLPY/Lib/site-packages/grpc/aio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7eac242abc6fea0557cace8336e6674915837d0 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/__init__.py @@ -0,0 +1,95 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC's Asynchronous Python API. + +gRPC Async API objects may only be used on the thread on which they were +created. AsyncIO doesn't provide thread safety for most of its APIs. 
+""" + +from typing import Any, Optional, Sequence, Tuple + +import grpc +from grpc._cython.cygrpc import AbortError +from grpc._cython.cygrpc import BaseError +from grpc._cython.cygrpc import EOF +from grpc._cython.cygrpc import InternalError +from grpc._cython.cygrpc import UsageError +from grpc._cython.cygrpc import init_grpc_aio +from grpc._cython.cygrpc import shutdown_grpc_aio + +from ._base_call import Call +from ._base_call import RpcContext +from ._base_call import StreamStreamCall +from ._base_call import StreamUnaryCall +from ._base_call import UnaryStreamCall +from ._base_call import UnaryUnaryCall +from ._base_channel import Channel +from ._base_channel import StreamStreamMultiCallable +from ._base_channel import StreamUnaryMultiCallable +from ._base_channel import UnaryStreamMultiCallable +from ._base_channel import UnaryUnaryMultiCallable +from ._base_server import Server +from ._base_server import ServicerContext +from ._call import AioRpcError +from ._channel import insecure_channel +from ._channel import secure_channel +from ._interceptor import ClientCallDetails +from ._interceptor import ClientInterceptor +from ._interceptor import InterceptedUnaryUnaryCall +from ._interceptor import ServerInterceptor +from ._interceptor import StreamStreamClientInterceptor +from ._interceptor import StreamUnaryClientInterceptor +from ._interceptor import UnaryStreamClientInterceptor +from ._interceptor import UnaryUnaryClientInterceptor +from ._metadata import Metadata +from ._server import server +from ._typing import ChannelArgumentType + +################################### __all__ ################################# + +__all__ = ( + "init_grpc_aio", + "shutdown_grpc_aio", + "AioRpcError", + "RpcContext", + "Call", + "UnaryUnaryCall", + "UnaryStreamCall", + "StreamUnaryCall", + "StreamStreamCall", + "Channel", + "UnaryUnaryMultiCallable", + "UnaryStreamMultiCallable", + "StreamUnaryMultiCallable", + "StreamStreamMultiCallable", + "ClientCallDetails", + "ClientInterceptor", + "UnaryStreamClientInterceptor", + "UnaryUnaryClientInterceptor", + "StreamUnaryClientInterceptor", + "StreamStreamClientInterceptor", + "InterceptedUnaryUnaryCall", + "ServerInterceptor", + "insecure_channel", + "server", + "Server", + "ServicerContext", + "EOF", + "secure_channel", + "AbortError", + "BaseError", + "UsageError", + "InternalError", + "Metadata", +) diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..003fef256f31e4c60f02dc46b868363dd285a0d7 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_call.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_call.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe98a8b0d1ab269ce55e29b4efb43405fcf04714 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_call.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_channel.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_channel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1c35112e5b7c0b01b04ca2ae9a53d245de5241d Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_channel.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_server.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_server.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..996303fd1afd37e548f6b5b50fb9e609debb53e4 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_base_server.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_call.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_call.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b592f2b06fe47548562ac31a55f56d685da4763 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_call.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_channel.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_channel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4477cad21d9ef1a6d3e14785802ae409f746b398 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_channel.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_interceptor.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_interceptor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d94b801d782f99eae4be6c51c4d03207dea47ad0 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_interceptor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_metadata.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_metadata.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba6578da36544cd886d2c6a488bf0a39407a544e Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_metadata.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_server.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_server.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e88f3ae87bfd98c92bad7fc354fdc76f387ae0df Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_server.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_typing.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_typing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a083a2ef76f1c0ee58151e04694d35d0c05f0b41 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_typing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/__pycache__/_utils.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edd65ab2eb77106eb3daf4d31fc18deba137c3d2 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/aio/__pycache__/_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/aio/_base_call.py b/MLPY/Lib/site-packages/grpc/aio/_base_call.py new file mode 100644 index 0000000000000000000000000000000000000000..72e467ae63db1f296c9e59ff808562a3ebfa5a3f --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_base_call.py @@ -0,0 +1,257 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Abstract base classes for client-side Call objects. + +Call objects represents the RPC itself, and offer methods to access / modify +its information. They also offer methods to manipulate the life-cycle of the +RPC, e.g. cancellation. +""" + +from abc import ABCMeta +from abc import abstractmethod +from typing import Any, AsyncIterator, Generator, Generic, Optional, Union + +import grpc + +from ._metadata import Metadata +from ._typing import DoneCallbackType +from ._typing import EOFType +from ._typing import RequestType +from ._typing import ResponseType + +__all__ = "RpcContext", "Call", "UnaryUnaryCall", "UnaryStreamCall" + + +class RpcContext(metaclass=ABCMeta): + """Provides RPC-related information and action.""" + + @abstractmethod + def cancelled(self) -> bool: + """Return True if the RPC is cancelled. + + The RPC is cancelled when the cancellation was requested with cancel(). + + Returns: + A bool indicates whether the RPC is cancelled or not. + """ + + @abstractmethod + def done(self) -> bool: + """Return True if the RPC is done. + + An RPC is done if the RPC is completed, cancelled or aborted. + + Returns: + A bool indicates if the RPC is done. + """ + + @abstractmethod + def time_remaining(self) -> Optional[float]: + """Describes the length of allowed time remaining for the RPC. + + Returns: + A nonnegative float indicating the length of allowed time in seconds + remaining for the RPC to complete before it is considered to have + timed out, or None if no deadline was specified for the RPC. + """ + + @abstractmethod + def cancel(self) -> bool: + """Cancels the RPC. + + Idempotent and has no effect if the RPC has already terminated. + + Returns: + A bool indicates if the cancellation is performed or not. + """ + + @abstractmethod + def add_done_callback(self, callback: DoneCallbackType) -> None: + """Registers a callback to be called on RPC termination. + + Args: + callback: A callable object will be called with the call object as + its only argument. + """ + + +class Call(RpcContext, metaclass=ABCMeta): + """The abstract base class of an RPC on the client-side.""" + + @abstractmethod + async def initial_metadata(self) -> Metadata: + """Accesses the initial metadata sent by the server. + + Returns: + The initial :term:`metadata`. + """ + + @abstractmethod + async def trailing_metadata(self) -> Metadata: + """Accesses the trailing metadata sent by the server. + + Returns: + The trailing :term:`metadata`. + """ + + @abstractmethod + async def code(self) -> grpc.StatusCode: + """Accesses the status code sent by the server. + + Returns: + The StatusCode value for the RPC. + """ + + @abstractmethod + async def details(self) -> str: + """Accesses the details sent by the server. + + Returns: + The details string of the RPC. + """ + + @abstractmethod + async def wait_for_connection(self) -> None: + """Waits until connected to peer and raises aio.AioRpcError if failed. + + This is an EXPERIMENTAL method. + + This method ensures the RPC has been successfully connected. Otherwise, + an AioRpcError will be raised to explain the reason of the connection + failure. 
+ + This method is recommended for building retry mechanisms. + """ + + +class UnaryUnaryCall( + Generic[RequestType, ResponseType], Call, metaclass=ABCMeta +): + """The abstract base class of an unary-unary RPC on the client-side.""" + + @abstractmethod + def __await__(self) -> Generator[Any, None, ResponseType]: + """Await the response message to be ready. + + Returns: + The response message of the RPC. + """ + + +class UnaryStreamCall( + Generic[RequestType, ResponseType], Call, metaclass=ABCMeta +): + @abstractmethod + def __aiter__(self) -> AsyncIterator[ResponseType]: + """Returns the async iterator representation that yields messages. + + Under the hood, it is calling the "read" method. + + Returns: + An async iterator object that yields messages. + """ + + @abstractmethod + async def read(self) -> Union[EOFType, ResponseType]: + """Reads one message from the stream. + + Read operations must be serialized when called from multiple + coroutines. + + Note that the iterator and read/write APIs may not be mixed on + a single RPC. + + Returns: + A response message, or an `grpc.aio.EOF` to indicate the end of the + stream. + """ + + +class StreamUnaryCall( + Generic[RequestType, ResponseType], Call, metaclass=ABCMeta +): + @abstractmethod + async def write(self, request: RequestType) -> None: + """Writes one message to the stream. + + Note that the iterator and read/write APIs may not be mixed on + a single RPC. + + Raises: + An RpcError exception if the write failed. + """ + + @abstractmethod + async def done_writing(self) -> None: + """Notifies server that the client is done sending messages. + + After done_writing is called, any additional invocation to the write + function will fail. This function is idempotent. + """ + + @abstractmethod + def __await__(self) -> Generator[Any, None, ResponseType]: + """Await the response message to be ready. + + Returns: + The response message of the stream. + """ + + +class StreamStreamCall( + Generic[RequestType, ResponseType], Call, metaclass=ABCMeta +): + @abstractmethod + def __aiter__(self) -> AsyncIterator[ResponseType]: + """Returns the async iterator representation that yields messages. + + Under the hood, it is calling the "read" method. + + Returns: + An async iterator object that yields messages. + """ + + @abstractmethod + async def read(self) -> Union[EOFType, ResponseType]: + """Reads one message from the stream. + + Read operations must be serialized when called from multiple + coroutines. + + Note that the iterator and read/write APIs may not be mixed on + a single RPC. + + Returns: + A response message, or an `grpc.aio.EOF` to indicate the end of the + stream. + """ + + @abstractmethod + async def write(self, request: RequestType) -> None: + """Writes one message to the stream. + + Note that the iterator and read/write APIs may not be mixed on + a single RPC. + + Raises: + An RpcError exception if the write failed. + """ + + @abstractmethod + async def done_writing(self) -> None: + """Notifies server that the client is done sending messages. + + After done_writing is called, any additional invocation to the write + function will fail. This function is idempotent. 
+ """ diff --git a/MLPY/Lib/site-packages/grpc/aio/_base_channel.py b/MLPY/Lib/site-packages/grpc/aio/_base_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0106d979135800ac0ef4281fa548ceb27f4d13 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_base_channel.py @@ -0,0 +1,364 @@ +# Copyright 2020 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Abstract base classes for Channel objects and Multicallable objects.""" + +import abc +from typing import Generic, Optional + +import grpc + +from . import _base_call +from ._typing import DeserializingFunction +from ._typing import MetadataType +from ._typing import RequestIterableType +from ._typing import RequestType +from ._typing import ResponseType +from ._typing import SerializingFunction + + +class UnaryUnaryMultiCallable(Generic[RequestType, ResponseType], abc.ABC): + """Enables asynchronous invocation of a unary-call RPC.""" + + @abc.abstractmethod + def __call__( + self, + request: RequestType, + *, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.UnaryUnaryCall[RequestType, ResponseType]: + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional duration of time in seconds to allow + for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + A UnaryUnaryCall object. + + Raises: + RpcError: Indicates that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + + +class UnaryStreamMultiCallable(Generic[RequestType, ResponseType], abc.ABC): + """Enables asynchronous invocation of a server-streaming RPC.""" + + @abc.abstractmethod + def __call__( + self, + request: RequestType, + *, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.UnaryStreamCall[RequestType, ResponseType]: + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional duration of time in seconds to allow + for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. 
+ grpc.compression.Gzip. + + Returns: + A UnaryStreamCall object. + + Raises: + RpcError: Indicates that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + + +class StreamUnaryMultiCallable(abc.ABC): + """Enables asynchronous invocation of a client-streaming RPC.""" + + @abc.abstractmethod + def __call__( + self, + request_iterator: Optional[RequestIterableType] = None, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.StreamUnaryCall: + """Asynchronously invokes the underlying RPC. + + Args: + request_iterator: An optional async iterable or iterable of request + messages for the RPC. + timeout: An optional duration of time in seconds to allow + for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + A StreamUnaryCall object. + + Raises: + RpcError: Indicates that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + + +class StreamStreamMultiCallable(abc.ABC): + """Enables asynchronous invocation of a bidirectional-streaming RPC.""" + + @abc.abstractmethod + def __call__( + self, + request_iterator: Optional[RequestIterableType] = None, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.StreamStreamCall: + """Asynchronously invokes the underlying RPC. + + Args: + request_iterator: An optional async iterable or iterable of request + messages for the RPC. + timeout: An optional duration of time in seconds to allow + for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. Only valid for + secure Channel. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + + Returns: + A StreamStreamCall object. + + Raises: + RpcError: Indicates that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + + +class Channel(abc.ABC): + """Enables asynchronous RPC invocation as a client. + + Channel objects implement the Asynchronous Context Manager (aka. async + with) type, although they are not supportted to be entered and exited + multiple times. + """ + + @abc.abstractmethod + async def __aenter__(self): + """Starts an asynchronous context manager. + + Returns: + Channel the channel that was instantiated. + """ + + @abc.abstractmethod + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Finishes the asynchronous context manager by closing the channel. + + Still active RPCs will be cancelled. 
+ """ + + @abc.abstractmethod + async def close(self, grace: Optional[float] = None): + """Closes this Channel and releases all resources held by it. + + This method immediately stops the channel from executing new RPCs in + all cases. + + If a grace period is specified, this method waits until all active + RPCs are finished or until the grace period is reached. RPCs that haven't + been terminated within the grace period are aborted. + If a grace period is not specified (by passing None for grace), + all existing RPCs are cancelled immediately. + + This method is idempotent. + """ + + @abc.abstractmethod + def get_state( + self, try_to_connect: bool = False + ) -> grpc.ChannelConnectivity: + """Checks the connectivity state of a channel. + + This is an EXPERIMENTAL API. + + If the channel reaches a stable connectivity state, it is guaranteed + that the return value of this function will eventually converge to that + state. + + Args: + try_to_connect: a bool indicate whether the Channel should try to + connect to peer or not. + + Returns: A ChannelConnectivity object. + """ + + @abc.abstractmethod + async def wait_for_state_change( + self, + last_observed_state: grpc.ChannelConnectivity, + ) -> None: + """Waits for a change in connectivity state. + + This is an EXPERIMENTAL API. + + The function blocks until there is a change in the channel connectivity + state from the "last_observed_state". If the state is already + different, this function will return immediately. + + There is an inherent race between the invocation of + "Channel.wait_for_state_change" and "Channel.get_state". The state can + change arbitrary many times during the race, so there is no way to + observe every state transition. + + If there is a need to put a timeout for this function, please refer to + "asyncio.wait_for". + + Args: + last_observed_state: A grpc.ChannelConnectivity object representing + the last known state. + """ + + @abc.abstractmethod + async def channel_ready(self) -> None: + """Creates a coroutine that blocks until the Channel is READY.""" + + @abc.abstractmethod + def unary_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> UnaryUnaryMultiCallable: + """Creates a UnaryUnaryMultiCallable for a unary-unary method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. Optional: A bool representing + whether the method is registered. + + Returns: + A UnaryUnaryMultiCallable value for the named unary-unary method. + """ + + @abc.abstractmethod + def unary_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> UnaryStreamMultiCallable: + """Creates a UnaryStreamMultiCallable for a unary-stream method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. 
+ response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. Optional: A bool representing + whether the method is registered. + + Returns: + A UnarySteramMultiCallable value for the named unary-stream method. + """ + + @abc.abstractmethod + def stream_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> StreamUnaryMultiCallable: + """Creates a StreamUnaryMultiCallable for a stream-unary method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. Optional: A bool representing + whether the method is registered. + + Returns: + A StreamUnaryMultiCallable value for the named stream-unary method. + """ + + @abc.abstractmethod + def stream_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> StreamStreamMultiCallable: + """Creates a StreamStreamMultiCallable for a stream-stream method. + + Args: + method: The name of the RPC method. + request_serializer: Optional :term:`serializer` for serializing the request + message. Request goes unserialized in case None is passed. + response_deserializer: Optional :term:`deserializer` for deserializing the + response message. Response goes undeserialized in case None + is passed. + _registered_method: Implementation Private. Optional: A bool representing + whether the method is registered. + + Returns: + A StreamStreamMultiCallable value for the named stream-stream method. + """ diff --git a/MLPY/Lib/site-packages/grpc/aio/_base_server.py b/MLPY/Lib/site-packages/grpc/aio/_base_server.py new file mode 100644 index 0000000000000000000000000000000000000000..508ac2d83b760f7a100ed685cd83fc859415740a --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_base_server.py @@ -0,0 +1,385 @@ +# Copyright 2020 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
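# [Editor's note: illustrative sketch, not part of the upstream diff.] The
# abstract Server and ServicerContext APIs declared below are implemented by
# grpc.aio.server(). A hedged example of the lifecycle they describe; the port
# is a placeholder and the generated add_*Servicer_to_server wiring is omitted:
#
#     import asyncio
#     import grpc
#
#     async def serve() -> None:
#         server = grpc.aio.server()
#         server.add_insecure_port("[::]:50051")  # port 0 lets the runtime choose
#         await server.start()
#         try:
#             await server.wait_for_termination()
#         finally:
#             await server.stop(grace=5.0)
#
#     asyncio.run(serve())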
+"""Abstract base classes for server-side classes.""" + +import abc +from typing import Generic, Iterable, Mapping, NoReturn, Optional, Sequence + +import grpc + +from ._metadata import Metadata +from ._typing import DoneCallbackType +from ._typing import MetadataType +from ._typing import RequestType +from ._typing import ResponseType + + +class Server(abc.ABC): + """Serves RPCs.""" + + @abc.abstractmethod + def add_generic_rpc_handlers( + self, generic_rpc_handlers: Sequence[grpc.GenericRpcHandler] + ) -> None: + """Registers GenericRpcHandlers with this Server. + + This method is only safe to call before the server is started. + + Args: + generic_rpc_handlers: A sequence of GenericRpcHandlers that will be + used to service RPCs. + """ + + @abc.abstractmethod + def add_insecure_port(self, address: str) -> int: + """Opens an insecure port for accepting RPCs. + + A port is a communication endpoint that used by networking protocols, + like TCP and UDP. To date, we only support TCP. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. If the port is 0, + or not specified in the address, then the gRPC runtime will choose a port. + + Returns: + An integer port on which the server will accept RPC requests. + """ + + @abc.abstractmethod + def add_secure_port( + self, address: str, server_credentials: grpc.ServerCredentials + ) -> int: + """Opens a secure port for accepting RPCs. + + A port is a communication endpoint that used by networking protocols, + like TCP and UDP. To date, we only support TCP. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. + if the port is 0, or not specified in the address, then the gRPC + runtime will choose a port. + server_credentials: A ServerCredentials object. + + Returns: + An integer port on which the server will accept RPC requests. + """ + + @abc.abstractmethod + async def start(self) -> None: + """Starts this Server. + + This method may only be called once. (i.e. it is not idempotent). + """ + + @abc.abstractmethod + async def stop(self, grace: Optional[float]) -> None: + """Stops this Server. + + This method immediately stops the server from servicing new RPCs in + all cases. + + If a grace period is specified, this method waits until all active + RPCs are finished or until the grace period is reached. RPCs that haven't + been terminated within the grace period are aborted. + If a grace period is not specified (by passing None for grace), all + existing RPCs are aborted immediately and this method blocks until + the last RPC handler terminates. + + This method is idempotent and may be called at any time. Passing a + smaller grace value in a subsequent call will have the effect of + stopping the Server sooner (passing None will have the effect of + stopping the server immediately). Passing a larger grace value in a + subsequent call will not have the effect of stopping the server later + (i.e. the most restrictive grace value is used). + + Args: + grace: A duration of time in seconds or None. + """ + + @abc.abstractmethod + async def wait_for_termination( + self, timeout: Optional[float] = None + ) -> bool: + """Continues current coroutine once the server stops. + + This is an EXPERIMENTAL API. 
+ + The wait will not consume computational resources during blocking, and + it will block until one of the two following conditions are met: + + 1) The server is stopped or terminated; + 2) A timeout occurs if timeout is not `None`. + + The timeout argument works in the same way as `threading.Event.wait()`. + https://docs.python.org/3/library/threading.html#threading.Event.wait + + Args: + timeout: A floating point number specifying a timeout for the + operation in seconds. + + Returns: + A bool indicates if the operation times out. + """ + + def add_registered_method_handlers(self, service_name, method_handlers): + """Registers GenericRpcHandlers with this Server. + + This method is only safe to call before the server is started. + + Args: + service_name: The service name. + method_handlers: A dictionary that maps method names to corresponding + RpcMethodHandler. + """ + + +# pylint: disable=too-many-public-methods +class ServicerContext(Generic[RequestType, ResponseType], abc.ABC): + """A context object passed to method implementations.""" + + @abc.abstractmethod + async def read(self) -> RequestType: + """Reads one message from the RPC. + + Only one read operation is allowed simultaneously. + + Returns: + A response message of the RPC. + + Raises: + An RpcError exception if the read failed. + """ + + @abc.abstractmethod + async def write(self, message: ResponseType) -> None: + """Writes one message to the RPC. + + Only one write operation is allowed simultaneously. + + Raises: + An RpcError exception if the write failed. + """ + + @abc.abstractmethod + async def send_initial_metadata( + self, initial_metadata: MetadataType + ) -> None: + """Sends the initial metadata value to the client. + + This method need not be called by implementations if they have no + metadata to add to what the gRPC runtime will transmit. + + Args: + initial_metadata: The initial :term:`metadata`. + """ + + @abc.abstractmethod + async def abort( + self, + code: grpc.StatusCode, + details: str = "", + trailing_metadata: MetadataType = tuple(), + ) -> NoReturn: + """Raises an exception to terminate the RPC with a non-OK status. + + The code and details passed as arguments will supercede any existing + ones. + + Args: + code: A StatusCode object to be sent to the client. + It must not be StatusCode.OK. + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + trailing_metadata: A sequence of tuple represents the trailing + :term:`metadata`. + + Raises: + Exception: An exception is always raised to signal the abortion the + RPC to the gRPC runtime. + """ + + @abc.abstractmethod + def set_trailing_metadata(self, trailing_metadata: MetadataType) -> None: + """Sends the trailing metadata for the RPC. + + This method need not be called by implementations if they have no + metadata to add to what the gRPC runtime will transmit. + + Args: + trailing_metadata: The trailing :term:`metadata`. + """ + + @abc.abstractmethod + def invocation_metadata(self) -> Optional[Metadata]: + """Accesses the metadata sent by the client. + + Returns: + The invocation :term:`metadata`. + """ + + @abc.abstractmethod + def set_code(self, code: grpc.StatusCode) -> None: + """Sets the value to be used as status code upon RPC completion. + + This method need not be called by method implementations if they wish + the gRPC runtime to determine the status code of the RPC. + + Args: + code: A StatusCode object to be sent to the client. 
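A sketch of a handler using the ServicerContext methods documented above (invocation_metadata, send_initial_metadata, abort, set_trailing_metadata); the 'auth-token' metadata key and the raw-bytes echo behavior are assumptions.

import grpc
from grpc.aio import Metadata, ServicerContext


async def guarded_echo(request: bytes, context: ServicerContext) -> bytes:
    # Reject callers that did not send the (hypothetical) auth-token metadata key.
    metadata = dict(context.invocation_metadata() or ())
    if "auth-token" not in metadata:
        # abort() raises to terminate the RPC, so nothing after it runs.
        await context.abort(grpc.StatusCode.UNAUTHENTICATED, "missing auth-token")

    await context.send_initial_metadata(Metadata(("handler", "guarded-echo")))
    context.set_trailing_metadata(Metadata(("request-size", str(len(request)))))
    return request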
+ """ + + @abc.abstractmethod + def set_details(self, details: str) -> None: + """Sets the value to be used the as detail string upon RPC completion. + + This method need not be called by method implementations if they have + no details to transmit. + + Args: + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + """ + + @abc.abstractmethod + def set_compression(self, compression: grpc.Compression) -> None: + """Set the compression algorithm to be used for the entire call. + + Args: + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + """ + + @abc.abstractmethod + def disable_next_message_compression(self) -> None: + """Disables compression for the next response message. + + This method will override any compression configuration set during + server creation or set on the call. + """ + + @abc.abstractmethod + def peer(self) -> str: + """Identifies the peer that invoked the RPC being serviced. + + Returns: + A string identifying the peer that invoked the RPC being serviced. + The string format is determined by gRPC runtime. + """ + + @abc.abstractmethod + def peer_identities(self) -> Optional[Iterable[bytes]]: + """Gets one or more peer identity(s). + + Equivalent to + servicer_context.auth_context().get(servicer_context.peer_identity_key()) + + Returns: + An iterable of the identities, or None if the call is not + authenticated. Each identity is returned as a raw bytes type. + """ + + @abc.abstractmethod + def peer_identity_key(self) -> Optional[str]: + """The auth property used to identify the peer. + + For example, "x509_common_name" or "x509_subject_alternative_name" are + used to identify an SSL peer. + + Returns: + The auth property (string) that indicates the + peer identity, or None if the call is not authenticated. + """ + + @abc.abstractmethod + def auth_context(self) -> Mapping[str, Iterable[bytes]]: + """Gets the auth context for the call. + + Returns: + A map of strings to an iterable of bytes for each auth property. + """ + + def time_remaining(self) -> float: + """Describes the length of allowed time remaining for the RPC. + + Returns: + A nonnegative float indicating the length of allowed time in seconds + remaining for the RPC to complete before it is considered to have + timed out, or None if no deadline was specified for the RPC. + """ + + def trailing_metadata(self): + """Access value to be used as trailing metadata upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The trailing :term:`metadata` for the RPC. + """ + raise NotImplementedError() + + def code(self): + """Accesses the value to be used as status code upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The StatusCode value for the RPC. + """ + raise NotImplementedError() + + def details(self): + """Accesses the value to be used as detail string upon RPC completion. + + This is an EXPERIMENTAL API. + + Returns: + The details string of the RPC. + """ + raise NotImplementedError() + + def add_done_callback(self, callback: DoneCallbackType) -> None: + """Registers a callback to be called on RPC termination. + + This is an EXPERIMENTAL API. + + Args: + callback: A callable object will be called with the servicer context + object as its only argument. + """ + + def cancelled(self) -> bool: + """Return True if the RPC is cancelled. + + The RPC is cancelled when the cancellation was requested with cancel(). + + This is an EXPERIMENTAL API. + + Returns: + A bool indicates whether the RPC is cancelled or not. 
+ """ + + def done(self) -> bool: + """Return True if the RPC is done. + + An RPC is done if the RPC is completed, cancelled or aborted. + + This is an EXPERIMENTAL API. + + Returns: + A bool indicates if the RPC is done. + """ diff --git a/MLPY/Lib/site-packages/grpc/aio/_call.py b/MLPY/Lib/site-packages/grpc/aio/_call.py new file mode 100644 index 0000000000000000000000000000000000000000..507016640c3718ae76f2902b39f0bae2215d7969 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_call.py @@ -0,0 +1,764 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Invocation-side implementation of gRPC Asyncio Python.""" + +import asyncio +import enum +from functools import partial +import inspect +import logging +import traceback +from typing import ( + Any, + AsyncIterator, + Generator, + Generic, + Optional, + Tuple, + Union, +) + +import grpc +from grpc import _common +from grpc._cython import cygrpc + +from . import _base_call +from ._metadata import Metadata +from ._typing import DeserializingFunction +from ._typing import DoneCallbackType +from ._typing import EOFType +from ._typing import MetadatumType +from ._typing import RequestIterableType +from ._typing import RequestType +from ._typing import ResponseType +from ._typing import SerializingFunction + +__all__ = "AioRpcError", "Call", "UnaryUnaryCall", "UnaryStreamCall" + +_LOCAL_CANCELLATION_DETAILS = "Locally cancelled by application!" +_GC_CANCELLATION_DETAILS = "Cancelled upon garbage collection!" +_RPC_ALREADY_FINISHED_DETAILS = "RPC already finished." +_RPC_HALF_CLOSED_DETAILS = 'RPC is half closed after calling "done_writing".' +_API_STYLE_ERROR = ( + "The iterator and read/write APIs may not be mixed on a single RPC." +) + +_OK_CALL_REPRESENTATION = ( + '<{} of RPC that terminated with:\n\tstatus = {}\n\tdetails = "{}"\n>' +) + +_NON_OK_CALL_REPRESENTATION = ( + "<{} of RPC that terminated with:\n" + "\tstatus = {}\n" + '\tdetails = "{}"\n' + '\tdebug_error_string = "{}"\n' + ">" +) + +_LOGGER = logging.getLogger(__name__) + + +class AioRpcError(grpc.RpcError): + """An implementation of RpcError to be used by the asynchronous API. + + Raised RpcError is a snapshot of the final status of the RPC, values are + determined. Hence, its methods no longer needs to be coroutines. + """ + + _code: grpc.StatusCode + _details: Optional[str] + _initial_metadata: Optional[Metadata] + _trailing_metadata: Optional[Metadata] + _debug_error_string: Optional[str] + + def __init__( + self, + code: grpc.StatusCode, + initial_metadata: Metadata, + trailing_metadata: Metadata, + details: Optional[str] = None, + debug_error_string: Optional[str] = None, + ) -> None: + """Constructor. + + Args: + code: The status code with which the RPC has been finalized. + details: Optional details explaining the reason of the error. + initial_metadata: Optional initial metadata that could be sent by the + Server. + trailing_metadata: Optional metadata that could be sent by the Server. 
+ """ + + super().__init__() + self._code = code + self._details = details + self._initial_metadata = initial_metadata + self._trailing_metadata = trailing_metadata + self._debug_error_string = debug_error_string + + def code(self) -> grpc.StatusCode: + """Accesses the status code sent by the server. + + Returns: + The `grpc.StatusCode` status code. + """ + return self._code + + def details(self) -> Optional[str]: + """Accesses the details sent by the server. + + Returns: + The description of the error. + """ + return self._details + + def initial_metadata(self) -> Metadata: + """Accesses the initial metadata sent by the server. + + Returns: + The initial metadata received. + """ + return self._initial_metadata + + def trailing_metadata(self) -> Metadata: + """Accesses the trailing metadata sent by the server. + + Returns: + The trailing metadata received. + """ + return self._trailing_metadata + + def debug_error_string(self) -> str: + """Accesses the debug error string sent by the server. + + Returns: + The debug error string received. + """ + return self._debug_error_string + + def _repr(self) -> str: + """Assembles the error string for the RPC error.""" + return _NON_OK_CALL_REPRESENTATION.format( + self.__class__.__name__, + self._code, + self._details, + self._debug_error_string, + ) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + def __reduce__(self): + return ( + type(self), + ( + self._code, + self._initial_metadata, + self._trailing_metadata, + self._details, + self._debug_error_string, + ), + ) + + +def _create_rpc_error( + initial_metadata: Metadata, status: cygrpc.AioRpcStatus +) -> AioRpcError: + return AioRpcError( + _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()], + Metadata.from_tuple(initial_metadata), + Metadata.from_tuple(status.trailing_metadata()), + details=status.details(), + debug_error_string=status.debug_error_string(), + ) + + +class Call: + """Base implementation of client RPC Call object. + + Implements logic around final status, metadata and cancellation. + """ + + _loop: asyncio.AbstractEventLoop + _code: grpc.StatusCode + _cython_call: cygrpc._AioCall + _metadata: Tuple[MetadatumType, ...] 
+ _request_serializer: SerializingFunction + _response_deserializer: DeserializingFunction + + def __init__( + self, + cython_call: cygrpc._AioCall, + metadata: Metadata, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._cython_call = cython_call + self._metadata = tuple(metadata) + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __del__(self) -> None: + # The '_cython_call' object might be destructed before Call object + if hasattr(self, "_cython_call"): + if not self._cython_call.done(): + self._cancel(_GC_CANCELLATION_DETAILS) + + def cancelled(self) -> bool: + return self._cython_call.cancelled() + + def _cancel(self, details: str) -> bool: + """Forwards the application cancellation reasoning.""" + if not self._cython_call.done(): + self._cython_call.cancel(details) + return True + else: + return False + + def cancel(self) -> bool: + return self._cancel(_LOCAL_CANCELLATION_DETAILS) + + def done(self) -> bool: + return self._cython_call.done() + + def add_done_callback(self, callback: DoneCallbackType) -> None: + cb = partial(callback, self) + self._cython_call.add_done_callback(cb) + + def time_remaining(self) -> Optional[float]: + return self._cython_call.time_remaining() + + async def initial_metadata(self) -> Metadata: + raw_metadata_tuple = await self._cython_call.initial_metadata() + return Metadata.from_tuple(raw_metadata_tuple) + + async def trailing_metadata(self) -> Metadata: + raw_metadata_tuple = ( + await self._cython_call.status() + ).trailing_metadata() + return Metadata.from_tuple(raw_metadata_tuple) + + async def code(self) -> grpc.StatusCode: + cygrpc_code = (await self._cython_call.status()).code() + return _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[cygrpc_code] + + async def details(self) -> str: + return (await self._cython_call.status()).details() + + async def debug_error_string(self) -> str: + return (await self._cython_call.status()).debug_error_string() + + async def _raise_for_status(self) -> None: + if self._cython_call.is_locally_cancelled(): + raise asyncio.CancelledError() + code = await self.code() + if code != grpc.StatusCode.OK: + raise _create_rpc_error( + await self.initial_metadata(), await self._cython_call.status() + ) + + def _repr(self) -> str: + return repr(self._cython_call) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + +class _APIStyle(enum.IntEnum): + UNKNOWN = 0 + ASYNC_GENERATOR = 1 + READER_WRITER = 2 + + +class _UnaryResponseMixin(Call, Generic[ResponseType]): + _call_response: asyncio.Task + + def _init_unary_response_mixin(self, response_task: asyncio.Task): + self._call_response = response_task + + def cancel(self) -> bool: + if super().cancel(): + self._call_response.cancel() + return True + else: + return False + + def __await__(self) -> Generator[Any, None, ResponseType]: + """Wait till the ongoing RPC request finishes.""" + try: + response = yield from self._call_response + except asyncio.CancelledError: + # Even if we caught all other CancelledError, there is still + # this corner case. If the application cancels immediately after + # the Call object is created, we will observe this + # `CancelledError`. + if not self.cancelled(): + self.cancel() + raise + + # NOTE(lidiz) If we raise RpcError in the task, and users doesn't + # 'await' on it. AsyncIO will log 'Task exception was never retrieved'. 
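The Call surface above (cancel, done, add_done_callback, time_remaining) is available as soon as the multicallable is invoked, before the response is awaited; a sketch reusing the hypothetical echo method.

import asyncio
import grpc


async def main() -> None:
    async with grpc.aio.insecure_channel("localhost:50051") as channel:
        echo = channel.unary_unary("/example.Echo/Echo")  # hypothetical method path
        call = echo(b"ping", timeout=5.0)  # returns a call object, not a response

        call.add_done_callback(lambda c: print("done, cancelled =", c.cancelled()))
        print("seconds remaining:", call.time_remaining())

        call.cancel()  # local cancellation
        try:
            await call
        except asyncio.CancelledError:
            print("RPC was cancelled before completion")


asyncio.run(main())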
+ # Instead, if we move the exception raising here, the spam stops. + # Unfortunately, there can only be one 'yield from' in '__await__'. So, + # we need to access the private instance variable. + if response is cygrpc.EOF: + if self._cython_call.is_locally_cancelled(): + raise asyncio.CancelledError() + else: + raise _create_rpc_error( + self._cython_call._initial_metadata, + self._cython_call._status, + ) + else: + return response + + +class _StreamResponseMixin(Call): + _message_aiter: AsyncIterator[ResponseType] + _preparation: asyncio.Task + _response_style: _APIStyle + + def _init_stream_response_mixin(self, preparation: asyncio.Task): + self._message_aiter = None + self._preparation = preparation + self._response_style = _APIStyle.UNKNOWN + + def _update_response_style(self, style: _APIStyle): + if self._response_style is _APIStyle.UNKNOWN: + self._response_style = style + elif self._response_style is not style: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + def cancel(self) -> bool: + if super().cancel(): + self._preparation.cancel() + return True + else: + return False + + async def _fetch_stream_responses(self) -> ResponseType: + message = await self._read() + while message is not cygrpc.EOF: + yield message + message = await self._read() + + # If the read operation failed, Core should explain why. + await self._raise_for_status() + + def __aiter__(self) -> AsyncIterator[ResponseType]: + self._update_response_style(_APIStyle.ASYNC_GENERATOR) + if self._message_aiter is None: + self._message_aiter = self._fetch_stream_responses() + return self._message_aiter + + async def _read(self) -> ResponseType: + # Wait for the request being sent + await self._preparation + + # Reads response message from Core + try: + raw_response = await self._cython_call.receive_serialized_message() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + if raw_response is cygrpc.EOF: + return cygrpc.EOF + else: + return _common.deserialize( + raw_response, self._response_deserializer + ) + + async def read(self) -> Union[EOFType, ResponseType]: + if self.done(): + await self._raise_for_status() + return cygrpc.EOF + self._update_response_style(_APIStyle.READER_WRITER) + + response_message = await self._read() + + if response_message is cygrpc.EOF: + # If the read operation failed, Core should explain why. + await self._raise_for_status() + return response_message + + +class _StreamRequestMixin(Call): + _metadata_sent: asyncio.Event + _done_writing_flag: bool + _async_request_poller: Optional[asyncio.Task] + _request_style: _APIStyle + + def _init_stream_request_mixin( + self, request_iterator: Optional[RequestIterableType] + ): + self._metadata_sent = asyncio.Event() + self._done_writing_flag = False + + # If user passes in an async iterator, create a consumer Task. 
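The two response-consumption styles enforced by _StreamResponseMixin, side by side; mixing async-for and read() on the same call raises a UsageError, so each style below gets its own call. The '/example.Echo/List' method path is an assumption.

import asyncio
import grpc


async def main() -> None:
    async with grpc.aio.insecure_channel("localhost:50051") as channel:
        list_items = channel.unary_stream("/example.Echo/List")  # hypothetical method

        # Style 1: async-generator API.
        async for item in list_items(b"page-1"):
            print("item:", item)

        # Style 2: reader API on a fresh call.
        call = list_items(b"page-2")
        while True:
            item = await call.read()
            if item is grpc.aio.EOF:
                break
            print("item:", item)


asyncio.run(main())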
+ if request_iterator is not None: + self._async_request_poller = self._loop.create_task( + self._consume_request_iterator(request_iterator) + ) + self._request_style = _APIStyle.ASYNC_GENERATOR + else: + self._async_request_poller = None + self._request_style = _APIStyle.READER_WRITER + + def _raise_for_different_style(self, style: _APIStyle): + if self._request_style is not style: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + def cancel(self) -> bool: + if super().cancel(): + if self._async_request_poller is not None: + self._async_request_poller.cancel() + return True + else: + return False + + def _metadata_sent_observer(self): + self._metadata_sent.set() + + async def _consume_request_iterator( + self, request_iterator: RequestIterableType + ) -> None: + try: + if inspect.isasyncgen(request_iterator) or hasattr( + request_iterator, "__aiter__" + ): + async for request in request_iterator: + try: + await self._write(request) + except AioRpcError as rpc_error: + _LOGGER.debug( + ( + "Exception while consuming the" + " request_iterator: %s" + ), + rpc_error, + ) + return + else: + for request in request_iterator: + try: + await self._write(request) + except AioRpcError as rpc_error: + _LOGGER.debug( + ( + "Exception while consuming the" + " request_iterator: %s" + ), + rpc_error, + ) + return + + await self._done_writing() + except: # pylint: disable=bare-except + # Client iterators can raise exceptions, which we should handle by + # cancelling the RPC and logging the client's error. No exceptions + # should escape this function. + _LOGGER.debug( + "Client request_iterator raised exception:\n%s", + traceback.format_exc(), + ) + self.cancel() + + async def _write(self, request: RequestType) -> None: + if self.done(): + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + if self._done_writing_flag: + raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS) + if not self._metadata_sent.is_set(): + await self._metadata_sent.wait() + if self.done(): + await self._raise_for_status() + + serialized_request = _common.serialize( + request, self._request_serializer + ) + try: + await self._cython_call.send_serialized_message(serialized_request) + except cygrpc.InternalError as err: + self._cython_call.set_internal_error(str(err)) + await self._raise_for_status() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def _done_writing(self) -> None: + if self.done(): + # If the RPC is finished, do nothing. + return + if not self._done_writing_flag: + # If the done writing is not sent before, try to send it. + self._done_writing_flag = True + try: + await self._cython_call.send_receive_close() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def write(self, request: RequestType) -> None: + self._raise_for_different_style(_APIStyle.READER_WRITER) + await self._write(request) + + async def done_writing(self) -> None: + """Signal peer that client is done writing. + + This method is idempotent. + """ + self._raise_for_different_style(_APIStyle.READER_WRITER) + await self._done_writing() + + async def wait_for_connection(self) -> None: + await self._metadata_sent.wait() + if self.done(): + await self._raise_for_status() + + +class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): + """Object for managing unary-unary RPC calls. + + Returned when an instance of `UnaryUnaryMultiCallable` object is called. 
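_StreamRequestMixin accepts either an async iterable of requests or explicit write()/done_writing() calls; a sketch of the explicit writer style against a hypothetical '/example.Echo/Upload' stream-unary method.

import asyncio
import grpc


async def main() -> None:
    async with grpc.aio.insecure_channel("localhost:50051") as channel:
        upload = channel.stream_unary("/example.Echo/Upload")  # hypothetical method

        call = upload()  # no request iterator: reader/writer style
        for chunk in (b"a", b"b", b"c"):
            await call.write(chunk)  # one write at a time
        await call.done_writing()  # half-close; idempotent

        response = await call  # the single unary response
        print("server replied:", response)


asyncio.run(main())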
+ """ + + _request: RequestType + _invocation_task: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request: RequestType, + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._request = request + self._context = cygrpc.build_census_context() + self._invocation_task = loop.create_task(self._invoke()) + self._init_unary_response_mixin(self._invocation_task) + + async def _invoke(self) -> ResponseType: + serialized_request = _common.serialize( + self._request, self._request_serializer + ) + + # NOTE(lidiz) asyncio.CancelledError is not a good transport for status, + # because the asyncio.Task class do not cache the exception object. + # https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785 + try: + serialized_response = await self._cython_call.unary_unary( + serialized_request, self._metadata, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + + if self._cython_call.is_ok(): + return _common.deserialize( + serialized_response, self._response_deserializer + ) + else: + return cygrpc.EOF + + async def wait_for_connection(self) -> None: + await self._invocation_task + if self.done(): + await self._raise_for_status() + + +class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall): + """Object for managing unary-stream RPC calls. + + Returned when an instance of `UnaryStreamMultiCallable` object is called. + """ + + _request: RequestType + _send_unary_request_task: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request: RequestType, + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._request = request + self._context = cygrpc.build_census_context() + self._send_unary_request_task = loop.create_task( + self._send_unary_request() + ) + self._init_stream_response_mixin(self._send_unary_request_task) + + async def _send_unary_request(self) -> ResponseType: + serialized_request = _common.serialize( + self._request, self._request_serializer + ) + try: + await self._cython_call.initiate_unary_stream( + serialized_request, self._metadata, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def wait_for_connection(self) -> None: + await self._send_unary_request_task + if self.done(): + await self._raise_for_status() + + +# pylint: disable=too-many-ancestors +class StreamUnaryCall( + _StreamRequestMixin, _UnaryResponseMixin, Call, _base_call.StreamUnaryCall +): + """Object for managing stream-unary RPC calls. + + Returned when an instance of `StreamUnaryMultiCallable` object is called. 
+ """ + + # pylint: disable=too-many-arguments + def __init__( + self, + request_iterator: Optional[RequestIterableType], + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + + self._context = cygrpc.build_census_context() + self._init_stream_request_mixin(request_iterator) + self._init_unary_response_mixin(loop.create_task(self._conduct_rpc())) + + async def _conduct_rpc(self) -> ResponseType: + try: + serialized_response = await self._cython_call.stream_unary( + self._metadata, self._metadata_sent_observer, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + if self._cython_call.is_ok(): + return _common.deserialize( + serialized_response, self._response_deserializer + ) + else: + return cygrpc.EOF + + +class StreamStreamCall( + _StreamRequestMixin, _StreamResponseMixin, Call, _base_call.StreamStreamCall +): + """Object for managing stream-stream RPC calls. + + Returned when an instance of `StreamStreamMultiCallable` object is called. + """ + + _initializer: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request_iterator: Optional[RequestIterableType], + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._context = cygrpc.build_census_context() + self._initializer = self._loop.create_task(self._prepare_rpc()) + self._init_stream_request_mixin(request_iterator) + self._init_stream_response_mixin(self._initializer) + + async def _prepare_rpc(self): + """This method prepares the RPC for receiving/sending messages. + + All other operations around the stream should only happen after the + completion of this method. + """ + try: + await self._cython_call.initiate_stream_stream( + self._metadata, self._metadata_sent_observer, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + # No need to raise RpcError here, because no one will `await` this task. diff --git a/MLPY/Lib/site-packages/grpc/aio/_channel.py b/MLPY/Lib/site-packages/grpc/aio/_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..73efeb546ad1b8813d0ae0b4c9e85d0596a28018 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_channel.py @@ -0,0 +1,624 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Invocation-side implementation of gRPC Asyncio Python.""" + +import asyncio +import sys +from typing import Any, Iterable, List, Optional, Sequence + +import grpc +from grpc import _common +from grpc import _compression +from grpc import _grpcio_metadata +from grpc._cython import cygrpc + +from . import _base_call +from . import _base_channel +from ._call import StreamStreamCall +from ._call import StreamUnaryCall +from ._call import UnaryStreamCall +from ._call import UnaryUnaryCall +from ._interceptor import ClientInterceptor +from ._interceptor import InterceptedStreamStreamCall +from ._interceptor import InterceptedStreamUnaryCall +from ._interceptor import InterceptedUnaryStreamCall +from ._interceptor import InterceptedUnaryUnaryCall +from ._interceptor import StreamStreamClientInterceptor +from ._interceptor import StreamUnaryClientInterceptor +from ._interceptor import UnaryStreamClientInterceptor +from ._interceptor import UnaryUnaryClientInterceptor +from ._metadata import Metadata +from ._typing import ChannelArgumentType +from ._typing import DeserializingFunction +from ._typing import MetadataType +from ._typing import RequestIterableType +from ._typing import RequestType +from ._typing import ResponseType +from ._typing import SerializingFunction +from ._utils import _timeout_to_deadline + +_USER_AGENT = "grpc-python-asyncio/{}".format(_grpcio_metadata.__version__) + +if sys.version_info[1] < 7: + + def _all_tasks() -> Iterable[asyncio.Task]: + return asyncio.Task.all_tasks() # pylint: disable=no-member + +else: + + def _all_tasks() -> Iterable[asyncio.Task]: + return asyncio.all_tasks() + + +def _augment_channel_arguments( + base_options: ChannelArgumentType, compression: Optional[grpc.Compression] +): + compression_channel_argument = _compression.create_channel_option( + compression + ) + user_agent_channel_argument = ( + ( + cygrpc.ChannelArgKey.primary_user_agent_string, + _USER_AGENT, + ), + ) + return ( + tuple(base_options) + + compression_channel_argument + + user_agent_channel_argument + ) + + +class _BaseMultiCallable: + """Base class of all multi callable objects. + + Handles the initialization logic and stores common attributes. + """ + + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + _method: bytes + _request_serializer: SerializingFunction + _response_deserializer: DeserializingFunction + _interceptors: Optional[Sequence[ClientInterceptor]] + _references: List[Any] + _loop: asyncio.AbstractEventLoop + + # pylint: disable=too-many-arguments + def __init__( + self, + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + interceptors: Optional[Sequence[ClientInterceptor]], + references: List[Any], + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._channel = channel + self._method = method + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + self._interceptors = interceptors + self._references = references + + @staticmethod + def _init_metadata( + metadata: Optional[MetadataType] = None, + compression: Optional[grpc.Compression] = None, + ) -> Metadata: + """Based on the provided values for or initialise the final + metadata, as it should be used for the current call. 
+ """ + metadata = metadata or Metadata() + if not isinstance(metadata, Metadata) and isinstance(metadata, tuple): + metadata = Metadata.from_tuple(metadata) + if compression: + metadata = Metadata( + *_compression.augment_metadata(metadata, compression) + ) + return metadata + + +class UnaryUnaryMultiCallable( + _BaseMultiCallable, _base_channel.UnaryUnaryMultiCallable +): + def __call__( + self, + request: RequestType, + *, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.UnaryUnaryCall[RequestType, ResponseType]: + metadata = self._init_metadata(metadata, compression) + if not self._interceptors: + call = UnaryUnaryCall( + request, + _timeout_to_deadline(timeout), + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + else: + call = InterceptedUnaryUnaryCall( + self._interceptors, + request, + timeout, + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + + return call + + +class UnaryStreamMultiCallable( + _BaseMultiCallable, _base_channel.UnaryStreamMultiCallable +): + def __call__( + self, + request: RequestType, + *, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.UnaryStreamCall[RequestType, ResponseType]: + metadata = self._init_metadata(metadata, compression) + + if not self._interceptors: + call = UnaryStreamCall( + request, + _timeout_to_deadline(timeout), + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + else: + call = InterceptedUnaryStreamCall( + self._interceptors, + request, + timeout, + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + + return call + + +class StreamUnaryMultiCallable( + _BaseMultiCallable, _base_channel.StreamUnaryMultiCallable +): + def __call__( + self, + request_iterator: Optional[RequestIterableType] = None, + timeout: Optional[float] = None, + metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.StreamUnaryCall: + metadata = self._init_metadata(metadata, compression) + + if not self._interceptors: + call = StreamUnaryCall( + request_iterator, + _timeout_to_deadline(timeout), + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + else: + call = InterceptedStreamUnaryCall( + self._interceptors, + request_iterator, + timeout, + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + + return call + + +class StreamStreamMultiCallable( + _BaseMultiCallable, _base_channel.StreamStreamMultiCallable +): + def __call__( + self, + request_iterator: Optional[RequestIterableType] = None, + timeout: Optional[float] = None, + 
metadata: Optional[MetadataType] = None, + credentials: Optional[grpc.CallCredentials] = None, + wait_for_ready: Optional[bool] = None, + compression: Optional[grpc.Compression] = None, + ) -> _base_call.StreamStreamCall: + metadata = self._init_metadata(metadata, compression) + + if not self._interceptors: + call = StreamStreamCall( + request_iterator, + _timeout_to_deadline(timeout), + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + else: + call = InterceptedStreamStreamCall( + self._interceptors, + request_iterator, + timeout, + metadata, + credentials, + wait_for_ready, + self._channel, + self._method, + self._request_serializer, + self._response_deserializer, + self._loop, + ) + + return call + + +class Channel(_base_channel.Channel): + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + _unary_unary_interceptors: List[UnaryUnaryClientInterceptor] + _unary_stream_interceptors: List[UnaryStreamClientInterceptor] + _stream_unary_interceptors: List[StreamUnaryClientInterceptor] + _stream_stream_interceptors: List[StreamStreamClientInterceptor] + + def __init__( + self, + target: str, + options: ChannelArgumentType, + credentials: Optional[grpc.ChannelCredentials], + compression: Optional[grpc.Compression], + interceptors: Optional[Sequence[ClientInterceptor]], + ): + """Constructor. + + Args: + target: The target to which to connect. + options: Configuration options for the channel. + credentials: A cygrpc.ChannelCredentials or None. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + interceptors: An optional list of interceptors that would be used for + intercepting any RPC executed with that channel. + """ + self._unary_unary_interceptors = [] + self._unary_stream_interceptors = [] + self._stream_unary_interceptors = [] + self._stream_stream_interceptors = [] + + if interceptors is not None: + for interceptor in interceptors: + if isinstance(interceptor, UnaryUnaryClientInterceptor): + self._unary_unary_interceptors.append(interceptor) + elif isinstance(interceptor, UnaryStreamClientInterceptor): + self._unary_stream_interceptors.append(interceptor) + elif isinstance(interceptor, StreamUnaryClientInterceptor): + self._stream_unary_interceptors.append(interceptor) + elif isinstance(interceptor, StreamStreamClientInterceptor): + self._stream_stream_interceptors.append(interceptor) + else: + raise ValueError( + "Interceptor {} must be ".format(interceptor) + + "{} or ".format(UnaryUnaryClientInterceptor.__name__) + + "{} or ".format(UnaryStreamClientInterceptor.__name__) + + "{} or ".format(StreamUnaryClientInterceptor.__name__) + + "{}. ".format(StreamStreamClientInterceptor.__name__) + ) + + self._loop = cygrpc.get_working_loop() + self._channel = cygrpc.AioChannel( + _common.encode(target), + _augment_channel_arguments(options, compression), + credentials, + self._loop, + ) + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self._close(None) + + async def _close(self, grace): # pylint: disable=too-many-branches + if self._channel.closed(): + return + + # No new calls will be accepted by the Cython channel. 
+ self._channel.closing() + + # Iterate through running tasks + tasks = _all_tasks() + calls = [] + call_tasks = [] + for task in tasks: + try: + stack = task.get_stack(limit=1) + except AttributeError as attribute_error: + # NOTE(lidiz) tl;dr: If the Task is created with a CPython + # object, it will trigger AttributeError. + # + # In the global finalizer, the event loop schedules + # a CPython PyAsyncGenAThrow object. + # https://github.com/python/cpython/blob/00e45877e33d32bb61aa13a2033e3bba370bda4d/Lib/asyncio/base_events.py#L484 + # + # However, the PyAsyncGenAThrow object is written in C and + # failed to include the normal Python frame objects. Hence, + # this exception is a false negative, and it is safe to ignore + # the failure. It is fixed by https://github.com/python/cpython/pull/18669, + # but not available until 3.9 or 3.8.3. So, we have to keep it + # for a while. + # TODO(lidiz) drop this hack after 3.8 deprecation + if "frame" in str(attribute_error): + continue + else: + raise + + # If the Task is created by a C-extension, the stack will be empty. + if not stack: + continue + + # Locate ones created by `aio.Call`. + frame = stack[0] + candidate = frame.f_locals.get("self") + if candidate: + if isinstance(candidate, _base_call.Call): + if hasattr(candidate, "_channel"): + # For intercepted Call object + if candidate._channel is not self._channel: + continue + elif hasattr(candidate, "_cython_call"): + # For normal Call object + if candidate._cython_call._channel is not self._channel: + continue + else: + # Unidentified Call object + raise cygrpc.InternalError( + f"Unrecognized call object: {candidate}" + ) + + calls.append(candidate) + call_tasks.append(task) + + # If needed, try to wait for them to finish. + # Call objects are not always awaitables. + if grace and call_tasks: + await asyncio.wait(call_tasks, timeout=grace) + + # Time to cancel existing calls. + for call in calls: + call.cancel() + + # Destroy the channel + self._channel.close() + + async def close(self, grace: Optional[float] = None): + await self._close(grace) + + def __del__(self): + if hasattr(self, "_channel"): + if not self._channel.closed(): + self._channel.close() + + def get_state( + self, try_to_connect: bool = False + ) -> grpc.ChannelConnectivity: + result = self._channel.check_connectivity_state(try_to_connect) + return _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[result] + + async def wait_for_state_change( + self, + last_observed_state: grpc.ChannelConnectivity, + ) -> None: + assert await self._channel.watch_connectivity_state( + last_observed_state.value[0], None + ) + + async def channel_ready(self) -> None: + state = self.get_state(try_to_connect=True) + while state != grpc.ChannelConnectivity.READY: + await self.wait_for_state_change(state) + state = self.get_state(try_to_connect=True) + + # TODO(xuanwn): Implement this method after we have + # observability for Asyncio. + def _get_registered_call_handle(self, method: str) -> int: + pass + + # TODO(xuanwn): Implement _registered_method after we have + # observability for Asyncio. 
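A sketch of waiting for the channel to become usable with the connectivity helpers above; the five-second ceiling is an assumption, since channel_ready() itself waits indefinitely.

import asyncio
import grpc


async def main() -> None:
    channel = grpc.aio.insecure_channel("localhost:50051")
    try:
        # channel_ready() loops over get_state()/wait_for_state_change() internally.
        await asyncio.wait_for(channel.channel_ready(), timeout=5.0)
        print("state:", channel.get_state())
    finally:
        await channel.close(grace=None)


asyncio.run(main())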
+ # pylint: disable=arguments-differ,unused-argument + def unary_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> UnaryUnaryMultiCallable: + return UnaryUnaryMultiCallable( + self._channel, + _common.encode(method), + request_serializer, + response_deserializer, + self._unary_unary_interceptors, + [self], + self._loop, + ) + + # TODO(xuanwn): Implement _registered_method after we have + # observability for Asyncio. + # pylint: disable=arguments-differ,unused-argument + def unary_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> UnaryStreamMultiCallable: + return UnaryStreamMultiCallable( + self._channel, + _common.encode(method), + request_serializer, + response_deserializer, + self._unary_stream_interceptors, + [self], + self._loop, + ) + + # TODO(xuanwn): Implement _registered_method after we have + # observability for Asyncio. + # pylint: disable=arguments-differ,unused-argument + def stream_unary( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> StreamUnaryMultiCallable: + return StreamUnaryMultiCallable( + self._channel, + _common.encode(method), + request_serializer, + response_deserializer, + self._stream_unary_interceptors, + [self], + self._loop, + ) + + # TODO(xuanwn): Implement _registered_method after we have + # observability for Asyncio. + # pylint: disable=arguments-differ,unused-argument + def stream_stream( + self, + method: str, + request_serializer: Optional[SerializingFunction] = None, + response_deserializer: Optional[DeserializingFunction] = None, + _registered_method: Optional[bool] = False, + ) -> StreamStreamMultiCallable: + return StreamStreamMultiCallable( + self._channel, + _common.encode(method), + request_serializer, + response_deserializer, + self._stream_stream_interceptors, + [self], + self._loop, + ) + + +def insecure_channel( + target: str, + options: Optional[ChannelArgumentType] = None, + compression: Optional[grpc.Compression] = None, + interceptors: Optional[Sequence[ClientInterceptor]] = None, +): + """Creates an insecure asynchronous Channel to a server. + + Args: + target: The server address + options: An optional list of key-value pairs (:term:`channel_arguments` + in gRPC Core runtime) to configure the channel. + compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + interceptors: An optional sequence of interceptors that will be executed for + any call executed with this channel. + + Returns: + A Channel. + """ + return Channel( + target, + () if options is None else options, + None, + compression, + interceptors, + ) + + +def secure_channel( + target: str, + credentials: grpc.ChannelCredentials, + options: Optional[ChannelArgumentType] = None, + compression: Optional[grpc.Compression] = None, + interceptors: Optional[Sequence[ClientInterceptor]] = None, +): + """Creates a secure asynchronous Channel to a server. + + Args: + target: The server address. + credentials: A ChannelCredentials instance. + options: An optional list of key-value pairs (:term:`channel_arguments` + in gRPC Core runtime) to configure the channel. 
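A sketch of creating a channel with explicit channel arguments and channel-wide compression, as accepted by insecure_channel above; the option values shown are illustrative, not recommendations.

import asyncio
import grpc


async def main() -> None:
    async with grpc.aio.insecure_channel(
        "localhost:50051",
        options=(
            # :term:`channel_arguments` are plain key/value pairs handed to gRPC Core.
            ("grpc.max_receive_message_length", 16 * 1024 * 1024),
            ("grpc.enable_retries", 1),
        ),
        compression=grpc.Compression.Gzip,
    ) as channel:
        print("initial state:", channel.get_state())


asyncio.run(main())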
+ compression: An optional value indicating the compression method to be + used over the lifetime of the channel. + interceptors: An optional sequence of interceptors that will be executed for + any call executed with this channel. + + Returns: + An aio.Channel. + """ + return Channel( + target, + () if options is None else options, + credentials._credentials, + compression, + interceptors, + ) diff --git a/MLPY/Lib/site-packages/grpc/aio/_interceptor.py b/MLPY/Lib/site-packages/grpc/aio/_interceptor.py new file mode 100644 index 0000000000000000000000000000000000000000..fd6765d9fc1245b32f62672579bfbb1e6ae85fa6 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_interceptor.py @@ -0,0 +1,1178 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Interceptors implementation of gRPC Asyncio Python.""" +from abc import ABCMeta +from abc import abstractmethod +import asyncio +import collections +import functools +from typing import ( + AsyncIterable, + Awaitable, + Callable, + Iterator, + List, + Optional, + Sequence, + Union, +) + +import grpc +from grpc._cython import cygrpc + +from . import _base_call +from ._call import AioRpcError +from ._call import StreamStreamCall +from ._call import StreamUnaryCall +from ._call import UnaryStreamCall +from ._call import UnaryUnaryCall +from ._call import _API_STYLE_ERROR +from ._call import _RPC_ALREADY_FINISHED_DETAILS +from ._call import _RPC_HALF_CLOSED_DETAILS +from ._metadata import Metadata +from ._typing import DeserializingFunction +from ._typing import DoneCallbackType +from ._typing import EOFType +from ._typing import RequestIterableType +from ._typing import RequestType +from ._typing import ResponseIterableType +from ._typing import ResponseType +from ._typing import SerializingFunction +from ._utils import _timeout_to_deadline + +_LOCAL_CANCELLATION_DETAILS = "Locally cancelled by application!" + + +class ServerInterceptor(metaclass=ABCMeta): + """Affords intercepting incoming RPCs on the service-side. + + This is an EXPERIMENTAL API. + """ + + @abstractmethod + async def intercept_service( + self, + continuation: Callable[ + [grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler] + ], + handler_call_details: grpc.HandlerCallDetails, + ) -> grpc.RpcMethodHandler: + """Intercepts incoming RPCs before handing them over to a handler. + + State can be passed from an interceptor to downstream interceptors + via contextvars. The first interceptor is called from an empty + contextvars.Context, and the same Context is used for downstream + interceptors and for the final handler call. Note that there are no + guarantees that interceptors and handlers will be called from the + same thread. + + Args: + continuation: A function that takes a HandlerCallDetails and + proceeds to invoke the next interceptor in the chain, if any, + or the RPC handler lookup logic, with the call details passed + as an argument, and returns an RpcMethodHandler instance if + the RPC is considered serviced, or None otherwise. 
+ handler_call_details: A HandlerCallDetails describing the RPC. + + Returns: + An RpcMethodHandler with which the RPC may be serviced if the + interceptor chooses to service this RPC, or None otherwise. + """ + + +class ClientCallDetails( + collections.namedtuple( + "ClientCallDetails", + ("method", "timeout", "metadata", "credentials", "wait_for_ready"), + ), + grpc.ClientCallDetails, +): + """Describes an RPC to be invoked. + + This is an EXPERIMENTAL API. + + Args: + method: The method name of the RPC. + timeout: An optional duration of time in seconds to allow for the RPC. + metadata: Optional metadata to be transmitted to the service-side of + the RPC. + credentials: An optional CallCredentials for the RPC. + wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism. + """ + + method: str + timeout: Optional[float] + metadata: Optional[Metadata] + credentials: Optional[grpc.CallCredentials] + wait_for_ready: Optional[bool] + + +class ClientInterceptor(metaclass=ABCMeta): + """Base class used for all Aio Client Interceptor classes""" + + +class UnaryUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta): + """Affords intercepting unary-unary invocations.""" + + @abstractmethod + async def intercept_unary_unary( + self, + continuation: Callable[ + [ClientCallDetails, RequestType], UnaryUnaryCall + ], + client_call_details: ClientCallDetails, + request: RequestType, + ) -> Union[UnaryUnaryCall, ResponseType]: + """Intercepts a unary-unary invocation asynchronously. + + Args: + continuation: A coroutine that proceeds with the invocation by + executing the next interceptor in the chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `call = await continuation(client_call_details, request)` + to continue with the RPC. `continuation` returns the call to the + RPC. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request: The request value for the RPC. + + Returns: + An object with the RPC response. + + Raises: + AioRpcError: Indicating that the RPC terminated with non-OK status. + asyncio.CancelledError: Indicating that the RPC was canceled. + """ + + +class UnaryStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta): + """Affords intercepting unary-stream invocations.""" + + @abstractmethod + async def intercept_unary_stream( + self, + continuation: Callable[ + [ClientCallDetails, RequestType], UnaryStreamCall + ], + client_call_details: ClientCallDetails, + request: RequestType, + ) -> Union[ResponseIterableType, UnaryStreamCall]: + """Intercepts a unary-stream invocation asynchronously. + + The function could return the call object or an asynchronous + iterator, in case of being an asyncrhonous iterator this will + become the source of the reads done by the caller. + + Args: + continuation: A coroutine that proceeds with the invocation by + executing the next interceptor in the chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `call = await continuation(client_call_details, request)` + to continue with the RPC. `continuation` returns the call to the + RPC. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request: The request value for the RPC. + + Returns: + The RPC Call or an asynchronous iterator. 
+ + Raises: + AioRpcError: Indicating that the RPC terminated with non-OK status. + asyncio.CancelledError: Indicating that the RPC was canceled. + """ + + +class StreamUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta): + """Affords intercepting stream-unary invocations.""" + + @abstractmethod + async def intercept_stream_unary( + self, + continuation: Callable[ + [ClientCallDetails, RequestType], StreamUnaryCall + ], + client_call_details: ClientCallDetails, + request_iterator: RequestIterableType, + ) -> StreamUnaryCall: + """Intercepts a stream-unary invocation asynchronously. + + Within the interceptor the usage of the call methods like `write` or + even awaiting the call should be done carefully, since the caller + could be expecting an untouched call, for example for start writing + messages to it. + + Args: + continuation: A coroutine that proceeds with the invocation by + executing the next interceptor in the chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `call = await continuation(client_call_details, request_iterator)` + to continue with the RPC. `continuation` returns the call to the + RPC. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request_iterator: The request iterator that will produce requests + for the RPC. + + Returns: + The RPC Call. + + Raises: + AioRpcError: Indicating that the RPC terminated with non-OK status. + asyncio.CancelledError: Indicating that the RPC was canceled. + """ + + +class StreamStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta): + """Affords intercepting stream-stream invocations.""" + + @abstractmethod + async def intercept_stream_stream( + self, + continuation: Callable[ + [ClientCallDetails, RequestType], StreamStreamCall + ], + client_call_details: ClientCallDetails, + request_iterator: RequestIterableType, + ) -> Union[ResponseIterableType, StreamStreamCall]: + """Intercepts a stream-stream invocation asynchronously. + + Within the interceptor the usage of the call methods like `write` or + even awaiting the call should be done carefully, since the caller + could be expecting an untouched call, for example for start writing + messages to it. + + The function could return the call object or an asynchronous + iterator, in case of being an asyncrhonous iterator this will + become the source of the reads done by the caller. + + Args: + continuation: A coroutine that proceeds with the invocation by + executing the next interceptor in the chain or invoking the + actual RPC on the underlying Channel. It is the interceptor's + responsibility to call it if it decides to move the RPC forward. + The interceptor can use + `call = await continuation(client_call_details, request_iterator)` + to continue with the RPC. `continuation` returns the call to the + RPC. + client_call_details: A ClientCallDetails object describing the + outgoing RPC. + request_iterator: The request iterator that will produce requests + for the RPC. + + Returns: + The RPC Call or an asynchronous iterator. + + Raises: + AioRpcError: Indicating that the RPC terminated with non-OK status. + asyncio.CancelledError: Indicating that the RPC was canceled. + """ + + +class InterceptedCall: + """Base implementation for all intercepted call arities. 
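A sketch of a unary-unary client interceptor built on the contract described above: it copies the ClientCallDetails, appends a hypothetical tracing header, and delegates to continuation; the header name and channel target are assumptions.

import asyncio
import grpc
from grpc.aio import ClientCallDetails, Metadata, UnaryUnaryClientInterceptor


class TracingInterceptor(UnaryUnaryClientInterceptor):
    async def intercept_unary_unary(self, continuation, client_call_details, request):
        metadata = client_call_details.metadata or Metadata()
        metadata.add("x-trace-id", "abc123")  # hypothetical tracing header
        new_details = ClientCallDetails(
            method=client_call_details.method,
            timeout=client_call_details.timeout,
            metadata=metadata,
            credentials=client_call_details.credentials,
            wait_for_ready=client_call_details.wait_for_ready,
        )
        # continuation() yields the underlying call; returning it unawaited
        # leaves response consumption to the caller.
        return await continuation(new_details, request)


async def main() -> None:
    async with grpc.aio.insecure_channel(
        "localhost:50051", interceptors=[TracingInterceptor()]
    ) as channel:
        echo = channel.unary_unary("/example.Echo/Echo")  # hypothetical method path
        print(await echo(b"ping"))


asyncio.run(main())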
+
+    Interceptors might have some work to do before the RPC invocation, with
+    the ability to change the invocation parameters, and some work to do
+    after the RPC invocation, with access to the wrapped `UnaryUnaryCall`.
+
+    It also handles early and late cancellations: when the RPC has not even
+    started and execution is still held by the interceptors, or when the
+    RPC has finished but execution is still held by the interceptors.
+
+    Once the RPC is finally executed, all methods are delegated to the
+    intercepted call, which is also the same call object returned to the
+    interceptors.
+
+    As the base class for all intercepted calls, it implements the logic
+    around final status, metadata and cancellation.
+    """
+
+    _interceptors_task: asyncio.Task
+    _pending_add_done_callbacks: Sequence[DoneCallbackType]
+
+    def __init__(self, interceptors_task: asyncio.Task) -> None:
+        self._interceptors_task = interceptors_task
+        self._pending_add_done_callbacks = []
+        self._interceptors_task.add_done_callback(
+            self._fire_or_add_pending_done_callbacks
+        )
+
+    def __del__(self):
+        self.cancel()
+
+    def _fire_or_add_pending_done_callbacks(
+        self, interceptors_task: asyncio.Task
+    ) -> None:
+        if not self._pending_add_done_callbacks:
+            return
+
+        call_completed = False
+
+        try:
+            call = interceptors_task.result()
+            if call.done():
+                call_completed = True
+        except (AioRpcError, asyncio.CancelledError):
+            call_completed = True
+
+        if call_completed:
+            for callback in self._pending_add_done_callbacks:
+                callback(self)
+        else:
+            for callback in self._pending_add_done_callbacks:
+                callback = functools.partial(
+                    self._wrap_add_done_callback, callback
+                )
+                call.add_done_callback(callback)
+
+        self._pending_add_done_callbacks = []
+
+    def _wrap_add_done_callback(
+        self, callback: DoneCallbackType, unused_call: _base_call.Call
+    ) -> None:
+        callback(self)
+
+    def cancel(self) -> bool:
+        if not self._interceptors_task.done():
+            # The intercepted call is not available yet, so try to cancel
+            # the interceptors task with the generic asyncio cancellation
+            # method.
+ return self._interceptors_task.cancel() + + try: + call = self._interceptors_task.result() + except AioRpcError: + return False + except asyncio.CancelledError: + return False + + return call.cancel() + + def cancelled(self) -> bool: + if not self._interceptors_task.done(): + return False + + try: + call = self._interceptors_task.result() + except AioRpcError as err: + return err.code() == grpc.StatusCode.CANCELLED + except asyncio.CancelledError: + return True + + return call.cancelled() + + def done(self) -> bool: + if not self._interceptors_task.done(): + return False + + try: + call = self._interceptors_task.result() + except (AioRpcError, asyncio.CancelledError): + return True + + return call.done() + + def add_done_callback(self, callback: DoneCallbackType) -> None: + if not self._interceptors_task.done(): + self._pending_add_done_callbacks.append(callback) + return + + try: + call = self._interceptors_task.result() + except (AioRpcError, asyncio.CancelledError): + callback(self) + return + + if call.done(): + callback(self) + else: + callback = functools.partial(self._wrap_add_done_callback, callback) + call.add_done_callback(callback) + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + async def initial_metadata(self) -> Optional[Metadata]: + try: + call = await self._interceptors_task + except AioRpcError as err: + return err.initial_metadata() + except asyncio.CancelledError: + return None + + return await call.initial_metadata() + + async def trailing_metadata(self) -> Optional[Metadata]: + try: + call = await self._interceptors_task + except AioRpcError as err: + return err.trailing_metadata() + except asyncio.CancelledError: + return None + + return await call.trailing_metadata() + + async def code(self) -> grpc.StatusCode: + try: + call = await self._interceptors_task + except AioRpcError as err: + return err.code() + except asyncio.CancelledError: + return grpc.StatusCode.CANCELLED + + return await call.code() + + async def details(self) -> str: + try: + call = await self._interceptors_task + except AioRpcError as err: + return err.details() + except asyncio.CancelledError: + return _LOCAL_CANCELLATION_DETAILS + + return await call.details() + + async def debug_error_string(self) -> Optional[str]: + try: + call = await self._interceptors_task + except AioRpcError as err: + return err.debug_error_string() + except asyncio.CancelledError: + return "" + + return await call.debug_error_string() + + async def wait_for_connection(self) -> None: + call = await self._interceptors_task + return await call.wait_for_connection() + + +class _InterceptedUnaryResponseMixin: + def __await__(self): + call = yield from self._interceptors_task.__await__() + response = yield from call.__await__() + return response + + +class _InterceptedStreamResponseMixin: + _response_aiter: Optional[AsyncIterable[ResponseType]] + + def _init_stream_response_mixin(self) -> None: + # Is initalized later, otherwise if the iterator is not finally + # consumed a logging warning is emmited by Asyncio. 
+ self._response_aiter = None + + async def _wait_for_interceptor_task_response_iterator( + self, + ) -> ResponseType: + call = await self._interceptors_task + async for response in call: + yield response + + def __aiter__(self) -> AsyncIterable[ResponseType]: + if self._response_aiter is None: + self._response_aiter = ( + self._wait_for_interceptor_task_response_iterator() + ) + return self._response_aiter + + async def read(self) -> Union[EOFType, ResponseType]: + if self._response_aiter is None: + self._response_aiter = ( + self._wait_for_interceptor_task_response_iterator() + ) + try: + return await self._response_aiter.asend(None) + except StopAsyncIteration: + return cygrpc.EOF + + +class _InterceptedStreamRequestMixin: + _write_to_iterator_async_gen: Optional[AsyncIterable[RequestType]] + _write_to_iterator_queue: Optional[asyncio.Queue] + _status_code_task: Optional[asyncio.Task] + + _FINISH_ITERATOR_SENTINEL = object() + + def _init_stream_request_mixin( + self, request_iterator: Optional[RequestIterableType] + ) -> RequestIterableType: + if request_iterator is None: + # We provide our own request iterator which is a proxy + # of the futures writes that will be done by the caller. + self._write_to_iterator_queue = asyncio.Queue(maxsize=1) + self._write_to_iterator_async_gen = ( + self._proxy_writes_as_request_iterator() + ) + self._status_code_task = None + request_iterator = self._write_to_iterator_async_gen + else: + self._write_to_iterator_queue = None + + return request_iterator + + async def _proxy_writes_as_request_iterator(self): + await self._interceptors_task + + while True: + value = await self._write_to_iterator_queue.get() + if ( + value + is _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL + ): + break + yield value + + async def _write_to_iterator_queue_interruptible( + self, request: RequestType, call: InterceptedCall + ): + # Write the specified 'request' to the request iterator queue using the + # specified 'call' to allow for interruption of the write in the case + # of abrupt termination of the call. + if self._status_code_task is None: + self._status_code_task = self._loop.create_task(call.code()) + + await asyncio.wait( + ( + self._loop.create_task( + self._write_to_iterator_queue.put(request) + ), + self._status_code_task, + ), + return_when=asyncio.FIRST_COMPLETED, + ) + + async def write(self, request: RequestType) -> None: + # If no queue was created it means that requests + # should be expected through an iterators provided + # by the caller. + if self._write_to_iterator_queue is None: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + try: + call = await self._interceptors_task + except (asyncio.CancelledError, AioRpcError): + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + + if call.done(): + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + elif call._done_writing_flag: + raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS) + + await self._write_to_iterator_queue_interruptible(request, call) + + if call.done(): + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + + async def done_writing(self) -> None: + """Signal peer that client is done writing. + + This method is idempotent. + """ + # If no queue was created it means that requests + # should be expected through an iterators provided + # by the caller. 
+ if self._write_to_iterator_queue is None: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + try: + call = await self._interceptors_task + except asyncio.CancelledError: + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + + await self._write_to_iterator_queue_interruptible( + _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL, call + ) + + +class InterceptedUnaryUnaryCall( + _InterceptedUnaryResponseMixin, InterceptedCall, _base_call.UnaryUnaryCall +): + """Used for running a `UnaryUnaryCall` wrapped by interceptors. + + For the `__await__` method is it is proxied to the intercepted call only when + the interceptor task is finished. + """ + + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + + # pylint: disable=too-many-arguments + def __init__( + self, + interceptors: Sequence[UnaryUnaryClientInterceptor], + request: RequestType, + timeout: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._channel = channel + interceptors_task = loop.create_task( + self._invoke( + interceptors, + method, + timeout, + metadata, + credentials, + wait_for_ready, + request, + request_serializer, + response_deserializer, + ) + ) + super().__init__(interceptors_task) + + # pylint: disable=too-many-arguments + async def _invoke( + self, + interceptors: Sequence[UnaryUnaryClientInterceptor], + method: bytes, + timeout: Optional[float], + metadata: Optional[Metadata], + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + request: RequestType, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + ) -> UnaryUnaryCall: + """Run the RPC call wrapped in interceptors""" + + async def _run_interceptor( + interceptors: List[UnaryUnaryClientInterceptor], + client_call_details: ClientCallDetails, + request: RequestType, + ) -> _base_call.UnaryUnaryCall: + if interceptors: + continuation = functools.partial( + _run_interceptor, interceptors[1:] + ) + call_or_response = await interceptors[0].intercept_unary_unary( + continuation, client_call_details, request + ) + + if isinstance(call_or_response, _base_call.UnaryUnaryCall): + return call_or_response + else: + return UnaryUnaryCallResponse(call_or_response) + + else: + return UnaryUnaryCall( + request, + _timeout_to_deadline(client_call_details.timeout), + client_call_details.metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + self._channel, + client_call_details.method, + request_serializer, + response_deserializer, + self._loop, + ) + + client_call_details = ClientCallDetails( + method, timeout, metadata, credentials, wait_for_ready + ) + return await _run_interceptor( + list(interceptors), client_call_details, request + ) + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + +class InterceptedUnaryStreamCall( + _InterceptedStreamResponseMixin, InterceptedCall, _base_call.UnaryStreamCall +): + """Used for running a `UnaryStreamCall` wrapped by interceptors.""" + + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + _last_returned_call_from_interceptors = Optional[_base_call.UnaryStreamCall] + + # pylint: disable=too-many-arguments + def __init__( + self, + interceptors: Sequence[UnaryStreamClientInterceptor], + request: 
RequestType, + timeout: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._channel = channel + self._init_stream_response_mixin() + self._last_returned_call_from_interceptors = None + interceptors_task = loop.create_task( + self._invoke( + interceptors, + method, + timeout, + metadata, + credentials, + wait_for_ready, + request, + request_serializer, + response_deserializer, + ) + ) + super().__init__(interceptors_task) + + # pylint: disable=too-many-arguments + async def _invoke( + self, + interceptors: Sequence[UnaryStreamClientInterceptor], + method: bytes, + timeout: Optional[float], + metadata: Optional[Metadata], + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + request: RequestType, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + ) -> UnaryStreamCall: + """Run the RPC call wrapped in interceptors""" + + async def _run_interceptor( + interceptors: List[UnaryStreamClientInterceptor], + client_call_details: ClientCallDetails, + request: RequestType, + ) -> _base_call.UnaryStreamCall: + if interceptors: + continuation = functools.partial( + _run_interceptor, interceptors[1:] + ) + + call_or_response_iterator = await interceptors[ + 0 + ].intercept_unary_stream( + continuation, client_call_details, request + ) + + if isinstance( + call_or_response_iterator, _base_call.UnaryStreamCall + ): + self._last_returned_call_from_interceptors = ( + call_or_response_iterator + ) + else: + self._last_returned_call_from_interceptors = ( + UnaryStreamCallResponseIterator( + self._last_returned_call_from_interceptors, + call_or_response_iterator, + ) + ) + return self._last_returned_call_from_interceptors + else: + self._last_returned_call_from_interceptors = UnaryStreamCall( + request, + _timeout_to_deadline(client_call_details.timeout), + client_call_details.metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + self._channel, + client_call_details.method, + request_serializer, + response_deserializer, + self._loop, + ) + + return self._last_returned_call_from_interceptors + + client_call_details = ClientCallDetails( + method, timeout, metadata, credentials, wait_for_ready + ) + return await _run_interceptor( + list(interceptors), client_call_details, request + ) + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + +class InterceptedStreamUnaryCall( + _InterceptedUnaryResponseMixin, + _InterceptedStreamRequestMixin, + InterceptedCall, + _base_call.StreamUnaryCall, +): + """Used for running a `StreamUnaryCall` wrapped by interceptors. + + For the `__await__` method is it is proxied to the intercepted call only when + the interceptor task is finished. 
+ """ + + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + + # pylint: disable=too-many-arguments + def __init__( + self, + interceptors: Sequence[StreamUnaryClientInterceptor], + request_iterator: Optional[RequestIterableType], + timeout: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._channel = channel + request_iterator = self._init_stream_request_mixin(request_iterator) + interceptors_task = loop.create_task( + self._invoke( + interceptors, + method, + timeout, + metadata, + credentials, + wait_for_ready, + request_iterator, + request_serializer, + response_deserializer, + ) + ) + super().__init__(interceptors_task) + + # pylint: disable=too-many-arguments + async def _invoke( + self, + interceptors: Sequence[StreamUnaryClientInterceptor], + method: bytes, + timeout: Optional[float], + metadata: Optional[Metadata], + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + request_iterator: RequestIterableType, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + ) -> StreamUnaryCall: + """Run the RPC call wrapped in interceptors""" + + async def _run_interceptor( + interceptors: Iterator[StreamUnaryClientInterceptor], + client_call_details: ClientCallDetails, + request_iterator: RequestIterableType, + ) -> _base_call.StreamUnaryCall: + if interceptors: + continuation = functools.partial( + _run_interceptor, interceptors[1:] + ) + + return await interceptors[0].intercept_stream_unary( + continuation, client_call_details, request_iterator + ) + else: + return StreamUnaryCall( + request_iterator, + _timeout_to_deadline(client_call_details.timeout), + client_call_details.metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + self._channel, + client_call_details.method, + request_serializer, + response_deserializer, + self._loop, + ) + + client_call_details = ClientCallDetails( + method, timeout, metadata, credentials, wait_for_ready + ) + return await _run_interceptor( + list(interceptors), client_call_details, request_iterator + ) + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + +class InterceptedStreamStreamCall( + _InterceptedStreamResponseMixin, + _InterceptedStreamRequestMixin, + InterceptedCall, + _base_call.StreamStreamCall, +): + """Used for running a `StreamStreamCall` wrapped by interceptors.""" + + _loop: asyncio.AbstractEventLoop + _channel: cygrpc.AioChannel + _last_returned_call_from_interceptors = Optional[ + _base_call.StreamStreamCall + ] + + # pylint: disable=too-many-arguments + def __init__( + self, + interceptors: Sequence[StreamStreamClientInterceptor], + request_iterator: Optional[RequestIterableType], + timeout: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._channel = channel + self._init_stream_response_mixin() + request_iterator = self._init_stream_request_mixin(request_iterator) + self._last_returned_call_from_interceptors = None + interceptors_task = 
loop.create_task( + self._invoke( + interceptors, + method, + timeout, + metadata, + credentials, + wait_for_ready, + request_iterator, + request_serializer, + response_deserializer, + ) + ) + super().__init__(interceptors_task) + + # pylint: disable=too-many-arguments + async def _invoke( + self, + interceptors: Sequence[StreamStreamClientInterceptor], + method: bytes, + timeout: Optional[float], + metadata: Optional[Metadata], + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + request_iterator: RequestIterableType, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + ) -> StreamStreamCall: + """Run the RPC call wrapped in interceptors""" + + async def _run_interceptor( + interceptors: List[StreamStreamClientInterceptor], + client_call_details: ClientCallDetails, + request_iterator: RequestIterableType, + ) -> _base_call.StreamStreamCall: + if interceptors: + continuation = functools.partial( + _run_interceptor, interceptors[1:] + ) + + call_or_response_iterator = await interceptors[ + 0 + ].intercept_stream_stream( + continuation, client_call_details, request_iterator + ) + + if isinstance( + call_or_response_iterator, _base_call.StreamStreamCall + ): + self._last_returned_call_from_interceptors = ( + call_or_response_iterator + ) + else: + self._last_returned_call_from_interceptors = ( + StreamStreamCallResponseIterator( + self._last_returned_call_from_interceptors, + call_or_response_iterator, + ) + ) + return self._last_returned_call_from_interceptors + else: + self._last_returned_call_from_interceptors = StreamStreamCall( + request_iterator, + _timeout_to_deadline(client_call_details.timeout), + client_call_details.metadata, + client_call_details.credentials, + client_call_details.wait_for_ready, + self._channel, + client_call_details.method, + request_serializer, + response_deserializer, + self._loop, + ) + return self._last_returned_call_from_interceptors + + client_call_details = ClientCallDetails( + method, timeout, metadata, credentials, wait_for_ready + ) + return await _run_interceptor( + list(interceptors), client_call_details, request_iterator + ) + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + +class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall): + """Final UnaryUnaryCall class finished with a response.""" + + _response: ResponseType + + def __init__(self, response: ResponseType) -> None: + self._response = response + + def cancel(self) -> bool: + return False + + def cancelled(self) -> bool: + return False + + def done(self) -> bool: + return True + + def add_done_callback(self, unused_callback) -> None: + raise NotImplementedError() + + def time_remaining(self) -> Optional[float]: + raise NotImplementedError() + + async def initial_metadata(self) -> Optional[Metadata]: + return None + + async def trailing_metadata(self) -> Optional[Metadata]: + return None + + async def code(self) -> grpc.StatusCode: + return grpc.StatusCode.OK + + async def details(self) -> str: + return "" + + async def debug_error_string(self) -> Optional[str]: + return None + + def __await__(self): + if False: # pylint: disable=using-constant-test + # This code path is never used, but a yield statement is needed + # for telling the interpreter that __await__ is a generator. 
+                yield None
+        return self._response
+
+    async def wait_for_connection(self) -> None:
+        pass
+
+
+class _StreamCallResponseIterator:
+    _call: Union[_base_call.UnaryStreamCall, _base_call.StreamStreamCall]
+    _response_iterator: AsyncIterable[ResponseType]
+
+    def __init__(
+        self,
+        call: Union[_base_call.UnaryStreamCall, _base_call.StreamStreamCall],
+        response_iterator: AsyncIterable[ResponseType],
+    ) -> None:
+        self._response_iterator = response_iterator
+        self._call = call
+
+    def cancel(self) -> bool:
+        return self._call.cancel()
+
+    def cancelled(self) -> bool:
+        return self._call.cancelled()
+
+    def done(self) -> bool:
+        return self._call.done()
+
+    def add_done_callback(self, callback) -> None:
+        self._call.add_done_callback(callback)
+
+    def time_remaining(self) -> Optional[float]:
+        return self._call.time_remaining()
+
+    async def initial_metadata(self) -> Optional[Metadata]:
+        return await self._call.initial_metadata()
+
+    async def trailing_metadata(self) -> Optional[Metadata]:
+        return await self._call.trailing_metadata()
+
+    async def code(self) -> grpc.StatusCode:
+        return await self._call.code()
+
+    async def details(self) -> str:
+        return await self._call.details()
+
+    async def debug_error_string(self) -> Optional[str]:
+        return await self._call.debug_error_string()
+
+    def __aiter__(self):
+        return self._response_iterator.__aiter__()
+
+    async def wait_for_connection(self) -> None:
+        return await self._call.wait_for_connection()
+
+
+class UnaryStreamCallResponseIterator(
+    _StreamCallResponseIterator, _base_call.UnaryStreamCall
+):
+    """UnaryStreamCall class which uses an alternative response iterator."""
+
+    async def read(self) -> Union[EOFType, ResponseType]:
+        # Behind the scenes everything goes through the
+        # async iterator. So this path should not be reached.
+        raise NotImplementedError()
+
+
+class StreamStreamCallResponseIterator(
+    _StreamCallResponseIterator, _base_call.StreamStreamCall
+):
+    """StreamStreamCall class which uses an alternative response iterator."""
+
+    async def read(self) -> Union[EOFType, ResponseType]:
+        # Behind the scenes everything goes through the
+        # async iterator. So this path should not be reached.
+        raise NotImplementedError()
+
+    async def write(self, request: RequestType) -> None:
+        # Behind the scenes everything goes through the
+        # async iterator provided by the InterceptedStreamStreamCall.
+        # So this path should not be reached.
+        raise NotImplementedError()
+
+    async def done_writing(self) -> None:
+        # Behind the scenes everything goes through the
+        # async iterator provided by the InterceptedStreamStreamCall.
+        # So this path should not be reached.
+        raise NotImplementedError()
+
+    @property
+    def _done_writing_flag(self) -> bool:
+        return self._call._done_writing_flag
diff --git a/MLPY/Lib/site-packages/grpc/aio/_metadata.py b/MLPY/Lib/site-packages/grpc/aio/_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d481417b394e796b7cf7ff5bf530a9c23d1d8f3
--- /dev/null
+++ b/MLPY/Lib/site-packages/grpc/aio/_metadata.py
@@ -0,0 +1,137 @@
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Implementation of the metadata abstraction for gRPC Asyncio Python."""
+from collections import OrderedDict
+from collections import abc
+from typing import Any, Iterator, List, Optional, Tuple, Union
+
+MetadataKey = str
+MetadataValue = Union[str, bytes]
+
+
+class Metadata(abc.Collection):
+    """Metadata abstraction for the asynchronous calls and interceptors.
+
+    The metadata is a mapping from str -> List[str]
+
+    Traits
+      * Multiple entries are allowed for the same key
+      * The order of the values by key is preserved
+      * Getting an element by key retrieves the first mapped value
+      * Supports an immutable view of the data
+      * Allows partial mutation on the data without recreating the new object from scratch.
+    """
+
+    def __init__(self, *args: Tuple[MetadataKey, MetadataValue]) -> None:
+        self._metadata = OrderedDict()
+        for md_key, md_value in args:
+            self.add(md_key, md_value)
+
+    @classmethod
+    def from_tuple(cls, raw_metadata: tuple):
+        if raw_metadata:
+            return cls(*raw_metadata)
+        return cls()
+
+    def add(self, key: MetadataKey, value: MetadataValue) -> None:
+        self._metadata.setdefault(key, [])
+        self._metadata[key].append(value)
+
+    def __len__(self) -> int:
+        """Return the total number of elements in the metadata,
+        including multiple values for the same key.
+        """
+        return sum(map(len, self._metadata.values()))
+
+    def __getitem__(self, key: MetadataKey) -> MetadataValue:
+        """When calling <metadata_object>[<key>], the first element of all those
+        mapped for <key> is returned.
+        """
+        try:
+            return self._metadata[key][0]
+        except (ValueError, IndexError) as e:
+            raise KeyError("{0!r}".format(key)) from e
+
+    def __setitem__(self, key: MetadataKey, value: MetadataValue) -> None:
+        """Calling metadata[<key>] = <value>
+        Maps to the first instance of <key>.
+        """
+        if key not in self:
+            self._metadata[key] = [value]
+        else:
+            current_values = self.get_all(key)
+            self._metadata[key] = [value, *current_values[1:]]
+
+    def __delitem__(self, key: MetadataKey) -> None:
+        """``del metadata[<key>]`` deletes the first mapping for <key>."""
+        current_values = self.get_all(key)
+        if not current_values:
+            raise KeyError(repr(key))
+        self._metadata[key] = current_values[1:]
+
+    def delete_all(self, key: MetadataKey) -> None:
+        """Delete all mappings for <key>."""
+        del self._metadata[key]
+
+    def __iter__(self) -> Iterator[Tuple[MetadataKey, MetadataValue]]:
+        for key, values in self._metadata.items():
+            for value in values:
+                yield (key, value)
+
+    def keys(self) -> abc.KeysView:
+        return abc.KeysView(self)
+
+    def values(self) -> abc.ValuesView:
+        return abc.ValuesView(self)
+
+    def items(self) -> abc.ItemsView:
+        return abc.ItemsView(self)
+
+    def get(
+        self, key: MetadataKey, default: MetadataValue = None
+    ) -> Optional[MetadataValue]:
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def get_all(self, key: MetadataKey) -> List[MetadataValue]:
+        """For compatibility with other Metadata abstraction objects (like in Java),
+        this would return all items under the desired <key>.
+ """ + return self._metadata.get(key, []) + + def set_all(self, key: MetadataKey, values: List[MetadataValue]) -> None: + self._metadata[key] = values + + def __contains__(self, key: MetadataKey) -> bool: + return key in self._metadata + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self._metadata == other._metadata + if isinstance(other, tuple): + return tuple(self) == other + return NotImplemented # pytype: disable=bad-return-type + + def __add__(self, other: Any) -> "Metadata": + if isinstance(other, self.__class__): + return Metadata(*(tuple(self) + tuple(other))) + if isinstance(other, tuple): + return Metadata(*(tuple(self) + other)) + return NotImplemented # pytype: disable=bad-return-type + + def __repr__(self) -> str: + view = tuple(self) + return "{0}({1!r})".format(self.__class__.__name__, view) diff --git a/MLPY/Lib/site-packages/grpc/aio/_server.py b/MLPY/Lib/site-packages/grpc/aio/_server.py new file mode 100644 index 0000000000000000000000000000000000000000..b15f0f1197db246b75176baf09be2d72961aacb0 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_server.py @@ -0,0 +1,239 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Server-side implementation of gRPC Asyncio Python.""" + +from concurrent.futures import Executor +from typing import Any, Dict, Optional, Sequence + +import grpc +from grpc import _common +from grpc import _compression +from grpc._cython import cygrpc + +from . import _base_server +from ._interceptor import ServerInterceptor +from ._typing import ChannelArgumentType + + +def _augment_channel_arguments( + base_options: ChannelArgumentType, compression: Optional[grpc.Compression] +): + compression_option = _compression.create_channel_option(compression) + return tuple(base_options) + compression_option + + +class Server(_base_server.Server): + """Serves RPCs.""" + + def __init__( + self, + thread_pool: Optional[Executor], + generic_handlers: Optional[Sequence[grpc.GenericRpcHandler]], + interceptors: Optional[Sequence[Any]], + options: ChannelArgumentType, + maximum_concurrent_rpcs: Optional[int], + compression: Optional[grpc.Compression], + ): + self._loop = cygrpc.get_working_loop() + if interceptors: + invalid_interceptors = [ + interceptor + for interceptor in interceptors + if not isinstance(interceptor, ServerInterceptor) + ] + if invalid_interceptors: + raise ValueError( + "Interceptor must be ServerInterceptor, the " + f"following are invalid: {invalid_interceptors}" + ) + self._server = cygrpc.AioServer( + self._loop, + thread_pool, + generic_handlers, + interceptors, + _augment_channel_arguments(options, compression), + maximum_concurrent_rpcs, + ) + + def add_generic_rpc_handlers( + self, generic_rpc_handlers: Sequence[grpc.GenericRpcHandler] + ) -> None: + """Registers GenericRpcHandlers with this Server. + + This method is only safe to call before the server is started. 
+ + Args: + generic_rpc_handlers: A sequence of GenericRpcHandlers that will be + used to service RPCs. + """ + self._server.add_generic_rpc_handlers(generic_rpc_handlers) + + def add_registered_method_handlers( + self, + service_name: str, + method_handlers: Dict[str, grpc.RpcMethodHandler], + ) -> None: + # TODO(xuanwn): Implement this for AsyncIO. + pass + + def add_insecure_port(self, address: str) -> int: + """Opens an insecure port for accepting RPCs. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. If the port is 0, + or not specified in the address, then the gRPC runtime will choose a port. + + Returns: + An integer port on which the server will accept RPC requests. + """ + return _common.validate_port_binding_result( + address, self._server.add_insecure_port(_common.encode(address)) + ) + + def add_secure_port( + self, address: str, server_credentials: grpc.ServerCredentials + ) -> int: + """Opens a secure port for accepting RPCs. + + This method may only be called before starting the server. + + Args: + address: The address for which to open a port. + if the port is 0, or not specified in the address, then the gRPC + runtime will choose a port. + server_credentials: A ServerCredentials object. + + Returns: + An integer port on which the server will accept RPC requests. + """ + return _common.validate_port_binding_result( + address, + self._server.add_secure_port( + _common.encode(address), server_credentials + ), + ) + + async def start(self) -> None: + """Starts this Server. + + This method may only be called once. (i.e. it is not idempotent). + """ + await self._server.start() + + async def stop(self, grace: Optional[float]) -> None: + """Stops this Server. + + This method immediately stops the server from servicing new RPCs in + all cases. + + If a grace period is specified, this method waits until all active + RPCs are finished or until the grace period is reached. RPCs that haven't + been terminated within the grace period are aborted. + If a grace period is not specified (by passing None for grace), all + existing RPCs are aborted immediately and this method blocks until + the last RPC handler terminates. + + This method is idempotent and may be called at any time. Passing a + smaller grace value in a subsequent call will have the effect of + stopping the Server sooner (passing None will have the effect of + stopping the server immediately). Passing a larger grace value in a + subsequent call will not have the effect of stopping the server later + (i.e. the most restrictive grace value is used). + + Args: + grace: A duration of time in seconds or None. + """ + await self._server.shutdown(grace) + + async def wait_for_termination( + self, timeout: Optional[float] = None + ) -> bool: + """Block current coroutine until the server stops. + + This is an EXPERIMENTAL API. + + The wait will not consume computational resources during blocking, and + it will block until one of the two following conditions are met: + + 1) The server is stopped or terminated; + 2) A timeout occurs if timeout is not `None`. + + The timeout argument works in the same way as `threading.Event.wait()`. + https://docs.python.org/3/library/threading.html#threading.Event.wait + + Args: + timeout: A floating point number specifying a timeout for the + operation in seconds. + + Returns: + A bool indicates if the operation times out. 
+ """ + return await self._server.wait_for_termination(timeout) + + def __del__(self): + """Schedules a graceful shutdown in current event loop. + + The Cython AioServer doesn't hold a ref-count to this class. It should + be safe to slightly extend the underlying Cython object's life span. + """ + if hasattr(self, "_server"): + if self._server.is_running(): + cygrpc.schedule_coro_threadsafe( + self._server.shutdown(None), + self._loop, + ) + + +def server( + migration_thread_pool: Optional[Executor] = None, + handlers: Optional[Sequence[grpc.GenericRpcHandler]] = None, + interceptors: Optional[Sequence[Any]] = None, + options: Optional[ChannelArgumentType] = None, + maximum_concurrent_rpcs: Optional[int] = None, + compression: Optional[grpc.Compression] = None, +): + """Creates a Server with which RPCs can be serviced. + + Args: + migration_thread_pool: A futures.ThreadPoolExecutor to be used by the + Server to execute non-AsyncIO RPC handlers for migration purpose. + handlers: An optional list of GenericRpcHandlers used for executing RPCs. + More handlers may be added by calling add_generic_rpc_handlers any time + before the server is started. + interceptors: An optional list of ServerInterceptor objects that observe + and optionally manipulate the incoming RPCs before handing them over to + handlers. The interceptors are given control in the order they are + specified. This is an EXPERIMENTAL API. + options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime) + to configure the channel. + maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server + will service before returning RESOURCE_EXHAUSTED status, or None to + indicate no limit. + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. This compression algorithm will be used for the + lifetime of the server unless overridden by set_compression. + + Returns: + A Server object. + """ + return Server( + migration_thread_pool, + () if handlers is None else handlers, + () if interceptors is None else interceptors, + () if options is None else options, + maximum_concurrent_rpcs, + compression, + ) diff --git a/MLPY/Lib/site-packages/grpc/aio/_typing.py b/MLPY/Lib/site-packages/grpc/aio/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..243e2e281e97a5e0e871a20c892ecf007731abff --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_typing.py @@ -0,0 +1,43 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Common types for gRPC Async API""" + +from typing import ( + Any, + AsyncIterable, + Callable, + Iterable, + Sequence, + Tuple, + TypeVar, + Union, +) + +from grpc._cython.cygrpc import EOF + +from ._metadata import Metadata +from ._metadata import MetadataKey +from ._metadata import MetadataValue + +RequestType = TypeVar("RequestType") +ResponseType = TypeVar("ResponseType") +SerializingFunction = Callable[[Any], bytes] +DeserializingFunction = Callable[[bytes], Any] +MetadatumType = Tuple[MetadataKey, MetadataValue] +MetadataType = Union[Metadata, Sequence[MetadatumType]] +ChannelArgumentType = Sequence[Tuple[str, Any]] +EOFType = type(EOF) +DoneCallbackType = Callable[[Any], None] +RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]] +ResponseIterableType = AsyncIterable[Any] diff --git a/MLPY/Lib/site-packages/grpc/aio/_utils.py b/MLPY/Lib/site-packages/grpc/aio/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1403d0dd6dd8ba5e63ffd4f753c6730311f4da63 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/aio/_utils.py @@ -0,0 +1,22 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Internal utilities used by the gRPC Aio module.""" +import time +from typing import Optional + + +def _timeout_to_deadline(timeout: Optional[float]) -> Optional[float]: + if timeout is None: + return None + return time.time() + timeout diff --git a/MLPY/Lib/site-packages/grpc/beta/__init__.py b/MLPY/Lib/site-packages/grpc/beta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4f1da956aadca9ea5ec9122b1319f3bc38a6c70 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/_client_adaptations.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_client_adaptations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86c4f80c6f9b1e1d606f93f9b59722c05c14a573 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_client_adaptations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/_metadata.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_metadata.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5987b396ec192f33654aab947426a5d6b59e6971 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_metadata.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/_server_adaptations.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_server_adaptations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..684279f4976967125595ac80f74b396331bc73ce Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/_server_adaptations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/implementations.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/implementations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4902eb4e98628409774dee1c3e6d5f20ddc8baa2 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/implementations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/interfaces.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/interfaces.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48b988abd2b6f4d436af7edd0d207f9037b10009 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/interfaces.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/__pycache__/utilities.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/beta/__pycache__/utilities.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..263fe9c5fa6526e20f785d297c91436ba5d92dbd Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/beta/__pycache__/utilities.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/beta/_client_adaptations.py b/MLPY/Lib/site-packages/grpc/beta/_client_adaptations.py new file mode 100644 index 0000000000000000000000000000000000000000..2286f4e5e9d8909004d07fccdee71082d44a9cc0 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/_client_adaptations.py @@ -0,0 +1,1015 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Translates gRPC's client-side API into gRPC's client-side Beta API.""" + +import grpc +from grpc import _common +from grpc.beta import _metadata +from grpc.beta import interfaces +from grpc.framework.common import cardinality +from grpc.framework.foundation import future +from grpc.framework.interfaces.face import face + +# pylint: disable=too-many-arguments,too-many-locals,unused-argument + +_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = { + grpc.StatusCode.CANCELLED: ( + face.Abortion.Kind.CANCELLED, + face.CancellationError, + ), + grpc.StatusCode.UNKNOWN: ( + face.Abortion.Kind.REMOTE_FAILURE, + face.RemoteError, + ), + grpc.StatusCode.DEADLINE_EXCEEDED: ( + face.Abortion.Kind.EXPIRED, + face.ExpirationError, + ), + grpc.StatusCode.UNIMPLEMENTED: ( + face.Abortion.Kind.LOCAL_FAILURE, + face.LocalError, + ), +} + + +def _effective_metadata(metadata, metadata_transformer): + non_none_metadata = () if metadata is None else metadata + if metadata_transformer is None: + return non_none_metadata + else: + return metadata_transformer(non_none_metadata) + + +def _credentials(grpc_call_options): + return None if grpc_call_options is None else grpc_call_options.credentials + + +def _abortion(rpc_error_call): + code = rpc_error_call.code() + pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code) + error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0] + return face.Abortion( + error_kind, + rpc_error_call.initial_metadata(), + rpc_error_call.trailing_metadata(), + code, + rpc_error_call.details(), + ) + + +def _abortion_error(rpc_error_call): + code = rpc_error_call.code() + pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code) + exception_class = face.AbortionError if pair is None else pair[1] + return exception_class( + rpc_error_call.initial_metadata(), + rpc_error_call.trailing_metadata(), + code, + rpc_error_call.details(), + ) + + +class _InvocationProtocolContext(interfaces.GRPCInvocationContext): + def disable_next_request_compression(self): + pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement. 
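# --- Editorial sketch, not part of the vendored file above ---------------------
# Illustrates how the metadata_transformer hook is applied by _effective_metadata:
# the caller's metadata (or an empty tuple) is passed through the transformer
# before being sent. The transformer name and header below are hypothetical.
def add_trace_header(metadata):
    # Append a tracing entry to whatever metadata the caller supplied.
    return tuple(metadata) + (("x-trace-id", "abc-123"),)

# _effective_metadata(None, add_trace_header)
#   -> (("x-trace-id", "abc-123"),)
# _effective_metadata((("k", "v"),), add_trace_header)
#   -> (("k", "v"), ("x-trace-id", "abc-123"))
# _effective_metadata((("k", "v"),), None)
#   -> (("k", "v"),)
# --------------------------------------------------------------------------------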
+ + +class _Rendezvous(future.Future, face.Call): + def __init__(self, response_future, response_iterator, call): + self._future = response_future + self._iterator = response_iterator + self._call = call + + def cancel(self): + return self._call.cancel() + + def cancelled(self): + return self._future.cancelled() + + def running(self): + return self._future.running() + + def done(self): + return self._future.done() + + def result(self, timeout=None): + try: + return self._future.result(timeout=timeout) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def exception(self, timeout=None): + try: + rpc_error_call = self._future.exception(timeout=timeout) + if rpc_error_call is None: + return None + else: + return _abortion_error(rpc_error_call) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def traceback(self, timeout=None): + try: + return self._future.traceback(timeout=timeout) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def add_done_callback(self, fn): + self._future.add_done_callback(lambda ignored_callback: fn(self)) + + def __iter__(self): + return self + + def _next(self): + try: + return next(self._iterator) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + def __next__(self): + return self._next() + + def next(self): + return self._next() + + def is_active(self): + return self._call.is_active() + + def time_remaining(self): + return self._call.time_remaining() + + def add_abortion_callback(self, abortion_callback): + def done_callback(): + if self.code() is not grpc.StatusCode.OK: + abortion_callback(_abortion(self._call)) + + registered = self._call.add_callback(done_callback) + return None if registered else done_callback() + + def protocol_context(self): + return _InvocationProtocolContext() + + def initial_metadata(self): + return _metadata.beta(self._call.initial_metadata()) + + def terminal_metadata(self): + return _metadata.beta(self._call.terminal_metadata()) + + def code(self): + return self._call.code() + + def details(self): + return self._call.details() + + +def _blocking_unary_unary( + channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + try: + multi_callable = channel.unary_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + if with_call: + response, call = multi_callable.with_call( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return response, _Rendezvous(None, None, call) + else: + return multi_callable( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + +def _future_unary_unary( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + multi_callable = 
channel.unary_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_future = multi_callable.future( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(response_future, None, response_future) + + +def _unary_stream( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + multi_callable = channel.unary_stream( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_iterator = multi_callable( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(None, response_iterator, response_iterator) + + +def _blocking_stream_unary( + channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + try: + multi_callable = channel.stream_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + if with_call: + response, call = multi_callable.with_call( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return response, _Rendezvous(None, None, call) + else: + return multi_callable( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + +def _future_stream_unary( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + multi_callable = channel.stream_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_future = multi_callable.future( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(response_future, None, response_future) + + +def _stream_stream( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + multi_callable = channel.stream_stream( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_iterator = multi_callable( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(None, response_iterator, response_iterator) + + +class 
_UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, + request, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + return _blocking_unary_unary( + self._channel, + self._group, + self._method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def future(self, request, timeout, metadata=None, protocol_options=None): + return _future_unary_unary( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__(self, request, timeout, metadata=None, protocol_options=None): + return _unary_stream( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, + request_iterator, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + return _blocking_stream_unary( + self._channel, + self._group, + self._method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def future( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + return _future_stream_unary( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _StreamStreamMultiCallable(face.StreamStreamMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + 
response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + return _stream_stream( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _GenericStub(face.GenericStub): + def __init__( + self, + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ): + self._channel = channel + self._metadata_transformer = metadata_transformer + self._request_serializers = request_serializers or {} + self._response_deserializers = response_deserializers or {} + + def blocking_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + with_call=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _blocking_unary_unary( + self._channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def future_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _future_unary_unary( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def inline_unary_stream( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _unary_stream( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def blocking_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + with_call=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _blocking_stream_unary( + self._channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def future_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _future_stream_unary( + self._channel, + group, + method, + timeout, + 
protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def inline_stream_stream( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _stream_stream( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def event_unary_unary( + self, + group, + method, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_unary_stream( + self, + group, + method, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_stream_unary( + self, + group, + method, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_stream_stream( + self, + group, + method, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def unary_unary(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _UnaryUnaryMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def unary_stream(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _UnaryStreamMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def stream_unary(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _StreamUnaryMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def stream_stream(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _StreamStreamMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +class _DynamicStub(face.DynamicStub): + def __init__(self, backing_generic_stub, group, cardinalities): + self._generic_stub = backing_generic_stub + self._group = group + self._cardinalities = cardinalities + + def __getattr__(self, attr): + method_cardinality = self._cardinalities.get(attr) + if method_cardinality is cardinality.Cardinality.UNARY_UNARY: + return self._generic_stub.unary_unary(self._group, attr) + elif method_cardinality is cardinality.Cardinality.UNARY_STREAM: + return self._generic_stub.unary_stream(self._group, attr) + elif method_cardinality is 
cardinality.Cardinality.STREAM_UNARY: + return self._generic_stub.stream_unary(self._group, attr) + elif method_cardinality is cardinality.Cardinality.STREAM_STREAM: + return self._generic_stub.stream_stream(self._group, attr) + else: + raise AttributeError( + '_DynamicStub object has no attribute "%s"!' % attr + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +def generic_stub( + channel, + host, + metadata_transformer, + request_serializers, + response_deserializers, +): + return _GenericStub( + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ) + + +def dynamic_stub( + channel, + service, + cardinalities, + host, + metadata_transformer, + request_serializers, + response_deserializers, +): + return _DynamicStub( + _GenericStub( + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ), + service, + cardinalities, + ) diff --git a/MLPY/Lib/site-packages/grpc/beta/_metadata.py b/MLPY/Lib/site-packages/grpc/beta/_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7a3403078c837db7ec5bc68257d1a0854ba1f0 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/_metadata.py @@ -0,0 +1,56 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""API metadata conversion utilities.""" + +import collections + +_Metadatum = collections.namedtuple( + "_Metadatum", + ( + "key", + "value", + ), +) + + +def _beta_metadatum(key, value): + beta_key = key if isinstance(key, (bytes,)) else key.encode("ascii") + beta_value = value if isinstance(value, (bytes,)) else value.encode("ascii") + return _Metadatum(beta_key, beta_value) + + +def _metadatum(beta_key, beta_value): + key = beta_key if isinstance(beta_key, (str,)) else beta_key.decode("utf8") + if isinstance(beta_value, (str,)) or key[-4:] == "-bin": + value = beta_value + else: + value = beta_value.decode("utf8") + return _Metadatum(key, value) + + +def beta(metadata): + if metadata is None: + return () + else: + return tuple(_beta_metadatum(key, value) for key, value in metadata) + + +def unbeta(beta_metadata): + if beta_metadata is None: + return () + else: + return tuple( + _metadatum(beta_key, beta_value) + for beta_key, beta_value in beta_metadata + ) diff --git a/MLPY/Lib/site-packages/grpc/beta/_server_adaptations.py b/MLPY/Lib/site-packages/grpc/beta/_server_adaptations.py new file mode 100644 index 0000000000000000000000000000000000000000..59a378b00b8de2108b348e5049b97ed45d48c161 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/_server_adaptations.py @@ -0,0 +1,465 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Translates gRPC's server-side API into gRPC's server-side Beta API.""" + +import collections +import threading + +import grpc +from grpc import _common +from grpc.beta import _metadata +from grpc.beta import interfaces +from grpc.framework.common import cardinality +from grpc.framework.common import style +from grpc.framework.foundation import abandonment +from grpc.framework.foundation import logging_pool +from grpc.framework.foundation import stream +from grpc.framework.interfaces.face import face + +# pylint: disable=too-many-return-statements + +_DEFAULT_POOL_SIZE = 8 + + +class _ServerProtocolContext(interfaces.GRPCServicerContext): + def __init__(self, servicer_context): + self._servicer_context = servicer_context + + def peer(self): + return self._servicer_context.peer() + + def disable_next_response_compression(self): + pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement. + + +class _FaceServicerContext(face.ServicerContext): + def __init__(self, servicer_context): + self._servicer_context = servicer_context + + def is_active(self): + return self._servicer_context.is_active() + + def time_remaining(self): + return self._servicer_context.time_remaining() + + def add_abortion_callback(self, abortion_callback): + raise NotImplementedError( + "add_abortion_callback no longer supported server-side!" + ) + + def cancel(self): + self._servicer_context.cancel() + + def protocol_context(self): + return _ServerProtocolContext(self._servicer_context) + + def invocation_metadata(self): + return _metadata.beta(self._servicer_context.invocation_metadata()) + + def initial_metadata(self, initial_metadata): + self._servicer_context.send_initial_metadata( + _metadata.unbeta(initial_metadata) + ) + + def terminal_metadata(self, terminal_metadata): + self._servicer_context.set_terminal_metadata( + _metadata.unbeta(terminal_metadata) + ) + + def code(self, code): + self._servicer_context.set_code(code) + + def details(self, details): + self._servicer_context.set_details(details) + + +def _adapt_unary_request_inline(unary_request_inline): + def adaptation(request, servicer_context): + return unary_request_inline( + request, _FaceServicerContext(servicer_context) + ) + + return adaptation + + +def _adapt_stream_request_inline(stream_request_inline): + def adaptation(request_iterator, servicer_context): + return stream_request_inline( + request_iterator, _FaceServicerContext(servicer_context) + ) + + return adaptation + + +class _Callback(stream.Consumer): + def __init__(self): + self._condition = threading.Condition() + self._values = [] + self._terminated = False + self._cancelled = False + + def consume(self, value): + with self._condition: + self._values.append(value) + self._condition.notify_all() + + def terminate(self): + with self._condition: + self._terminated = True + self._condition.notify_all() + + def consume_and_terminate(self, value): + with self._condition: + self._values.append(value) + self._terminated = True + self._condition.notify_all() + + def cancel(self): + with self._condition: + self._cancelled = True + self._condition.notify_all() + + def 
draw_one_value(self): + with self._condition: + while True: + if self._cancelled: + raise abandonment.Abandoned() + elif self._values: + return self._values.pop(0) + elif self._terminated: + return None + else: + self._condition.wait() + + def draw_all_values(self): + with self._condition: + while True: + if self._cancelled: + raise abandonment.Abandoned() + elif self._terminated: + all_values = tuple(self._values) + self._values = None + return all_values + else: + self._condition.wait() + + +def _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context +): + thread_joined = threading.Event() + + def pipe_requests(): + for request in request_iterator: + if not servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.consume(request) + if not servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.terminate() + + request_pipe_thread = threading.Thread(target=pipe_requests) + request_pipe_thread.daemon = True + request_pipe_thread.start() + + +def _adapt_unary_unary_event(unary_unary_event): + def adaptation(request, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + unary_unary_event( + request, + callback.consume_and_terminate, + _FaceServicerContext(servicer_context), + ) + return callback.draw_all_values()[0] + + return adaptation + + +def _adapt_unary_stream_event(unary_stream_event): + def adaptation(request, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + unary_stream_event( + request, callback, _FaceServicerContext(servicer_context) + ) + while True: + response = callback.draw_one_value() + if response is None: + return + else: + yield response + + return adaptation + + +def _adapt_stream_unary_event(stream_unary_event): + def adaptation(request_iterator, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + request_consumer = stream_unary_event( + callback.consume_and_terminate, + _FaceServicerContext(servicer_context), + ) + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context + ) + return callback.draw_all_values()[0] + + return adaptation + + +def _adapt_stream_stream_event(stream_stream_event): + def adaptation(request_iterator, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + request_consumer = stream_stream_event( + callback, _FaceServicerContext(servicer_context) + ) + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context + ) + while True: + response = callback.draw_one_value() + if response is None: + return + else: + yield response + + return adaptation + + +class _SimpleMethodHandler( + collections.namedtuple( + "_MethodHandler", + ( + "request_streaming", + "response_streaming", + "request_deserializer", + "response_serializer", + "unary_unary", + "unary_stream", + "stream_unary", + "stream_stream", + ), + ), + grpc.RpcMethodHandler, +): + pass + + +def _simple_method_handler( + implementation, request_deserializer, response_serializer +): + if implementation.style is style.Service.INLINE: + if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + return _SimpleMethodHandler( + False, + False, + request_deserializer, + response_serializer, + 
_adapt_unary_request_inline(implementation.unary_unary_inline), + None, + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + return _SimpleMethodHandler( + False, + True, + request_deserializer, + response_serializer, + None, + _adapt_unary_request_inline(implementation.unary_stream_inline), + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + return _SimpleMethodHandler( + True, + False, + request_deserializer, + response_serializer, + None, + None, + _adapt_stream_request_inline( + implementation.stream_unary_inline + ), + None, + ) + elif ( + implementation.cardinality is cardinality.Cardinality.STREAM_STREAM + ): + return _SimpleMethodHandler( + True, + True, + request_deserializer, + response_serializer, + None, + None, + None, + _adapt_stream_request_inline( + implementation.stream_stream_inline + ), + ) + elif implementation.style is style.Service.EVENT: + if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + return _SimpleMethodHandler( + False, + False, + request_deserializer, + response_serializer, + _adapt_unary_unary_event(implementation.unary_unary_event), + None, + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + return _SimpleMethodHandler( + False, + True, + request_deserializer, + response_serializer, + None, + _adapt_unary_stream_event(implementation.unary_stream_event), + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + return _SimpleMethodHandler( + True, + False, + request_deserializer, + response_serializer, + None, + None, + _adapt_stream_unary_event(implementation.stream_unary_event), + None, + ) + elif ( + implementation.cardinality is cardinality.Cardinality.STREAM_STREAM + ): + return _SimpleMethodHandler( + True, + True, + request_deserializer, + response_serializer, + None, + None, + None, + _adapt_stream_stream_event(implementation.stream_stream_event), + ) + raise ValueError() + + +def _flatten_method_pair_map(method_pair_map): + method_pair_map = method_pair_map or {} + flat_map = {} + for method_pair in method_pair_map: + method = _common.fully_qualified_method(method_pair[0], method_pair[1]) + flat_map[method] = method_pair_map[method_pair] + return flat_map + + +class _GenericRpcHandler(grpc.GenericRpcHandler): + def __init__( + self, + method_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + ): + self._method_implementations = _flatten_method_pair_map( + method_implementations + ) + self._request_deserializers = _flatten_method_pair_map( + request_deserializers + ) + self._response_serializers = _flatten_method_pair_map( + response_serializers + ) + self._multi_method_implementation = multi_method_implementation + + def service(self, handler_call_details): + method_implementation = self._method_implementations.get( + handler_call_details.method + ) + if method_implementation is not None: + return _simple_method_handler( + method_implementation, + self._request_deserializers.get(handler_call_details.method), + self._response_serializers.get(handler_call_details.method), + ) + elif self._multi_method_implementation is None: + return None + else: + try: + return None # TODO(nathaniel): call the multimethod. 
+ except face.NoSuchMethodError: + return None + + +class _Server(interfaces.Server): + def __init__(self, grpc_server): + self._grpc_server = grpc_server + + def add_insecure_port(self, address): + return self._grpc_server.add_insecure_port(address) + + def add_secure_port(self, address, server_credentials): + return self._grpc_server.add_secure_port(address, server_credentials) + + def start(self): + self._grpc_server.start() + + def stop(self, grace): + return self._grpc_server.stop(grace) + + def __enter__(self): + self._grpc_server.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._grpc_server.stop(None) + return False + + +def server( + service_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, +): + generic_rpc_handler = _GenericRpcHandler( + service_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + ) + if thread_pool is None: + effective_thread_pool = logging_pool.pool( + _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size + ) + else: + effective_thread_pool = thread_pool + return _Server( + grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)) + ) diff --git a/MLPY/Lib/site-packages/grpc/beta/implementations.py b/MLPY/Lib/site-packages/grpc/beta/implementations.py new file mode 100644 index 0000000000000000000000000000000000000000..5f474376d9b3a682788a1659a1eda5e39a7095c0 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/implementations.py @@ -0,0 +1,345 @@ +# Copyright 2015-2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Entry points into the Beta API of gRPC Python.""" + +# threading is referenced from specification in this module. +import threading # pylint: disable=unused-import + +# interfaces, cardinality, and face are referenced from specification in this +# module. +import grpc +from grpc import _auth +from grpc.beta import _client_adaptations +from grpc.beta import _metadata +from grpc.beta import _server_adaptations +from grpc.beta import interfaces # pylint: disable=unused-import +from grpc.framework.common import cardinality # pylint: disable=unused-import +from grpc.framework.interfaces.face import face # pylint: disable=unused-import + +# pylint: disable=too-many-arguments + +ChannelCredentials = grpc.ChannelCredentials +ssl_channel_credentials = grpc.ssl_channel_credentials +CallCredentials = grpc.CallCredentials + + +def metadata_call_credentials(metadata_plugin, name=None): + def plugin(context, callback): + def wrapped_callback(beta_metadata, error): + callback(_metadata.unbeta(beta_metadata), error) + + metadata_plugin(context, wrapped_callback) + + return grpc.metadata_call_credentials(plugin, name=name) + + +def google_call_credentials(credentials): + """Construct CallCredentials from GoogleCredentials. + + Args: + credentials: A GoogleCredentials object from the oauth2client library. 
+ + Returns: + A CallCredentials object for use in a GRPCCallOptions object. + """ + return metadata_call_credentials(_auth.GoogleCallCredentials(credentials)) + + +access_token_call_credentials = grpc.access_token_call_credentials +composite_call_credentials = grpc.composite_call_credentials +composite_channel_credentials = grpc.composite_channel_credentials + + +class Channel(object): + """A channel to a remote host through which RPCs may be conducted. + + Only the "subscribe" and "unsubscribe" methods are supported for application + use. This class' instance constructor and all other attributes are + unsupported. + """ + + def __init__(self, channel): + self._channel = channel + + def subscribe(self, callback, try_to_connect=None): + """Subscribes to this Channel's connectivity. + + Args: + callback: A callable to be invoked and passed an + interfaces.ChannelConnectivity identifying this Channel's connectivity. + The callable will be invoked immediately upon subscription and again for + every change to this Channel's connectivity thereafter until it is + unsubscribed. + try_to_connect: A boolean indicating whether or not this Channel should + attempt to connect if it is not already connected and ready to conduct + RPCs. + """ + self._channel.subscribe(callback, try_to_connect=try_to_connect) + + def unsubscribe(self, callback): + """Unsubscribes a callback from this Channel's connectivity. + + Args: + callback: A callable previously registered with this Channel from having + been passed to its "subscribe" method. + """ + self._channel.unsubscribe(callback) + + +def insecure_channel(host, port): + """Creates an insecure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + If None only the 'host' part will be used. + + Returns: + A Channel to the remote host through which RPCs may be conducted. + """ + channel = grpc.insecure_channel( + host if port is None else "%s:%d" % (host, port) + ) + return Channel(channel) + + +def secure_channel(host, port, channel_credentials): + """Creates a secure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + If None only the 'host' part will be used. + channel_credentials: A ChannelCredentials. + + Returns: + A secure Channel to the remote host through which RPCs may be conducted. + """ + channel = grpc.secure_channel( + host if port is None else "%s:%d" % (host, port), channel_credentials + ) + return Channel(channel) + + +class StubOptions(object): + """A value encapsulating the various options for creation of a Stub. + + This class and its instances have no supported interface - it exists to define + the type of its instances and its instances exist to be passed to other + functions. 
+ """ + + def __init__( + self, + host, + request_serializers, + response_deserializers, + metadata_transformer, + thread_pool, + thread_pool_size, + ): + self.host = host + self.request_serializers = request_serializers + self.response_deserializers = response_deserializers + self.metadata_transformer = metadata_transformer + self.thread_pool = thread_pool + self.thread_pool_size = thread_pool_size + + +_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None) + + +def stub_options( + host=None, + request_serializers=None, + response_deserializers=None, + metadata_transformer=None, + thread_pool=None, + thread_pool_size=None, +): + """Creates a StubOptions value to be passed at stub creation. + + All parameters are optional and should always be passed by keyword. + + Args: + host: A host string to set on RPC calls. + request_serializers: A dictionary from service name-method name pair to + request serialization behavior. + response_deserializers: A dictionary from service name-method name pair to + response deserialization behavior. + metadata_transformer: A callable that given a metadata object produces + another metadata object to be used in the underlying communication on the + wire. + thread_pool: A thread pool to use in stubs. + thread_pool_size: The size of thread pool to create for use in stubs; + ignored if thread_pool has been passed. + + Returns: + A StubOptions value created from the passed parameters. + """ + return StubOptions( + host, + request_serializers, + response_deserializers, + metadata_transformer, + thread_pool, + thread_pool_size, + ) + + +def generic_stub(channel, options=None): + """Creates a face.GenericStub on which RPCs can be made. + + Args: + channel: A Channel for use by the created stub. + options: A StubOptions customizing the created stub. + + Returns: + A face.GenericStub on which RPCs can be made. + """ + effective_options = _EMPTY_STUB_OPTIONS if options is None else options + return _client_adaptations.generic_stub( + channel._channel, # pylint: disable=protected-access + effective_options.host, + effective_options.metadata_transformer, + effective_options.request_serializers, + effective_options.response_deserializers, + ) + + +def dynamic_stub(channel, service, cardinalities, options=None): + """Creates a face.DynamicStub with which RPCs can be invoked. + + Args: + channel: A Channel for the returned face.DynamicStub to use. + service: The package-qualified full name of the service. + cardinalities: A dictionary from RPC method name to cardinality.Cardinality + value identifying the cardinality of the RPC method. + options: An optional StubOptions value further customizing the functionality + of the returned face.DynamicStub. + + Returns: + A face.DynamicStub with which RPCs can be invoked. + """ + effective_options = _EMPTY_STUB_OPTIONS if options is None else options + return _client_adaptations.dynamic_stub( + channel._channel, # pylint: disable=protected-access + service, + cardinalities, + effective_options.host, + effective_options.metadata_transformer, + effective_options.request_serializers, + effective_options.response_deserializers, + ) + + +ServerCredentials = grpc.ServerCredentials +ssl_server_credentials = grpc.ssl_server_credentials + + +class ServerOptions(object): + """A value encapsulating the various options for creation of a Server. + + This class and its instances have no supported interface - it exists to define + the type of its instances and its instances exist to be passed to other + functions. 
+ """ + + def __init__( + self, + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, + default_timeout, + maximum_timeout, + ): + self.multi_method_implementation = multi_method_implementation + self.request_deserializers = request_deserializers + self.response_serializers = response_serializers + self.thread_pool = thread_pool + self.thread_pool_size = thread_pool_size + self.default_timeout = default_timeout + self.maximum_timeout = maximum_timeout + + +_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None) + + +def server_options( + multi_method_implementation=None, + request_deserializers=None, + response_serializers=None, + thread_pool=None, + thread_pool_size=None, + default_timeout=None, + maximum_timeout=None, +): + """Creates a ServerOptions value to be passed at server creation. + + All parameters are optional and should always be passed by keyword. + + Args: + multi_method_implementation: A face.MultiMethodImplementation to be called + to service an RPC if the server has no specific method implementation for + the name of the RPC for which service was requested. + request_deserializers: A dictionary from service name-method name pair to + request deserialization behavior. + response_serializers: A dictionary from service name-method name pair to + response serialization behavior. + thread_pool: A thread pool to use in stubs. + thread_pool_size: The size of thread pool to create for use in stubs; + ignored if thread_pool has been passed. + default_timeout: A duration in seconds to allow for RPC service when + servicing RPCs that did not include a timeout value when invoked. + maximum_timeout: A duration in seconds to allow for RPC service when + servicing RPCs no matter what timeout value was passed when the RPC was + invoked. + + Returns: + A StubOptions value created from the passed parameters. + """ + return ServerOptions( + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, + default_timeout, + maximum_timeout, + ) + + +def server(service_implementations, options=None): + """Creates an interfaces.Server with which RPCs can be serviced. + + Args: + service_implementations: A dictionary from service name-method name pair to + face.MethodImplementation. + options: An optional ServerOptions value further customizing the + functionality of the returned Server. + + Returns: + An interfaces.Server with which RPCs can be serviced. + """ + effective_options = _EMPTY_SERVER_OPTIONS if options is None else options + return _server_adaptations.server( + service_implementations, + effective_options.multi_method_implementation, + effective_options.request_deserializers, + effective_options.response_serializers, + effective_options.thread_pool, + effective_options.thread_pool_size, + ) diff --git a/MLPY/Lib/site-packages/grpc/beta/interfaces.py b/MLPY/Lib/site-packages/grpc/beta/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0718e44f46c8497893d0678d4e1fbd70b3595c --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/interfaces.py @@ -0,0 +1,163 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Constants and interfaces of the Beta API of gRPC Python.""" + +import abc + +import grpc + +ChannelConnectivity = grpc.ChannelConnectivity +# FATAL_FAILURE was a Beta-API name for SHUTDOWN +ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN + +StatusCode = grpc.StatusCode + + +class GRPCCallOptions(object): + """A value encapsulating gRPC-specific options passed on RPC invocation. + + This class and its instances have no supported interface - it exists to + define the type of its instances and its instances exist to be passed to + other functions. + """ + + def __init__(self, disable_compression, subcall_of, credentials): + self.disable_compression = disable_compression + self.subcall_of = subcall_of + self.credentials = credentials + + +def grpc_call_options(disable_compression=False, credentials=None): + """Creates a GRPCCallOptions value to be passed at RPC invocation. + + All parameters are optional and should always be passed by keyword. + + Args: + disable_compression: A boolean indicating whether or not compression should + be disabled for the request object of the RPC. Only valid for + request-unary RPCs. + credentials: A CallCredentials object to use for the invoked RPC. + """ + return GRPCCallOptions(disable_compression, None, credentials) + + +GRPCAuthMetadataContext = grpc.AuthMetadataContext +GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback +GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin + + +class GRPCServicerContext(abc.ABC): + """Exposes gRPC-specific options and behaviors to code servicing RPCs.""" + + @abc.abstractmethod + def peer(self): + """Identifies the peer that invoked the RPC being serviced. + + Returns: + A string identifying the peer that invoked the RPC being serviced. + """ + raise NotImplementedError() + + @abc.abstractmethod + def disable_next_response_compression(self): + """Disables compression of the next response passed by the application.""" + raise NotImplementedError() + + +class GRPCInvocationContext(abc.ABC): + """Exposes gRPC-specific options and behaviors to code invoking RPCs.""" + + @abc.abstractmethod + def disable_next_request_compression(self): + """Disables compression of the next request passed by the application.""" + raise NotImplementedError() + + +class Server(abc.ABC): + """Services RPCs.""" + + @abc.abstractmethod + def add_insecure_port(self, address): + """Reserves a port for insecure RPC service once this Server becomes active. + + This method may only be called before calling this Server's start method is + called. + + Args: + address: The address for which to open a port. + + Returns: + An integer port on which RPCs will be serviced after this link has been + started. This is typically the same number as the port number contained + in the passed address, but will likely be different if the port number + contained in the passed address was zero. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_secure_port(self, address, server_credentials): + """Reserves a port for secure RPC service after this Server becomes active. 
+ + This method may only be called before calling this Server's start method is + called. + + Args: + address: The address for which to open a port. + server_credentials: A ServerCredentials. + + Returns: + An integer port on which RPCs will be serviced after this link has been + started. This is typically the same number as the port number contained + in the passed address, but will likely be different if the port number + contained in the passed address was zero. + """ + raise NotImplementedError() + + @abc.abstractmethod + def start(self): + """Starts this Server's service of RPCs. + + This method may only be called while the server is not serving RPCs (i.e. it + is not idempotent). + """ + raise NotImplementedError() + + @abc.abstractmethod + def stop(self, grace): + """Stops this Server's service of RPCs. + + All calls to this method immediately stop service of new RPCs. When existing + RPCs are aborted is controlled by the grace period parameter passed to this + method. + + This method may be called at any time and is idempotent. Passing a smaller + grace value than has been passed in a previous call will have the effect of + stopping the Server sooner. Passing a larger grace value than has been + passed in a previous call will not have the effect of stopping the server + later. + + Args: + grace: A duration of time in seconds to allow existing RPCs to complete + before being aborted by this Server's stopping. May be zero for + immediate abortion of all in-progress RPCs. + + Returns: + A threading.Event that will be set when this Server has completely + stopped. The returned event may not be set until after the full grace + period (if some ongoing RPC continues for the full length of the period) + of it may be set much sooner (such as if this Server had no RPCs underway + at the time it was stopped or if all RPCs that it had underway completed + very early in the grace period). + """ + raise NotImplementedError() diff --git a/MLPY/Lib/site-packages/grpc/beta/utilities.py b/MLPY/Lib/site-packages/grpc/beta/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..047e43e0818f427236bdaa4bc365affffb8392da --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/beta/utilities.py @@ -0,0 +1,153 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for the gRPC Python Beta API.""" + +import threading +import time + +# implementations is referenced from specification in this module. +from grpc.beta import implementations # pylint: disable=unused-import +from grpc.beta import interfaces +from grpc.framework.foundation import callable_util +from grpc.framework.foundation import future + +_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = ( + 'Exception calling connectivity future "done" callback!' 
+) + + +class _ChannelReadyFuture(future.Future): + def __init__(self, channel): + self._condition = threading.Condition() + self._channel = channel + + self._matured = False + self._cancelled = False + self._done_callbacks = [] + + def _block(self, timeout): + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._cancelled: + raise future.CancelledError() + elif self._matured: + return + else: + if until is None: + self._condition.wait() + else: + remaining = until - time.time() + if remaining < 0: + raise future.TimeoutError() + else: + self._condition.wait(timeout=remaining) + + def _update(self, connectivity): + with self._condition: + if ( + not self._cancelled + and connectivity is interfaces.ChannelConnectivity.READY + ): + self._matured = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self + ) + + def cancel(self): + with self._condition: + if not self._matured: + self._cancelled = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return False + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self + ) + + return True + + def cancelled(self): + with self._condition: + return self._cancelled + + def running(self): + with self._condition: + return not self._cancelled and not self._matured + + def done(self): + with self._condition: + return self._cancelled or self._matured + + def result(self, timeout=None): + self._block(timeout) + return None + + def exception(self, timeout=None): + self._block(timeout) + return None + + def traceback(self, timeout=None): + self._block(timeout) + return None + + def add_done_callback(self, fn): + with self._condition: + if not self._cancelled and not self._matured: + self._done_callbacks.append(fn) + return + + fn(self) + + def start(self): + with self._condition: + self._channel.subscribe(self._update, try_to_connect=True) + + def __del__(self): + with self._condition: + if not self._cancelled and not self._matured: + self._channel.unsubscribe(self._update) + + +def channel_ready_future(channel): + """Creates a future.Future tracking when an implementations.Channel is ready. + + Cancelling the returned future.Future does not tell the given + implementations.Channel to abandon attempts it may have been making to + connect; cancelling merely deactivates the return future.Future's + subscription to the given implementations.Channel's connectivity. + + Args: + channel: An implementations.Channel. + + Returns: + A future.Future that matures when the given Channel has connectivity + interfaces.ChannelConnectivity.READY. + """ + ready_future = _ChannelReadyFuture(channel) + ready_future.start() + return ready_future diff --git a/MLPY/Lib/site-packages/grpc/experimental/__init__.py b/MLPY/Lib/site-packages/grpc/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a42af469d2b14ead8c34a4e6bc6bb10a66c298b4 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/experimental/__init__.py @@ -0,0 +1,134 @@ +# Copyright 2018 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC's experimental APIs. + +These APIs are subject to be removed during any minor version release. +""" + +import copy +import functools +import sys +import warnings + +import grpc +from grpc._cython import cygrpc as _cygrpc + +_EXPERIMENTAL_APIS_USED = set() + + +class ChannelOptions(object): + """Indicates a channel option unique to gRPC Python. + + This enumeration is part of an EXPERIMENTAL API. + + Attributes: + SingleThreadedUnaryStream: Perform unary-stream RPCs on a single thread. + """ + + SingleThreadedUnaryStream = "SingleThreadedUnaryStream" + + +class UsageError(Exception): + """Raised by the gRPC library to indicate usage not allowed by the API.""" + + +# It's important that there be a single insecure credentials object so that its +# hash is deterministic and can be used for indexing in the simple stubs cache. +_insecure_channel_credentials = grpc.ChannelCredentials( + _cygrpc.channel_credentials_insecure() +) + + +def insecure_channel_credentials(): + """Creates a ChannelCredentials for use with an insecure channel. + + THIS IS AN EXPERIMENTAL API. + """ + return _insecure_channel_credentials + + +class ExperimentalApiWarning(Warning): + """A warning that an API is experimental.""" + + +def _warn_experimental(api_name, stack_offset): + if api_name not in _EXPERIMENTAL_APIS_USED: + _EXPERIMENTAL_APIS_USED.add(api_name) + msg = ( + "'{}' is an experimental API. It is subject to change or ".format( + api_name + ) + + "removal between minor releases. Proceed with caution." + ) + warnings.warn(msg, ExperimentalApiWarning, stacklevel=2 + stack_offset) + + +def experimental_api(f): + @functools.wraps(f) + def _wrapper(*args, **kwargs): + _warn_experimental(f.__name__, 1) + return f(*args, **kwargs) + + return _wrapper + + +def wrap_server_method_handler(wrapper, handler): + """Wraps the server method handler function. + + The server implementation requires all server handlers being wrapped as + RpcMethodHandler objects. This helper function ease the pain of writing + server handler wrappers. + + Args: + wrapper: A wrapper function that takes in a method handler behavior + (the actual function) and returns a wrapped function. + handler: A RpcMethodHandler object to be wrapped. + + Returns: + A newly created RpcMethodHandler. 
+ """ + if not handler: + return None + + if not handler.request_streaming: + if not handler.response_streaming: + # NOTE(lidiz) _replace is a public API: + # https://docs.python.org/dev/library/collections.html + return handler._replace(unary_unary=wrapper(handler.unary_unary)) + else: + return handler._replace(unary_stream=wrapper(handler.unary_stream)) + else: + if not handler.response_streaming: + return handler._replace(stream_unary=wrapper(handler.stream_unary)) + else: + return handler._replace( + stream_stream=wrapper(handler.stream_stream) + ) + + +__all__ = ( + "ChannelOptions", + "ExperimentalApiWarning", + "UsageError", + "insecure_channel_credentials", + "wrap_server_method_handler", +) + +if sys.version_info > (3, 6): + from grpc._simple_stubs import stream_stream + from grpc._simple_stubs import stream_unary + from grpc._simple_stubs import unary_stream + from grpc._simple_stubs import unary_unary + + __all__ = __all__ + (unary_unary, unary_stream, stream_unary, stream_stream) diff --git a/MLPY/Lib/site-packages/grpc/experimental/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc4badde9de240b3cf3bcf06f26a8a940f4beabf Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/experimental/__pycache__/gevent.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/gevent.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fa873fd15e6dcb6e8ea2eb376a3b2aaf8ca8253 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/gevent.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/experimental/__pycache__/session_cache.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/session_cache.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6095afd41b089d66360e79429074a3815ede5221 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/experimental/__pycache__/session_cache.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/experimental/aio/__init__.py b/MLPY/Lib/site-packages/grpc/experimental/aio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1be5cebf22fabd8359b8a688f241f8fd14266910 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/experimental/aio/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2020 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Alias of grpc.aio to keep backward compatibility.""" + +from grpc.aio import * diff --git a/MLPY/Lib/site-packages/grpc/experimental/aio/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/experimental/aio/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3432a2b1f4f9a307db29f2a21ffc7a7b579fb4c8 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/experimental/aio/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/experimental/gevent.py b/MLPY/Lib/site-packages/grpc/experimental/gevent.py new file mode 100644 index 0000000000000000000000000000000000000000..eef80ca95089f0fd28fc9f1b618b6904c91dbd3e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/experimental/gevent.py @@ -0,0 +1,27 @@ +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC's Python gEvent APIs.""" + +from grpc._cython import cygrpc as _cygrpc + + +def init_gevent(): + """Patches gRPC's libraries to be compatible with gevent. + + This must be called AFTER the python standard lib has been patched, + but BEFORE creating and gRPC objects. + + In order for progress to be made, the application must drive the event loop. + """ + _cygrpc.init_grpc_gevent() diff --git a/MLPY/Lib/site-packages/grpc/experimental/session_cache.py b/MLPY/Lib/site-packages/grpc/experimental/session_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa222b621b33482bcc2620145946c4e1b18b1d3 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/experimental/session_cache.py @@ -0,0 +1,45 @@ +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC's APIs for TLS Session Resumption support""" + +from grpc._cython import cygrpc as _cygrpc + + +def ssl_session_cache_lru(capacity): + """Creates an SSLSessionCache with LRU replacement policy + + Args: + capacity: Size of the cache + + Returns: + An SSLSessionCache with LRU replacement policy that can be passed as a value for + the grpc.ssl_session_cache option to a grpc.Channel. SSL session caches are used + to store session tickets, which clients can present to resume previous TLS sessions + with a server. + """ + return SSLSessionCache(_cygrpc.SSLSessionCacheLRU(capacity)) + + +class SSLSessionCache(object): + """An encapsulation of a session cache used for TLS session resumption. 
+ + Instances of this class can be passed to a Channel as values for the + grpc.ssl_session_cache option + """ + + def __init__(self, cache): + self._cache = cache + + def __int__(self): + return int(self._cache) diff --git a/MLPY/Lib/site-packages/grpc/framework/__init__.py b/MLPY/Lib/site-packages/grpc/framework/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/grpc/framework/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa338519b52994a1e7ceddd77c6074537fdc0886 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/common/__init__.py b/MLPY/Lib/site-packages/grpc/framework/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/common/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
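[Editor's note: a minimal usage sketch for the TLS session-resumption helpers added above. This is not part of the diff; the target address, credentials, and cache capacity are assumed values chosen only for illustration. It shows the documented pattern: a cache built with ssl_session_cache_lru is passed to a secure channel as the value of the grpc.ssl_session_cache option.]

import grpc
from grpc.experimental import session_cache

# Assumed capacity of one cached TLS session ticket.
cache = session_cache.ssl_session_cache_lru(1)

# Assumed credentials/target; in practice these come from the application.
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(
    "localhost:50051",
    credentials,
    options=(("grpc.ssl_session_cache", cache),),
)
# Subsequent channels created with the same cache object may resume earlier
# TLS sessions instead of performing a full handshake.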
diff --git a/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e69330c95c531cbf8bab522a9fa32ca9325cf7c9 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/cardinality.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/cardinality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a528d7f963d12197ef02963751826f3537d0ae70 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/cardinality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/style.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/style.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b83aa41df665b87ae84ae98a5bc3f87bc0812d14 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/common/__pycache__/style.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/common/cardinality.py b/MLPY/Lib/site-packages/grpc/framework/common/cardinality.py new file mode 100644 index 0000000000000000000000000000000000000000..711a969f39d7138f82b84ee8df784b6b1634301f --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/common/cardinality.py @@ -0,0 +1,26 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Defines an enum for classifying RPC methods by streaming semantics.""" + +import enum + + +@enum.unique +class Cardinality(enum.Enum): + """Describes the streaming semantics of an RPC method.""" + + UNARY_UNARY = "request-unary/response-unary" + UNARY_STREAM = "request-unary/response-streaming" + STREAM_UNARY = "request-streaming/response-unary" + STREAM_STREAM = "request-streaming/response-streaming" diff --git a/MLPY/Lib/site-packages/grpc/framework/common/style.py b/MLPY/Lib/site-packages/grpc/framework/common/style.py new file mode 100644 index 0000000000000000000000000000000000000000..291975812ab98199fdc25aca2c2dccde32626439 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/common/style.py @@ -0,0 +1,24 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Defines an enum for classifying RPC methods by control flow semantics.""" + +import enum + + +@enum.unique +class Service(enum.Enum): + """Describes the control flow style of RPC method implementation.""" + + INLINE = "inline" + EVENT = "event" diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__init__.py b/MLPY/Lib/site-packages/grpc/framework/foundation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d25c022230ffadb8329809a3d84f864a1632043 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/abandonment.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/abandonment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..028871f14f15e06fd31a5d6fa6a881273280ae99 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/abandonment.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/callable_util.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/callable_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7b95be88c8a27263313844eb816a92dc3db18f2 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/callable_util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/future.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/future.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f49e79751b087254fbd117219212bf66fec7fd9 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/future.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/logging_pool.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/logging_pool.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba2ac281a45adf5aba8de0233154f5d63c86edc Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/logging_pool.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cb4b9354675f88c0201d24bccfd793015fe221ca Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream_util.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaef676e093ab026ec80b4f8ad6947f7ba9b876d Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/foundation/__pycache__/stream_util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/abandonment.py b/MLPY/Lib/site-packages/grpc/framework/foundation/abandonment.py new file mode 100644 index 0000000000000000000000000000000000000000..c5b65b5135ab20cf98d8a1f1c869220ea94f41f2 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/abandonment.py @@ -0,0 +1,22 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for indicating abandonment of computation.""" + + +class Abandoned(Exception): + """Indicates that some computation is being abandoned. + + Abandoning a computation is different than returning a value or raising + an exception indicating some operational or programming defect. + """ diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/callable_util.py b/MLPY/Lib/site-packages/grpc/framework/foundation/callable_util.py new file mode 100644 index 0000000000000000000000000000000000000000..219ac343628b5ce617dec17fa2452fb3585b497c --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/callable_util.py @@ -0,0 +1,98 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for working with callables.""" + +from abc import ABC +import collections +import enum +import functools +import logging + +_LOGGER = logging.getLogger(__name__) + + +class Outcome(ABC): + """A sum type describing the outcome of some call. + + Attributes: + kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the + call returned a value or raised an exception. + return_value: The value returned by the call. Must be present if kind is + Kind.RETURNED. + exception: The exception raised by the call. Must be present if kind is + Kind.RAISED. 
+ """ + + @enum.unique + class Kind(enum.Enum): + """Identifies the general kind of the outcome of some call.""" + + RETURNED = object() + RAISED = object() + + +class _EasyOutcome( + collections.namedtuple( + "_EasyOutcome", ["kind", "return_value", "exception"] + ), + Outcome, +): + """A trivial implementation of Outcome.""" + + +def _call_logging_exceptions(behavior, message, *args, **kwargs): + try: + return _EasyOutcome( + Outcome.Kind.RETURNED, behavior(*args, **kwargs), None + ) + except Exception as e: # pylint: disable=broad-except + _LOGGER.exception(message) + return _EasyOutcome(Outcome.Kind.RAISED, None, e) + + +def with_exceptions_logged(behavior, message): + """Wraps a callable in a try-except that logs any exceptions it raises. + + Args: + behavior: Any callable. + message: A string to log if the behavior raises an exception. + + Returns: + A callable that when executed invokes the given behavior. The returned + callable takes the same arguments as the given behavior but returns a + future.Outcome describing whether the given behavior returned a value or + raised an exception. + """ + + @functools.wraps(behavior) + def wrapped_behavior(*args, **kwargs): + return _call_logging_exceptions(behavior, message, *args, **kwargs) + + return wrapped_behavior + + +def call_logging_exceptions(behavior, message, *args, **kwargs): + """Calls a behavior in a try-except that logs any exceptions it raises. + + Args: + behavior: Any callable. + message: A string to log if the behavior raises an exception. + *args: Positional arguments to pass to the given behavior. + **kwargs: Keyword arguments to pass to the given behavior. + + Returns: + An Outcome describing whether the given behavior returned a value or raised + an exception. + """ + return _call_logging_exceptions(behavior, message, *args, **kwargs) diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/future.py b/MLPY/Lib/site-packages/grpc/framework/foundation/future.py new file mode 100644 index 0000000000000000000000000000000000000000..537e001201f34984246f330022e520a80e3721b2 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/future.py @@ -0,0 +1,219 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A Future interface. + +Python doesn't have a Future interface in its standard library. In the absence +of such a standard, three separate, incompatible implementations +(concurrent.futures.Future, ndb.Future, and asyncio.Future) have appeared. This +interface attempts to be as compatible as possible with +concurrent.futures.Future. From ndb.Future it adopts a traceback-object accessor +method. + +Unlike the concrete and implemented Future classes listed above, the Future +class defined in this module is an entirely abstract interface that anyone may +implement and use. 
+ +The one known incompatibility between this interface and the interface of +concurrent.futures.Future is that this interface defines its own CancelledError +and TimeoutError exceptions rather than raising the implementation-private +concurrent.futures._base.CancelledError and the +built-in-but-only-in-3.3-and-later TimeoutError. +""" + +import abc + + +class TimeoutError(Exception): + """Indicates that a particular call timed out.""" + + +class CancelledError(Exception): + """Indicates that the computation underlying a Future was cancelled.""" + + +class Future(abc.ABC): + """A representation of a computation in another control flow. + + Computations represented by a Future may be yet to be begun, may be ongoing, + or may have already completed. + """ + + # NOTE(nathaniel): This isn't the return type that I would want to have if it + # were up to me. Were this interface being written from scratch, the return + # type of this method would probably be a sum type like: + # + # NOT_COMMENCED + # COMMENCED_AND_NOT_COMPLETED + # PARTIAL_RESULT + # COMPLETED + # UNCANCELLABLE + # NOT_IMMEDIATELY_DETERMINABLE + @abc.abstractmethod + def cancel(self): + """Attempts to cancel the computation. + + This method does not block. + + Returns: + True if the computation has not yet begun, will not be allowed to take + place, and determination of both was possible without blocking. False + under all other circumstances including but not limited to the + computation's already having begun, the computation's already having + finished, and the computation's having been scheduled for execution on a + remote system for which a determination of whether or not it commenced + before being cancelled cannot be made without blocking. + """ + raise NotImplementedError() + + # NOTE(nathaniel): Here too this isn't the return type that I'd want this + # method to have if it were up to me. I think I'd go with another sum type + # like: + # + # NOT_CANCELLED (this object's cancel method hasn't been called) + # NOT_COMMENCED + # COMMENCED_AND_NOT_COMPLETED + # PARTIAL_RESULT + # COMPLETED + # UNCANCELLABLE + # NOT_IMMEDIATELY_DETERMINABLE + # + # Notice how giving the cancel method the right semantics obviates most + # reasons for this method to exist. + @abc.abstractmethod + def cancelled(self): + """Describes whether the computation was cancelled. + + This method does not block. + + Returns: + True if the computation was cancelled any time before its result became + immediately available. False under all other circumstances including but + not limited to this object's cancel method not having been called and + the computation's result having become immediately available. + """ + raise NotImplementedError() + + @abc.abstractmethod + def running(self): + """Describes whether the computation is taking place. + + This method does not block. + + Returns: + True if the computation is scheduled to take place in the future or is + taking place now, or False if the computation took place in the past or + was cancelled. + """ + raise NotImplementedError() + + # NOTE(nathaniel): These aren't quite the semantics I'd like here either. I + # would rather this only returned True in cases in which the underlying + # computation completed successfully. A computation's having been cancelled + # conflicts with considering that computation "done". + @abc.abstractmethod + def done(self): + """Describes whether the computation has taken place. + + This method does not block. 
+ + Returns: + True if the computation is known to have either completed or have been + unscheduled or interrupted. False if the computation may possibly be + executing or scheduled to execute later. + """ + raise NotImplementedError() + + @abc.abstractmethod + def result(self, timeout=None): + """Accesses the outcome of the computation or raises its exception. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation to + finish or be cancelled, or None if this method should block until the + computation has finished or is cancelled no matter how long that takes. + + Returns: + The return value of the computation. + + Raises: + TimeoutError: If a timeout value is passed and the computation does not + terminate within the allotted time. + CancelledError: If the computation was cancelled. + Exception: If the computation raised an exception, this call will raise + the same exception. + """ + raise NotImplementedError() + + @abc.abstractmethod + def exception(self, timeout=None): + """Return the exception raised by the computation. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation to + terminate or be cancelled, or None if this method should block until + the computation is terminated or is cancelled no matter how long that + takes. + + Returns: + The exception raised by the computation, or None if the computation did + not raise an exception. + + Raises: + TimeoutError: If a timeout value is passed and the computation does not + terminate within the allotted time. + CancelledError: If the computation was cancelled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def traceback(self, timeout=None): + """Access the traceback of the exception raised by the computation. + + This method may return immediately or may block. + + Args: + timeout: The length of time in seconds to wait for the computation to + terminate or be cancelled, or None if this method should block until + the computation is terminated or is cancelled no matter how long that + takes. + + Returns: + The traceback of the exception raised by the computation, or None if the + computation did not raise an exception. + + Raises: + TimeoutError: If a timeout value is passed and the computation does not + terminate within the allotted time. + CancelledError: If the computation was cancelled. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_done_callback(self, fn): + """Adds a function to be called at completion of the computation. + + The callback will be passed this Future object describing the outcome of + the computation. + + If the computation has already completed, the callback will be called + immediately. + + Args: + fn: A callable taking this Future object as its single parameter. + """ + raise NotImplementedError() diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/logging_pool.py b/MLPY/Lib/site-packages/grpc/framework/foundation/logging_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..31b9b73968dfc8ff207660635a03c3df35cbc879 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/logging_pool.py @@ -0,0 +1,72 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""A thread pool that logs exceptions raised by tasks executed within it.""" + +from concurrent import futures +import logging + +_LOGGER = logging.getLogger(__name__) + + +def _wrap(behavior): + """Wraps an arbitrary callable behavior in exception-logging.""" + + def _wrapping(*args, **kwargs): + try: + return behavior(*args, **kwargs) + except Exception: + _LOGGER.exception( + "Unexpected exception from %s executed in logging pool!", + behavior, + ) + raise + + return _wrapping + + +class _LoggingPool(object): + """An exception-logging futures.ThreadPoolExecutor-compatible thread pool.""" + + def __init__(self, backing_pool): + self._backing_pool = backing_pool + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._backing_pool.shutdown(wait=True) + + def submit(self, fn, *args, **kwargs): + return self._backing_pool.submit(_wrap(fn), *args, **kwargs) + + def map(self, func, *iterables, **kwargs): + return self._backing_pool.map( + _wrap(func), *iterables, timeout=kwargs.get("timeout", None) + ) + + def shutdown(self, wait=True): + self._backing_pool.shutdown(wait=wait) + + +def pool(max_workers): + """Creates a thread pool that logs exceptions raised by the tasks within it. + + Args: + max_workers: The maximum number of worker threads to allow the pool. + + Returns: + A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions + raised by the tasks executed within it. + """ + return _LoggingPool(futures.ThreadPoolExecutor(max_workers)) diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/stream.py b/MLPY/Lib/site-packages/grpc/framework/foundation/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..ef4a4cb72fbb7e7b4eb3af1a3c4e1ae17da33b83 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/stream.py @@ -0,0 +1,43 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Interfaces related to streams of values or objects.""" + +import abc + + +class Consumer(abc.ABC): + """Interface for consumers of finite streams of values or objects.""" + + @abc.abstractmethod + def consume(self, value): + """Accepts a value. + + Args: + value: Any value accepted by this Consumer. + """ + raise NotImplementedError() + + @abc.abstractmethod + def terminate(self): + """Indicates to this Consumer that no more values will be supplied.""" + raise NotImplementedError() + + @abc.abstractmethod + def consume_and_terminate(self, value): + """Supplies a value and signals that no more values will be supplied. + + Args: + value: Any value accepted by this Consumer. 
+ """ + raise NotImplementedError() diff --git a/MLPY/Lib/site-packages/grpc/framework/foundation/stream_util.py b/MLPY/Lib/site-packages/grpc/framework/foundation/stream_util.py new file mode 100644 index 0000000000000000000000000000000000000000..d8952d909b7a6198b5cac485e17946db1a1c26ea --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/foundation/stream_util.py @@ -0,0 +1,148 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Helpful utilities related to the stream module.""" + +import logging +import threading + +from grpc.framework.foundation import stream + +_NO_VALUE = object() +_LOGGER = logging.getLogger(__name__) + + +class TransformingConsumer(stream.Consumer): + """A stream.Consumer that passes a transformation of its input to another.""" + + def __init__(self, transformation, downstream): + self._transformation = transformation + self._downstream = downstream + + def consume(self, value): + self._downstream.consume(self._transformation(value)) + + def terminate(self): + self._downstream.terminate() + + def consume_and_terminate(self, value): + self._downstream.consume_and_terminate(self._transformation(value)) + + +class IterableConsumer(stream.Consumer): + """A Consumer that when iterated over emits the values it has consumed.""" + + def __init__(self): + self._condition = threading.Condition() + self._values = [] + self._active = True + + def consume(self, value): + with self._condition: + if self._active: + self._values.append(value) + self._condition.notify() + + def terminate(self): + with self._condition: + self._active = False + self._condition.notify() + + def consume_and_terminate(self, value): + with self._condition: + if self._active: + self._values.append(value) + self._active = False + self._condition.notify() + + def __iter__(self): + return self + + def __next__(self): + return self.next() + + def next(self): + with self._condition: + while self._active and not self._values: + self._condition.wait() + if self._values: + return self._values.pop(0) + else: + raise StopIteration() + + +class ThreadSwitchingConsumer(stream.Consumer): + """A Consumer decorator that affords serialization and asynchrony.""" + + def __init__(self, sink, pool): + self._lock = threading.Lock() + self._sink = sink + self._pool = pool + # True if self._spin has been submitted to the pool to be called once and + # that call has not yet returned, False otherwise. 
+ self._spinning = False + self._values = [] + self._active = True + + def _spin(self, sink, value, terminate): + while True: + try: + if value is _NO_VALUE: + sink.terminate() + elif terminate: + sink.consume_and_terminate(value) + else: + sink.consume(value) + except Exception as e: # pylint:disable=broad-except + _LOGGER.exception(e) + + with self._lock: + if terminate: + self._spinning = False + return + elif self._values: + value = self._values.pop(0) + terminate = not self._values and not self._active + elif not self._active: + value = _NO_VALUE + terminate = True + else: + self._spinning = False + return + + def consume(self, value): + with self._lock: + if self._active: + if self._spinning: + self._values.append(value) + else: + self._pool.submit(self._spin, self._sink, value, False) + self._spinning = True + + def terminate(self): + with self._lock: + if self._active: + self._active = False + if not self._spinning: + self._pool.submit(self._spin, self._sink, _NO_VALUE, True) + self._spinning = True + + def consume_and_terminate(self, value): + with self._lock: + if self._active: + self._active = False + if self._spinning: + self._values.append(value) + else: + self._pool.submit(self._spin, self._sink, value, True) + self._spinning = True diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/__init__.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb6849533cfdba06169e82ced8e6a9586e272a2c Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__init__.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8692d63fe239b07de8274b1b85b20bba9bb4cdc8 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/base.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f42e8c4802b05badb2c1839af1676faa5e635180 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/utilities.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/utilities.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd6c47cf9a1130d1f582ffb9bdef3c2273bbca67 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/__pycache__/utilities.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/base.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/base.py new file mode 100644 index 0000000000000000000000000000000000000000..da763ec640bd43721ad427cd37bbcf4157490194 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/base.py @@ -0,0 +1,328 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""The base interface of RPC Framework. + +Implementations of this interface support the conduct of "operations": +exchanges between two distinct ends of an arbitrary number of data payloads +and metadata such as a name for the operation, initial and terminal metadata +in each direction, and flow control. These operations may be used for transfers +of data, remote procedure calls, status indication, or anything else +applications choose. +""" + +# threading is referenced from specification in this module. +import abc +import enum +import threading # pylint: disable=unused-import + +# pylint: disable=too-many-arguments + + +class NoSuchMethodError(Exception): + """Indicates that an unrecognized operation has been called. + + Attributes: + code: A code value to communicate to the other side of the operation + along with indication of operation termination. May be None. + details: A details value to communicate to the other side of the + operation along with indication of operation termination. May be None. + """ + + def __init__(self, code, details): + """Constructor. + + Args: + code: A code value to communicate to the other side of the operation + along with indication of operation termination. May be None. 
+ details: A details value to communicate to the other side of the + operation along with indication of operation termination. May be None. + """ + super(NoSuchMethodError, self).__init__() + self.code = code + self.details = details + + +class Outcome(object): + """The outcome of an operation. + + Attributes: + kind: A Kind value coarsely identifying how the operation terminated. + code: An application-specific code value or None if no such value was + provided. + details: An application-specific details value or None if no such value was + provided. + """ + + @enum.unique + class Kind(enum.Enum): + """Ways in which an operation can terminate.""" + + COMPLETED = "completed" + CANCELLED = "cancelled" + EXPIRED = "expired" + LOCAL_SHUTDOWN = "local shutdown" + REMOTE_SHUTDOWN = "remote shutdown" + RECEPTION_FAILURE = "reception failure" + TRANSMISSION_FAILURE = "transmission failure" + LOCAL_FAILURE = "local failure" + REMOTE_FAILURE = "remote failure" + + +class Completion(abc.ABC): + """An aggregate of the values exchanged upon operation completion. + + Attributes: + terminal_metadata: A terminal metadata value for the operaton. + code: A code value for the operation. + message: A message value for the operation. + """ + + +class OperationContext(abc.ABC): + """Provides operation-related information and action.""" + + @abc.abstractmethod + def outcome(self): + """Indicates the operation's outcome (or that the operation is ongoing). + + Returns: + None if the operation is still active or the Outcome value for the + operation if it has terminated. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_termination_callback(self, callback): + """Adds a function to be called upon operation termination. + + Args: + callback: A callable to be passed an Outcome value on operation + termination. + + Returns: + None if the operation has not yet terminated and the passed callback will + later be called when it does terminate, or if the operation has already + terminated an Outcome value describing the operation termination and the + passed callback will not be called as a result of this method call. + """ + raise NotImplementedError() + + @abc.abstractmethod + def time_remaining(self): + """Describes the length of allowed time remaining for the operation. + + Returns: + A nonnegative float indicating the length of allowed time in seconds + remaining for the operation to complete before it is considered to have + timed out. Zero is returned if the operation has terminated. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the operation if the operation has not yet terminated.""" + raise NotImplementedError() + + @abc.abstractmethod + def fail(self, exception): + """Indicates that the operation has failed. + + Args: + exception: An exception germane to the operation failure. May be None. + """ + raise NotImplementedError() + + +class Operator(abc.ABC): + """An interface through which to participate in an operation.""" + + @abc.abstractmethod + def advance( + self, + initial_metadata=None, + payload=None, + completion=None, + allowance=None, + ): + """Progresses the operation. + + Args: + initial_metadata: An initial metadata value. Only one may ever be + communicated in each direction for an operation, and they must be + communicated no later than either the first payload or the completion. + payload: A payload value. + completion: A Completion value. 
May only ever be non-None once in either + direction, and no payloads may be passed after it has been communicated. + allowance: A positive integer communicating the number of additional + payloads allowed to be passed by the remote side of the operation. + """ + raise NotImplementedError() + + +class ProtocolReceiver(abc.ABC): + """A means of receiving protocol values during an operation.""" + + @abc.abstractmethod + def context(self, protocol_context): + """Accepts the protocol context object for the operation. + + Args: + protocol_context: The protocol context object for the operation. + """ + raise NotImplementedError() + + +class Subscription(abc.ABC): + """Describes customer code's interest in values from the other side. + + Attributes: + kind: A Kind value describing the overall kind of this value. + termination_callback: A callable to be passed the Outcome associated with + the operation after it has terminated. Must be non-None if kind is + Kind.TERMINATION_ONLY. Must be None otherwise. + allowance: A callable behavior that accepts positive integers representing + the number of additional payloads allowed to be passed to the other side + of the operation. Must be None if kind is Kind.FULL. Must not be None + otherwise. + operator: An Operator to be passed values from the other side of the + operation. Must be non-None if kind is Kind.FULL. Must be None otherwise. + protocol_receiver: A ProtocolReceiver to be passed protocol objects as they + become available during the operation. Must be non-None if kind is + Kind.FULL. + """ + + @enum.unique + class Kind(enum.Enum): + NONE = "none" + TERMINATION_ONLY = "termination only" + FULL = "full" + + +class Servicer(abc.ABC): + """Interface for service implementations.""" + + @abc.abstractmethod + def service(self, group, method, context, output_operator): + """Services an operation. + + Args: + group: The group identifier of the operation to be serviced. + method: The method identifier of the operation to be serviced. + context: An OperationContext object affording contextual information and + actions. + output_operator: An Operator that will accept output values of the + operation. + + Returns: + A Subscription via which this object may or may not accept more values of + the operation. + + Raises: + NoSuchMethodError: If this Servicer does not handle operations with the + given group and method. + abandonment.Abandoned: If the operation has been aborted and there no + longer is any reason to service the operation. + """ + raise NotImplementedError() + + +class End(abc.ABC): + """Common type for entry-point objects on both sides of an operation.""" + + @abc.abstractmethod + def start(self): + """Starts this object's service of operations.""" + raise NotImplementedError() + + @abc.abstractmethod + def stop(self, grace): + """Stops this object's service of operations. + + This object will refuse service of new operations as soon as this method is + called but operations under way at the time of the call may be given a + grace period during which they are allowed to finish. + + Args: + grace: A duration of time in seconds to allow ongoing operations to + terminate before being forcefully terminated by the stopping of this + End. May be zero to terminate all ongoing operations and immediately + stop. + + Returns: + A threading.Event that will be set to indicate all operations having + terminated and this End having completely stopped. 
The returned event + may not be set until after the full grace period (if some ongoing + operation continues for the full length of the period) or it may be set + much sooner (if for example this End had no operations in progress at + the time its stop method was called). + """ + raise NotImplementedError() + + @abc.abstractmethod + def operate( + self, + group, + method, + subscription, + timeout, + initial_metadata=None, + payload=None, + completion=None, + protocol_options=None, + ): + """Commences an operation. + + Args: + group: The group identifier of the invoked operation. + method: The method identifier of the invoked operation. + subscription: A Subscription to which the results of the operation will be + passed. + timeout: A length of time in seconds to allow for the operation. + initial_metadata: An initial metadata value to be sent to the other side + of the operation. May be None if the initial metadata will be later + passed via the returned operator or if there will be no initial metadata + passed at all. + payload: An initial payload for the operation. + completion: A Completion value indicating the end of transmission to the + other side of the operation. + protocol_options: A value specified by the provider of a Base interface + implementation affording custom state and behavior. + + Returns: + A pair of objects affording information about the operation and action + continuing the operation. The first element of the returned pair is an + OperationContext for the operation and the second element of the + returned pair is an Operator to which operation values not passed in + this call should later be passed. + """ + raise NotImplementedError() + + @abc.abstractmethod + def operation_stats(self): + """Reports the number of terminated operations broken down by outcome. + + Returns: + A dictionary from Outcome.Kind value to an integer identifying the number + of operations that terminated with that outcome kind. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_idle_action(self, action): + """Adds an action to be called when this End has no ongoing operations. + + Args: + action: A callable that accepts no arguments. + """ + raise NotImplementedError() diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/base/utilities.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..512fa33d32599852709126fb768b9f5f39d59a2f --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/base/utilities.py @@ -0,0 +1,83 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Utilities for use with the base interface of RPC Framework.""" + +import collections + +from grpc.framework.interfaces.base import base + + +class _Completion( + base.Completion, + collections.namedtuple( + "_Completion", + ( + "terminal_metadata", + "code", + "message", + ), + ), +): + """A trivial implementation of base.Completion.""" + + +class _Subscription( + base.Subscription, + collections.namedtuple( + "_Subscription", + ( + "kind", + "termination_callback", + "allowance", + "operator", + "protocol_receiver", + ), + ), +): + """A trivial implementation of base.Subscription.""" + + +_NONE_SUBSCRIPTION = _Subscription( + base.Subscription.Kind.NONE, None, None, None, None +) + + +def completion(terminal_metadata, code, message): + """Creates a base.Completion aggregating the given operation values. + + Args: + terminal_metadata: A terminal metadata value for an operaton. + code: A code value for an operation. + message: A message value for an operation. + + Returns: + A base.Completion aggregating the given operation values. + """ + return _Completion(terminal_metadata, code, message) + + +def full_subscription(operator, protocol_receiver): + """Creates a "full" base.Subscription for the given base.Operator. + + Args: + operator: A base.Operator to be used in an operation. + protocol_receiver: A base.ProtocolReceiver to be used in an operation. + + Returns: + A base.Subscription of kind base.Subscription.Kind.FULL wrapping the given + base.Operator and base.ProtocolReceiver. + """ + return _Subscription( + base.Subscription.Kind.FULL, None, None, operator, protocol_receiver + ) diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__init__.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11539f2c67582349f5aa4104776b3ac8757dc08e --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee6450d3d87502c47bb29414f1c183807cda5ed7 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/face.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/face.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d89b501714be38da217d8690fcc747b3ddf71e38 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/face.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/utilities.cpython-39.pyc b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/utilities.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29533f5880a8b3d6d9903b0ea07640265c4881e3 Binary files /dev/null and b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/__pycache__/utilities.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/face.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/face.py new file mode 100644 index 0000000000000000000000000000000000000000..d722e3cf3928928eea2ea58e9e02867a5f2d0764 --- /dev/null +++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/face.py @@ -0,0 +1,1084 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Interfaces defining the Face layer of RPC Framework.""" + +import abc +import collections +import enum + +# cardinality, style, abandonment, future, and stream are +# referenced from specification in this module. +from grpc.framework.common import cardinality # pylint: disable=unused-import +from grpc.framework.common import style # pylint: disable=unused-import +from grpc.framework.foundation import future # pylint: disable=unused-import +from grpc.framework.foundation import stream # pylint: disable=unused-import + +# pylint: disable=too-many-arguments + + +class NoSuchMethodError(Exception): + """Raised by customer code to indicate an unrecognized method. + + Attributes: + group: The group of the unrecognized method. + name: The name of the unrecognized method. + """ + + def __init__(self, group, method): + """Constructor. + + Args: + group: The group identifier of the unrecognized RPC name. + method: The method identifier of the unrecognized RPC name. 
+ """ + super(NoSuchMethodError, self).__init__() + self.group = group + self.method = method + + def __repr__(self): + return "face.NoSuchMethodError(%s, %s)" % ( + self.group, + self.method, + ) + + +class Abortion( + collections.namedtuple( + "Abortion", + ( + "kind", + "initial_metadata", + "terminal_metadata", + "code", + "details", + ), + ) +): + """A value describing RPC abortion. + + Attributes: + kind: A Kind value identifying how the RPC failed. + initial_metadata: The initial metadata from the other side of the RPC or + None if no initial metadata value was received. + terminal_metadata: The terminal metadata from the other side of the RPC or + None if no terminal metadata value was received. + code: The code value from the other side of the RPC or None if no code value + was received. + details: The details value from the other side of the RPC or None if no + details value was received. + """ + + @enum.unique + class Kind(enum.Enum): + """Types of RPC abortion.""" + + CANCELLED = "cancelled" + EXPIRED = "expired" + LOCAL_SHUTDOWN = "local shutdown" + REMOTE_SHUTDOWN = "remote shutdown" + NETWORK_FAILURE = "network failure" + LOCAL_FAILURE = "local failure" + REMOTE_FAILURE = "remote failure" + + +class AbortionError(Exception, metaclass=abc.ABCMeta): + """Common super type for exceptions indicating RPC abortion. + + initial_metadata: The initial metadata from the other side of the RPC or + None if no initial metadata value was received. + terminal_metadata: The terminal metadata from the other side of the RPC or + None if no terminal metadata value was received. + code: The code value from the other side of the RPC or None if no code value + was received. + details: The details value from the other side of the RPC or None if no + details value was received. + """ + + def __init__(self, initial_metadata, terminal_metadata, code, details): + super(AbortionError, self).__init__() + self.initial_metadata = initial_metadata + self.terminal_metadata = terminal_metadata + self.code = code + self.details = details + + def __str__(self): + return '%s(code=%s, details="%s")' % ( + self.__class__.__name__, + self.code, + self.details, + ) + + +class CancellationError(AbortionError): + """Indicates that an RPC has been cancelled.""" + + +class ExpirationError(AbortionError): + """Indicates that an RPC has expired ("timed out").""" + + +class LocalShutdownError(AbortionError): + """Indicates that an RPC has terminated due to local shutdown of RPCs.""" + + +class RemoteShutdownError(AbortionError): + """Indicates that an RPC has terminated due to remote shutdown of RPCs.""" + + +class NetworkError(AbortionError): + """Indicates that some error occurred on the network.""" + + +class LocalError(AbortionError): + """Indicates that an RPC has terminated due to a local defect.""" + + +class RemoteError(AbortionError): + """Indicates that an RPC has terminated due to a remote defect.""" + + +class RpcContext(abc.ABC): + """Provides RPC-related information and action.""" + + @abc.abstractmethod + def is_active(self): + """Describes whether the RPC is active or has terminated.""" + raise NotImplementedError() + + @abc.abstractmethod + def time_remaining(self): + """Describes the length of allowed time remaining for the RPC. + + Returns: + A nonnegative float indicating the length of allowed time in seconds + remaining for the RPC to complete before it is considered to have timed + out. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def add_abortion_callback(self, abortion_callback): + """Registers a callback to be called if the RPC is aborted. + + Args: + abortion_callback: A callable to be called and passed an Abortion value + in the event of RPC abortion. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC. + + Idempotent and has no effect if the RPC has already terminated. + """ + raise NotImplementedError() + + @abc.abstractmethod + def protocol_context(self): + """Accesses a custom object specified by an implementation provider. + + Returns: + A value specified by the provider of a Face interface implementation + affording custom state and behavior. + """ + raise NotImplementedError() + + +class Call(RpcContext, metaclass=abc.ABCMeta): + """Invocation-side utility object for an RPC.""" + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata from the service-side of the RPC. + + This method blocks until the value is available or is known not to have been + emitted from the service-side of the RPC. + + Returns: + The initial metadata object emitted by the service-side of the RPC, or + None if there was no such value. + """ + raise NotImplementedError() + + @abc.abstractmethod + def terminal_metadata(self): + """Accesses the terminal metadata from the service-side of the RPC. + + This method blocks until the value is available or is known not to have been + emitted from the service-side of the RPC. + + Returns: + The terminal metadata object emitted by the service-side of the RPC, or + None if there was no such value. + """ + raise NotImplementedError() + + @abc.abstractmethod + def code(self): + """Accesses the code emitted by the service-side of the RPC. + + This method blocks until the value is available or is known not to have been + emitted from the service-side of the RPC. + + Returns: + The code object emitted by the service-side of the RPC, or None if there + was no such value. + """ + raise NotImplementedError() + + @abc.abstractmethod + def details(self): + """Accesses the details value emitted by the service-side of the RPC. + + This method blocks until the value is available or is known not to have been + emitted from the service-side of the RPC. + + Returns: + The details value emitted by the service-side of the RPC, or None if there + was no such value. + """ + raise NotImplementedError() + + +class ServicerContext(RpcContext, metaclass=abc.ABCMeta): + """A context object passed to method implementations.""" + + @abc.abstractmethod + def invocation_metadata(self): + """Accesses the metadata from the invocation-side of the RPC. + + This method blocks until the value is available or is known not to have been + emitted from the invocation-side of the RPC. + + Returns: + The metadata object emitted by the invocation-side of the RPC, or None if + there was no such value. + """ + raise NotImplementedError() + + @abc.abstractmethod + def initial_metadata(self, initial_metadata): + """Accepts the service-side initial metadata value of the RPC. + + This method need not be called by method implementations if they have no + service-side initial metadata to transmit. + + Args: + initial_metadata: The service-side initial metadata value of the RPC to + be transmitted to the invocation side of the RPC. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def terminal_metadata(self, terminal_metadata): + """Accepts the service-side terminal metadata value of the RPC. + + This method need not be called by method implementations if they have no + service-side terminal metadata to transmit. + + Args: + terminal_metadata: The service-side terminal metadata value of the RPC to + be transmitted to the invocation side of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def code(self, code): + """Accepts the service-side code of the RPC. + + This method need not be called by method implementations if they have no + code to transmit. + + Args: + code: The code of the RPC to be transmitted to the invocation side of the + RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def details(self, details): + """Accepts the service-side details of the RPC. + + This method need not be called by method implementations if they have no + service-side details to transmit. + + Args: + details: The service-side details value of the RPC to be transmitted to + the invocation side of the RPC. + """ + raise NotImplementedError() + + +class ResponseReceiver(abc.ABC): + """Invocation-side object used to accept the output of an RPC.""" + + @abc.abstractmethod + def initial_metadata(self, initial_metadata): + """Receives the initial metadata from the service-side of the RPC. + + Args: + initial_metadata: The initial metadata object emitted from the + service-side of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def response(self, response): + """Receives a response from the service-side of the RPC. + + Args: + response: A response object emitted from the service-side of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def complete(self, terminal_metadata, code, details): + """Receives the completion values emitted from the service-side of the RPC. + + Args: + terminal_metadata: The terminal metadata object emitted from the + service-side of the RPC. + code: The code object emitted from the service-side of the RPC. + details: The details object emitted from the service-side of the RPC. + """ + raise NotImplementedError() + + +class UnaryUnaryMultiCallable(abc.ABC): + """Affords invoking a unary-unary RPC in any call style.""" + + @abc.abstractmethod + def __call__( + self, + request, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + """Synchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + with_call: Whether or not to include return a Call for the RPC in addition + to the response. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + The response value for the RPC, and a Call for the RPC if with_call was + set to True at invocation. + + Raises: + AbortionError: Indicating that the RPC was aborted. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future(self, request, timeout, metadata=None, protocol_options=None): + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. 
+ protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and a future.Future. In the + event of RPC completion, the return Future's result value will be the + response value of the RPC. In the event of RPC abortion, the returned + Future's exception value will be an AbortionError. + """ + raise NotImplementedError() + + @abc.abstractmethod + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + receiver: A ResponseReceiver to be passed the response data of the RPC. + abortion_callback: A callback to be called and passed an Abortion value + in the event of RPC abortion. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + A Call for the RPC. + """ + raise NotImplementedError() + + +class UnaryStreamMultiCallable(abc.ABC): + """Affords invoking a unary-stream RPC in any call style.""" + + @abc.abstractmethod + def __call__(self, request, timeout, metadata=None, protocol_options=None): + """Invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and an iterator of response + values. Drawing response values from the returned iterator may raise + AbortionError indicating abortion of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + """Asynchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + receiver: A ResponseReceiver to be passed the response data of the RPC. + abortion_callback: A callback to be called and passed an Abortion value + in the event of RPC abortion. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + A Call object for the RPC. + """ + raise NotImplementedError() + + +class StreamUnaryMultiCallable(abc.ABC): + """Affords invoking a stream-unary RPC in any call style.""" + + @abc.abstractmethod + def __call__( + self, + request_iterator, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + """Synchronously invokes the underlying RPC. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + with_call: Whether or not to include return a Call for the RPC in addition + to the response. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. 
+ + Returns: + The response value for the RPC, and a Call for the RPC if with_call was + set to True at invocation. + + Raises: + AbortionError: Indicating that the RPC was aborted. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + """Asynchronously invokes the underlying RPC. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and a future.Future. In the + event of RPC completion, the return Future's result value will be the + response value of the RPC. In the event of RPC abortion, the returned + Future's exception value will be an AbortionError. + """ + raise NotImplementedError() + + @abc.abstractmethod + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + """Asynchronously invokes the underlying RPC. + + Args: + receiver: A ResponseReceiver to be passed the response data of the RPC. + abortion_callback: A callback to be called and passed an Abortion value + in the event of RPC abortion. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + A single object that is both a Call object for the RPC and a + stream.Consumer to which the request values of the RPC should be passed. + """ + raise NotImplementedError() + + +class StreamStreamMultiCallable(abc.ABC): + """Affords invoking a stream-stream RPC in any call style.""" + + @abc.abstractmethod + def __call__( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + """Invokes the underlying RPC. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and an iterator of response + values. Drawing response values from the returned iterator may raise + AbortionError indicating abortion of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + """Asynchronously invokes the underlying RPC. + + Args: + receiver: A ResponseReceiver to be passed the response data of the RPC. + abortion_callback: A callback to be called and passed an Abortion value + in the event of RPC abortion. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of + the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + A single object that is both a Call object for the RPC and a + stream.Consumer to which the request values of the RPC should be passed. 
+ """ + raise NotImplementedError() + + +class MethodImplementation(abc.ABC): + """A sum type that describes a method implementation. + + Attributes: + cardinality: A cardinality.Cardinality value. + style: A style.Service value. + unary_unary_inline: The implementation of the method as a callable value + that takes a request value and a ServicerContext object and returns a + response value. Only non-None if cardinality is + cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE. + unary_stream_inline: The implementation of the method as a callable value + that takes a request value and a ServicerContext object and returns an + iterator of response values. Only non-None if cardinality is + cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE. + stream_unary_inline: The implementation of the method as a callable value + that takes an iterator of request values and a ServicerContext object and + returns a response value. Only non-None if cardinality is + cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE. + stream_stream_inline: The implementation of the method as a callable value + that takes an iterator of request values and a ServicerContext object and + returns an iterator of response values. Only non-None if cardinality is + cardinality.Cardinality.STREAM_STREAM and style is style.Service.INLINE. + unary_unary_event: The implementation of the method as a callable value that + takes a request value, a response callback to which to pass the response + value of the RPC, and a ServicerContext. Only non-None if cardinality is + cardinality.Cardinality.UNARY_UNARY and style is style.Service.EVENT. + unary_stream_event: The implementation of the method as a callable value + that takes a request value, a stream.Consumer to which to pass the + response values of the RPC, and a ServicerContext. Only non-None if + cardinality is cardinality.Cardinality.UNARY_STREAM and style is + style.Service.EVENT. + stream_unary_event: The implementation of the method as a callable value + that takes a response callback to which to pass the response value of the + RPC and a ServicerContext and returns a stream.Consumer to which the + request values of the RPC should be passed. Only non-None if cardinality + is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT. + stream_stream_event: The implementation of the method as a callable value + that takes a stream.Consumer to which to pass the response values of the + RPC and a ServicerContext and returns a stream.Consumer to which the + request values of the RPC should be passed. Only non-None if cardinality + is cardinality.Cardinality.STREAM_STREAM and style is + style.Service.EVENT. + """ + + +class MultiMethodImplementation(abc.ABC): + """A general type able to service many methods.""" + + @abc.abstractmethod + def service(self, group, method, response_consumer, context): + """Services an RPC. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + response_consumer: A stream.Consumer to be called to accept the response + values of the RPC. + context: a ServicerContext object. + + Returns: + A stream.Consumer with which to accept the request values of the RPC. The + consumer returned from this method may or may not be invoked to + completion: in the case of RPC abortion, RPC Framework will simply stop + passing values to this object. 
Implementations must not assume that this + object will be called to completion of the request stream or even called + at all. + + Raises: + abandonment.Abandoned: May or may not be raised when the RPC has been + aborted. + NoSuchMethodError: If this MultiMethod does not recognize the given group + and name for the RPC and is not able to service the RPC. + """ + raise NotImplementedError() + + +class GenericStub(abc.ABC): + """Affords RPC invocation via generic methods.""" + + @abc.abstractmethod + def blocking_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + """Invokes a unary-request-unary-response method. + + This method blocks until either returning the response value of the RPC + (in the event of RPC completion) or raising an exception (in the event of + RPC abortion). + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + with_call: Whether or not to include return a Call for the RPC in addition + to the response. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + The response value for the RPC, and a Call for the RPC if with_call was + set to True at invocation. + + Raises: + AbortionError: Indicating that the RPC was aborted. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + """Invokes a unary-request-unary-response method. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and a future.Future. In the + event of RPC completion, the return Future's result value will be the + response value of the RPC. In the event of RPC abortion, the returned + Future's exception value will be an AbortionError. + """ + raise NotImplementedError() + + @abc.abstractmethod + def inline_unary_stream( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + """Invokes a unary-request-stream-response method. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request: The request value for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and an iterator of response + values. Drawing response values from the returned iterator may raise + AbortionError indicating abortion of the RPC. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def blocking_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + """Invokes a stream-request-unary-response method. + + This method blocks until either returning the response value of the RPC + (in the event of RPC completion) or raising an exception (in the event of + RPC abortion). + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + with_call: Whether or not to include return a Call for the RPC in addition + to the response. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + The response value for the RPC, and a Call for the RPC if with_call was + set to True at invocation. + + Raises: + AbortionError: Indicating that the RPC was aborted. + """ + raise NotImplementedError() + + @abc.abstractmethod + def future_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + """Invokes a stream-request-unary-response method. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and a future.Future. In the + event of RPC completion, the return Future's result value will be the + response value of the RPC. In the event of RPC abortion, the returned + Future's exception value will be an AbortionError. + """ + raise NotImplementedError() + + @abc.abstractmethod + def inline_stream_stream( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + """Invokes a stream-request-stream-response method. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request_iterator: An iterator that yields request values for the RPC. + timeout: A duration of time in seconds to allow for the RPC. + metadata: A metadata value to be passed to the service-side of the RPC. + protocol_options: A value specified by the provider of a Face interface + implementation affording custom state and behavior. + + Returns: + An object that is both a Call for the RPC and an iterator of response + values. Drawing response values from the returned iterator may raise + AbortionError indicating abortion of the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def event_unary_unary( + self, + group, + method, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + """Event-driven invocation of a unary-request-unary-response method. + + Args: + group: The group identifier of the RPC. + method: The method identifier of the RPC. + request: The request value for the RPC. + receiver: A ResponseReceiver to be passed the response data of the RPC. 
+          abortion_callback: A callback to be called and passed an Abortion value
+            in the event of RPC abortion.
+          timeout: A duration of time in seconds to allow for the RPC.
+          metadata: A metadata value to be passed to the service-side of the RPC.
+          protocol_options: A value specified by the provider of a Face interface
+            implementation affording custom state and behavior.
+
+        Returns:
+          A Call for the RPC.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def event_unary_stream(
+        self,
+        group,
+        method,
+        request,
+        receiver,
+        abortion_callback,
+        timeout,
+        metadata=None,
+        protocol_options=None,
+    ):
+        """Event-driven invocation of a unary-request-stream-response method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+          request: The request value for the RPC.
+          receiver: A ResponseReceiver to be passed the response data of the RPC.
+          abortion_callback: A callback to be called and passed an Abortion value
+            in the event of RPC abortion.
+          timeout: A duration of time in seconds to allow for the RPC.
+          metadata: A metadata value to be passed to the service-side of the RPC.
+          protocol_options: A value specified by the provider of a Face interface
+            implementation affording custom state and behavior.
+
+        Returns:
+          A Call for the RPC.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def event_stream_unary(
+        self,
+        group,
+        method,
+        receiver,
+        abortion_callback,
+        timeout,
+        metadata=None,
+        protocol_options=None,
+    ):
+        """Event-driven invocation of a stream-request-unary-response method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+          receiver: A ResponseReceiver to be passed the response data of the RPC.
+          abortion_callback: A callback to be called and passed an Abortion value
+            in the event of RPC abortion.
+          timeout: A duration of time in seconds to allow for the RPC.
+          metadata: A metadata value to be passed to the service-side of the RPC.
+          protocol_options: A value specified by the provider of a Face interface
+            implementation affording custom state and behavior.
+
+        Returns:
+          A pair of a Call object for the RPC and a stream.Consumer to which the
+          request values of the RPC should be passed.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def event_stream_stream(
+        self,
+        group,
+        method,
+        receiver,
+        abortion_callback,
+        timeout,
+        metadata=None,
+        protocol_options=None,
+    ):
+        """Event-driven invocation of a stream-request-stream-response method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+          receiver: A ResponseReceiver to be passed the response data of the RPC.
+          abortion_callback: A callback to be called and passed an Abortion value
+            in the event of RPC abortion.
+          timeout: A duration of time in seconds to allow for the RPC.
+          metadata: A metadata value to be passed to the service-side of the RPC.
+          protocol_options: A value specified by the provider of a Face interface
+            implementation affording custom state and behavior.
+
+        Returns:
+          A pair of a Call object for the RPC and a stream.Consumer to which the
+          request values of the RPC should be passed.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def unary_unary(self, group, method):
+        """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+
+        Returns:
+          A UnaryUnaryMultiCallable value for the named unary-unary method.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def unary_stream(self, group, method):
+        """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+
+        Returns:
+          A UnaryStreamMultiCallable value for the named unary-stream method.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def stream_unary(self, group, method):
+        """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+
+        Returns:
+          A StreamUnaryMultiCallable value for the named stream-unary method.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def stream_stream(self, group, method):
+        """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+        Args:
+          group: The group identifier of the RPC.
+          method: The method identifier of the RPC.
+
+        Returns:
+          A StreamStreamMultiCallable value for the named stream-stream method.
+        """
+        raise NotImplementedError()
+
+
+class DynamicStub(abc.ABC):
+    """Affords RPC invocation via attributes corresponding to afforded methods.
+
+    Instances of this type may be scoped to a single group so that attribute
+    access is unambiguous.
+
+    Instances of this type respond to attribute access as follows: if the
+    requested attribute is the name of a unary-unary method, the value of the
+    attribute will be a UnaryUnaryMultiCallable with which to invoke an RPC; if
+    the requested attribute is the name of a unary-stream method, the value of the
+    attribute will be a UnaryStreamMultiCallable with which to invoke an RPC; if
+    the requested attribute is the name of a stream-unary method, the value of the
+    attribute will be a StreamUnaryMultiCallable with which to invoke an RPC; and
+    if the requested attribute is the name of a stream-stream method, the value of
+    the attribute will be a StreamStreamMultiCallable with which to invoke an RPC.
+    """
diff --git a/MLPY/Lib/site-packages/grpc/framework/interfaces/face/utilities.py b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c0b3fcff81173856a2d3136fdd29431b370669d
--- /dev/null
+++ b/MLPY/Lib/site-packages/grpc/framework/interfaces/face/utilities.py
@@ -0,0 +1,245 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for RPC Framework's Face interface."""
+
+import collections
+
+# stream is referenced from specification in this module.
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import stream  # pylint: disable=unused-import
+from grpc.framework.interfaces.face import face
+
+
+class _MethodImplementation(
+    face.MethodImplementation,
+    collections.namedtuple(
+        "_MethodImplementation",
+        [
+            "cardinality",
+            "style",
+            "unary_unary_inline",
+            "unary_stream_inline",
+            "stream_unary_inline",
+            "stream_stream_inline",
+            "unary_unary_event",
+            "unary_stream_event",
+            "stream_unary_event",
+            "stream_stream_event",
+        ],
+    ),
+):
+    pass
+
+
+def unary_unary_inline(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a unary-unary RPC method as a callable value
+        that takes a request value and a face.ServicerContext object and
+        returns a response value.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.UNARY_UNARY,
+        style.Service.INLINE,
+        behavior,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+    )
+
+
+def unary_stream_inline(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a unary-stream RPC method as a callable
+        value that takes a request value and a face.ServicerContext object and
+        returns an iterator of response values.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.UNARY_STREAM,
+        style.Service.INLINE,
+        None,
+        behavior,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+    )
+
+
+def stream_unary_inline(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a stream-unary RPC method as a callable
+        value that takes an iterator of request values and a
+        face.ServicerContext object and returns a response value.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.STREAM_UNARY,
+        style.Service.INLINE,
+        None,
+        None,
+        behavior,
+        None,
+        None,
+        None,
+        None,
+        None,
+    )
+
+
+def stream_stream_inline(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a stream-stream RPC method as a callable
+        value that takes an iterator of request values and a
+        face.ServicerContext object and returns an iterator of response values.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.STREAM_STREAM,
+        style.Service.INLINE,
+        None,
+        None,
+        None,
+        behavior,
+        None,
+        None,
+        None,
+        None,
+    )
+
+
+def unary_unary_event(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a unary-unary RPC method as a callable
+        value that takes a request value, a response callback to which to pass
+        the response value of the RPC, and a face.ServicerContext.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.UNARY_UNARY,
+        style.Service.EVENT,
+        None,
+        None,
+        None,
+        None,
+        behavior,
+        None,
+        None,
+        None,
+    )
+
+
+def unary_stream_event(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a unary-stream RPC method as a callable
+        value that takes a request value, a stream.Consumer to which to pass
+        the response values of the RPC, and a face.ServicerContext.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.UNARY_STREAM,
+        style.Service.EVENT,
+        None,
+        None,
+        None,
+        None,
+        None,
+        behavior,
+        None,
+        None,
+    )
+
+
+def stream_unary_event(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a stream-unary RPC method as a callable
+        value that takes a response callback to which to pass the response value
+        of the RPC and a face.ServicerContext and returns a stream.Consumer to
+        which the request values of the RPC should be passed.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.STREAM_UNARY,
+        style.Service.EVENT,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+        behavior,
+        None,
+    )
+
+
+def stream_stream_event(behavior):
+    """Creates a face.MethodImplementation for the given behavior.
+
+    Args:
+      behavior: The implementation of a stream-stream RPC method as a callable
+        value that takes a stream.Consumer to which to pass the response values
+        of the RPC and a face.ServicerContext and returns a stream.Consumer to
+        which the request values of the RPC should be passed.
+
+    Returns:
+      A face.MethodImplementation derived from the given behavior.
+    """
+    return _MethodImplementation(
+        cardinality.Cardinality.STREAM_STREAM,
+        style.Service.EVENT,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+        behavior,
+    )
diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/INSTALLER b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/LICENSE b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..442f783d527f531f00439d5b5cba7b68f2b48b2e
--- /dev/null
+++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/LICENSE
@@ -0,0 +1,610 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +----------------------------------------------------------- + +BSD 3-Clause License + +Copyright 2016, Google Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +----------------------------------------------------------- + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. 
Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. 
This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/METADATA b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..44134f9686a7eec0bd5fbb2fb978778efdee21e3
--- /dev/null
+++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/METADATA
@@ -0,0 +1,119 @@
+Metadata-Version: 2.1
+Name: grpcio
+Version: 1.65.1
+Summary: HTTP/2-based RPC framework
+Home-page: https://grpc.io
+Author: The gRPC Authors
+Author-email: grpc-io@googlegroups.com
+License: Apache License 2.0
+Project-URL: Source Code, https://github.com/grpc/grpc
+Project-URL: Bug Tracker, https://github.com/grpc/grpc/issues
+Project-URL: Documentation, https://grpc.github.io/grpc/python
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: protobuf
+Requires-Dist: grpcio-tools >=1.65.1 ; extra == 'protobuf'
+
+gRPC Python
+===========
+
+|compat_check_pypi|
+
+Package for gRPC Python.
+
+.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=grpcio
+   :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=grpcio
+
+Supported Python Versions
+-------------------------
+Python >= 3.8
+
+Installation
+------------
+
+gRPC Python is available for Linux, macOS, and Windows.
+
+Installing From PyPI
+~~~~~~~~~~~~~~~~~~~~
+
+If you are installing locally...
+
+::
+
+  $ pip install grpcio
+
+Else system wide (on Ubuntu)...
+
+::
+
+  $ sudo pip install grpcio
+
+If you're on Windows make sure that you installed the :code:`pip.exe` component
+when you installed Python (if not go back and install it!) then invoke:
+
+::
+
+  $ pip.exe install grpcio
+
+Windows users may need to invoke :code:`pip.exe` from a command line run as
+administrator.
+
+n.b. On Windows and on Mac OS X one *must* have a recent release of :code:`pip`
+to retrieve the proper wheel from PyPI. Be sure to upgrade to the latest
+version!
+
+Installing From Source
+~~~~~~~~~~~~~~~~~~~~~~
+
+Building from source requires that you have the Python headers (usually a
+package named :code:`python-dev`).
+
+::
+
+  $ export REPO_ROOT=grpc  # REPO_ROOT can be any directory of your choice
+  $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc $REPO_ROOT
+  $ cd $REPO_ROOT
+  $ git submodule update --init
+
+  # To include systemd socket-activation feature in the build,
+  # first install the `libsystemd-dev` package, then:
+  $ export GRPC_PYTHON_BUILD_WITH_SYSTEMD=1
+
+  # For the next two commands do `sudo pip install` if you get permission-denied errors
+  $ pip install -r requirements.txt
+  $ GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .
+
+You cannot currently install gRPC Python from source on Windows. Things might work
+out for you in MSYS2 (follow the Linux instructions), but it isn't officially
+supported at the moment.
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+Help, I ...
+
+* **... 
see the following error on some platforms** + + :: + + /tmp/pip-build-U8pSsr/cython/Cython/Plex/Scanners.c:4:20: fatal error: Python.h: No such file or directory + #include "Python.h" + ^ + compilation terminated. + + You can fix it by installing `python-dev` package. i.e + + :: + + sudo apt-get install python-dev + diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/RECORD b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6c40da842f9f8006381f68440521452d9d0dd729 --- /dev/null +++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/RECORD @@ -0,0 +1,120 @@ +grpc/__init__.py,sha256=oogVRu0MF78OnANapY41rEueEQNPTNLpUrL2vvqdtgw,84681 +grpc/__pycache__/__init__.cpython-39.pyc,, +grpc/__pycache__/_auth.cpython-39.pyc,, +grpc/__pycache__/_channel.cpython-39.pyc,, +grpc/__pycache__/_common.cpython-39.pyc,, +grpc/__pycache__/_compression.cpython-39.pyc,, +grpc/__pycache__/_grpcio_metadata.cpython-39.pyc,, +grpc/__pycache__/_interceptor.cpython-39.pyc,, +grpc/__pycache__/_observability.cpython-39.pyc,, +grpc/__pycache__/_plugin_wrapping.cpython-39.pyc,, +grpc/__pycache__/_runtime_protos.cpython-39.pyc,, +grpc/__pycache__/_server.cpython-39.pyc,, +grpc/__pycache__/_simple_stubs.cpython-39.pyc,, +grpc/__pycache__/_typing.cpython-39.pyc,, +grpc/__pycache__/_utilities.cpython-39.pyc,, +grpc/_auth.py,sha256=7GGYpUPXKqU7_iF_jniN1fDWch0_1T3x0nkhzRfsf40,2715 +grpc/_channel.py,sha256=Yq8CYcVTlgi3hWonOG3FyDwhP_03HCWjWMLb1wX4oQU,83613 +grpc/_common.py,sha256=L-pYsWV2y6d9EYCM9XnEBJzw1QoSDKfTbONqPiL6Z0g,6967 +grpc/_compression.py,sha256=hrvGhY68cMej3c53nV2CJg_pGg5kZrMWyUQBrJKxLiY,2054 +grpc/_cython/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/_cython/__pycache__/__init__.cpython-39.pyc,, +grpc/_cython/_credentials/roots.pem,sha256=iQrx6SIqWnawwL7sTFcKoy0a1ZIIj89WEN6VR4qN5jg,268777 +grpc/_cython/_cygrpc/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/_cython/_cygrpc/__pycache__/__init__.cpython-39.pyc,, +grpc/_cython/cygrpc.cp39-win_amd64.pyd,sha256=-dO_pVrhwCsVDuD0kpyWSbXp-4IZsnJ7JIYe2aJX1KI,8973312 +grpc/_grpcio_metadata.py,sha256=WRcUwxpPBxCJj4HiCcepBlg4Ikmlpv19Jd_EGtcox4E,26 +grpc/_interceptor.py,sha256=xkXA_oQCZXBTBiIWz-OmX81z7gf4KU1_dqU6wqmzBoY,26675 +grpc/_observability.py,sha256=8bzMgLtCQ-VZuFXK10HqWPn3AtpEzcLUuEbw_z6cMAQ,12085 +grpc/_plugin_wrapping.py,sha256=sd98vNKf1TqtQWMSDIKcT1z8tX2lwKrfbjkYQ1l4EB0,4518 +grpc/_runtime_protos.py,sha256=c2Jargloi3on2AKFXCDOJRSxqgxwS6O8EQJ_pUrJx64,5975 +grpc/_server.py,sha256=Zge8brc8JFYIeR1VXOeAx8zr1q6O8TUC7-2kup40MCY,52414 +grpc/_simple_stubs.py,sha256=3p-NQBfa41JcLydxBFIUfbj28XrlIgV1VVAOdrxT4gE,25198 +grpc/_typing.py,sha256=jsam5hDuif4oZU-3t8wrDnEF2IIh1AQHdkJe4hqVMuk,2853 +grpc/_utilities.py,sha256=n6dxyRWRfDjH8bmgrteEPDNdve_fVVYUZXhMk-8-ZcM,7265 +grpc/aio/__init__.py,sha256=ww6NbDPsc3XAFs8WSYykRYIMemQ8wx4tnoH7UTF6DUg,3255 +grpc/aio/__pycache__/__init__.cpython-39.pyc,, +grpc/aio/__pycache__/_base_call.cpython-39.pyc,, +grpc/aio/__pycache__/_base_channel.cpython-39.pyc,, +grpc/aio/__pycache__/_base_server.cpython-39.pyc,, +grpc/aio/__pycache__/_call.cpython-39.pyc,, +grpc/aio/__pycache__/_channel.cpython-39.pyc,, +grpc/aio/__pycache__/_interceptor.cpython-39.pyc,, +grpc/aio/__pycache__/_metadata.cpython-39.pyc,, +grpc/aio/__pycache__/_server.cpython-39.pyc,, +grpc/aio/__pycache__/_typing.cpython-39.pyc,, +grpc/aio/__pycache__/_utils.cpython-39.pyc,, +grpc/aio/_base_call.py,sha256=ikxxi_OwGAvN2xMxGQG1FrpJxznJsOUhhaBzvWf9st0,7818 
+grpc/aio/_base_channel.py,sha256=GPID6XGt6cEIZN5lUyYrjzBCDiF6Pofrb8AZW3426Sg,14258 +grpc/aio/_base_server.py,sha256=YFVpu8u2yUMvjz_ONbXDL2a1qEIjvhiDD34BvQL0jwI,12908 +grpc/aio/_call.py,sha256=V136GU08-lXEjU22DfesDxTC-5w7U4Azw93Wa76w0K4,26120 +grpc/aio/_channel.py,sha256=5v3L_nphEGAI--GPgA4PL4zzzXjb553F1mCtmnxLejM,22467 +grpc/aio/_interceptor.py,sha256=ncP744oVJi8aYuIoxCC1duAyyzqW4FxjoMvJHlbItRc,42516 +grpc/aio/_metadata.py,sha256=wvoXY3ky2RI6355iLS452nvQ6xqJry8ZiagUkTsaDws,5146 +grpc/aio/_server.py,sha256=WKTxg5gbjtP7uD5ju1Yc5yWO6R_eA39gfMAL7RviXDE,9170 +grpc/aio/_typing.py,sha256=QgYlt_nen7CnvtDxCcpUw0BVjRDo7roQHkycqzfcz0I,1421 +grpc/aio/_utils.py,sha256=Fw8FpKtucE2a2VRNF3JyAMzh0BtgNQZKpE5jWlHlwQw,843 +grpc/beta/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/beta/__pycache__/__init__.cpython-39.pyc,, +grpc/beta/__pycache__/_client_adaptations.cpython-39.pyc,, +grpc/beta/__pycache__/_metadata.cpython-39.pyc,, +grpc/beta/__pycache__/_server_adaptations.cpython-39.pyc,, +grpc/beta/__pycache__/implementations.cpython-39.pyc,, +grpc/beta/__pycache__/interfaces.cpython-39.pyc,, +grpc/beta/__pycache__/utilities.cpython-39.pyc,, +grpc/beta/_client_adaptations.py,sha256=4C6KYM68G9Dg76SbPNMnGrpcj3I1aLG7nAdvDEuzMl8,28038 +grpc/beta/_metadata.py,sha256=4bRwelrMpqyqZxmTb_lAhGDpICyBVR-qmsvB1Ti8JrA,1694 +grpc/beta/_server_adaptations.py,sha256=T7EHbyI8BCB_eMyGUoBXnIxQcjp_CoV2jHRPZ8P5sUY,15076 +grpc/beta/implementations.py,sha256=fEpftVpElsOzHK3MxVVQqE21CZ5HdBUkSAbjLNvU1Ew,12403 +grpc/beta/interfaces.py,sha256=U5D3tvq7Z_t4fCYJD9RfFqqnpSvS9VmB4cYQTtari-Y,6245 +grpc/beta/utilities.py,sha256=Yt9_vnL1Bx6l5QO4UL3-TMb4AqKzTJNhmDkRawmlXfg,5158 +grpc/experimental/__init__.py,sha256=J_nNA_SE-VfwMXleVA9CVxc2gNXpZPXIQNSflWeQ1S0,4237 +grpc/experimental/__pycache__/__init__.cpython-39.pyc,, +grpc/experimental/__pycache__/gevent.cpython-39.pyc,, +grpc/experimental/__pycache__/session_cache.cpython-39.pyc,, +grpc/experimental/aio/__init__.py,sha256=QoEtaa5C408IeaaSMKvooYNJdWCqyX6X9UYF-maJcIY,676 +grpc/experimental/aio/__pycache__/__init__.cpython-39.pyc,, +grpc/experimental/gevent.py,sha256=ZmFL0iK7irhC9JtTC2JJP23-IRG3_ZCohRQBhhVWuyM,1000 +grpc/experimental/session_cache.py,sha256=OdASXKtZYY8vP1Yo6GeRi4dfEaVuvyiOK5CknnROExE,1578 +grpc/framework/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/common/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/common/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/common/__pycache__/cardinality.cpython-39.pyc,, +grpc/framework/common/__pycache__/style.cpython-39.pyc,, +grpc/framework/common/cardinality.py,sha256=lhyS0Chc2kUCH84K069F2PgNLFJHpjfBjSdN79olL8E,1014 +grpc/framework/common/style.py,sha256=eFEX5mA2ynBzHFf8H2eKeAFp2mm-fR343MiGq9Xq_sI,848 +grpc/framework/foundation/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/foundation/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/abandonment.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/callable_util.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/future.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/logging_pool.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/stream.cpython-39.pyc,, +grpc/framework/foundation/__pycache__/stream_util.cpython-39.pyc,, +grpc/framework/foundation/abandonment.py,sha256=lI3FSuizdQ85z8sxEKluhvYj9I64YeImvqWDuWu6JXs,900 
+grpc/framework/foundation/callable_util.py,sha256=QJZNFhQXoJFD6DAeiRe3KGSnJd-br0C7aqgsagAR1HI,3249 +grpc/framework/foundation/future.py,sha256=WGkqWv-4W0ahgE0iRsgsKdIuVNIBny22OrARQb4AMfc,8592 +grpc/framework/foundation/logging_pool.py,sha256=4hGk3EL1_QyDyF4ic4CF5SI2rrTwWe7Pczj-dJEyuYs,2320 +grpc/framework/foundation/stream.py,sha256=l2VMgFK5qhPMBT0yjM75ZppHMIf5Lzz6Iwnp6Wd9wAw,1420 +grpc/framework/foundation/stream_util.py,sha256=xbo7u8uXLBsQ_vf_NKHsls8mFOmmsaLQni51yk_xLpw,4920 +grpc/framework/interfaces/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/interfaces/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/interfaces/base/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/interfaces/base/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/interfaces/base/__pycache__/base.cpython-39.pyc,, +grpc/framework/interfaces/base/__pycache__/utilities.cpython-39.pyc,, +grpc/framework/interfaces/base/base.py,sha256=r1386XyqmkzGyDML3uR4o7PABU2VXbF2eavmbbbPTq0,12561 +grpc/framework/interfaces/base/utilities.py,sha256=JjTfu63jD7mMRinR0pNuETCg3veJggaCI3BqY3hz0e4,2444 +grpc/framework/interfaces/face/__init__.py,sha256=v-bMmhfnkYP7kR6Mw9wPLG_cCagk-ZKiUZi6pyap2GU,590 +grpc/framework/interfaces/face/__pycache__/__init__.cpython-39.pyc,, +grpc/framework/interfaces/face/__pycache__/face.cpython-39.pyc,, +grpc/framework/interfaces/face/__pycache__/utilities.cpython-39.pyc,, +grpc/framework/interfaces/face/face.py,sha256=Ml3loDRLwrunMkCs_B9iE6uJEbV3QSCciWxLWykGaYo,40784 +grpc/framework/interfaces/face/utilities.py,sha256=G8o4hCBcDPwtF2pJg0qDM5_7hxjvIjLC3THapJ9oJG8,7026 +grpcio-1.65.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +grpcio-1.65.1.dist-info/LICENSE,sha256=r2PsgwP9UTw5BUp_27KVfsqU-p-O8FvhUTnWMS8iJ3s,30297 +grpcio-1.65.1.dist-info/METADATA,sha256=ldOYWgSq5hRYeG4ROOCPwJqcL5jjavAomKTicTrSZHk,3443 +grpcio-1.65.1.dist-info/RECORD,, +grpcio-1.65.1.dist-info/WHEEL,sha256=Z6c-bE0pUM47a70GvqO_SvH_XXU0lm62gEAKtoNJ08A,100 +grpcio-1.65.1.dist-info/top_level.txt,sha256=eEd2Jq_aVQFp38bWW8Pfwjz_5iibqeOFT-2zXlPAq_8,5 diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/WHEEL b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..b7132af6d27aab726a7499fc58ccd63c206a0a33 --- /dev/null +++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: false +Tag: cp39-cp39-win_amd64 + diff --git a/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/top_level.txt b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b2fe54cba7bccaa783f551549920c8e8927cf1e --- /dev/null +++ b/MLPY/Lib/site-packages/grpcio-1.65.1.dist-info/top_level.txt @@ -0,0 +1 @@ +grpc diff --git a/MLPY/Lib/site-packages/gym/__init__.py b/MLPY/Lib/site-packages/gym/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..185f064ff222081cdde1aed0316bf694c53868c1 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/__init__.py @@ -0,0 +1,43 @@ +"""Root __init__ of the gym module setting the __all__ of gym modules.""" +# isort: skip_file + +from gym import error +from gym.version import VERSION as __version__ + +from gym.core import ( + Env, + Wrapper, + ObservationWrapper, + ActionWrapper, + RewardWrapper, +) +from gym.spaces import Space +from gym.envs import make, spec, register +from gym import logger 
+from gym import vector +from gym import wrappers +import os +import sys + +__all__ = ["Env", "Space", "Wrapper", "make", "spec", "register"] + +# Initializing pygame initializes audio connections through SDL. SDL uses alsa by default on all Linux systems +# SDL connecting to alsa frequently create these giant lists of warnings every time you import an environment using +# pygame +# DSP is far more benign (and should probably be the default in SDL anyways) + +if sys.platform.startswith("linux"): + os.environ["SDL_AUDIODRIVER"] = "dsp" + +os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide" + +try: + import gym_notices.notices as notices + + # print version warning if necessary + notice = notices.notices.get(__version__) + if notice: + print(notice, file=sys.stderr) + +except Exception: # nosec + pass diff --git a/MLPY/Lib/site-packages/gym/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8245d2b012638ffe6e4e3afb4095dc91fcb9783d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/__pycache__/core.cpython-39.pyc b/MLPY/Lib/site-packages/gym/__pycache__/core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8927d1b5ff37575918894d29b3d125c30b410b40 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/__pycache__/core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/__pycache__/error.cpython-39.pyc b/MLPY/Lib/site-packages/gym/__pycache__/error.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62aa32016d06e14787ac97b7100e83d48b061975 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/__pycache__/error.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/__pycache__/logger.cpython-39.pyc b/MLPY/Lib/site-packages/gym/__pycache__/logger.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0f99402d690961416f0aa259c91c08958c8dd53 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/__pycache__/logger.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/__pycache__/version.cpython-39.pyc b/MLPY/Lib/site-packages/gym/__pycache__/version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43cc3622de306eac0f1cd7fa95382f3e18874541 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/__pycache__/version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/core.py b/MLPY/Lib/site-packages/gym/core.py new file mode 100644 index 0000000000000000000000000000000000000000..96e5870ce4c3f2d3519f91dc578252fa6119acac --- /dev/null +++ b/MLPY/Lib/site-packages/gym/core.py @@ -0,0 +1,468 @@ +"""Core API for Environment, Wrapper, ActionWrapper, RewardWrapper and ObservationWrapper.""" +import sys +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Optional, + SupportsFloat, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +from gym import spaces +from gym.logger import warn +from gym.utils import seeding + +if TYPE_CHECKING: + from gym.envs.registration import EnvSpec + +if sys.version_info[0:2] == (3, 6): + warn( + "Gym minimally supports python 3.6 as the python foundation not longer supports the version, please update your version to 3.7+" + ) + +ObsType = TypeVar("ObsType") +ActType = TypeVar("ActType") +RenderFrame = TypeVar("RenderFrame") + + +class Env(Generic[ObsType, ActType]): + r"""The main 
OpenAI Gym class. + + It encapsulates an environment with arbitrary behind-the-scenes dynamics. + An environment can be partially or fully observed. + + The main API methods that users of this class need to know are: + + - :meth:`step` - Takes a step in the environment using an action returning the next observation, reward, + if the environment terminated and observation information. + - :meth:`reset` - Resets the environment to an initial state, returning the initial observation and observation information. + - :meth:`render` - Renders the environment observation with modes depending on the output + - :meth:`close` - Closes the environment, important for rendering where pygame is imported + + And set the following attributes: + + - :attr:`action_space` - The Space object corresponding to valid actions + - :attr:`observation_space` - The Space object corresponding to valid observations + - :attr:`reward_range` - A tuple corresponding to the minimum and maximum possible rewards + - :attr:`spec` - An environment spec that contains the information used to initialise the environment from `gym.make` + - :attr:`metadata` - The metadata of the environment, i.e. render modes + - :attr:`np_random` - The random number generator for the environment + + Note: a default reward range set to :math:`(-\infty,+\infty)` already exists. Set it if you want a narrower range. + """ + + # Set this in SOME subclasses + metadata: Dict[str, Any] = {"render_modes": []} + # define render_mode if your environment supports rendering + render_mode: Optional[str] = None + reward_range = (-float("inf"), float("inf")) + spec: "EnvSpec" = None + + # Set these in ALL subclasses + action_space: spaces.Space[ActType] + observation_space: spaces.Space[ObsType] + + # Created + _np_random: Optional[np.random.Generator] = None + + @property + def np_random(self) -> np.random.Generator: + """Returns the environment's internal :attr:`_np_random` that if not set will initialise with a random seed.""" + if self._np_random is None: + self._np_random, seed = seeding.np_random() + return self._np_random + + @np_random.setter + def np_random(self, value: np.random.Generator): + self._np_random = value + + def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]: + """Run one timestep of the environment's dynamics. + + When end of episode is reached, you are responsible for calling :meth:`reset` to reset this environment's state. + Accepts an action and returns either a tuple `(observation, reward, terminated, truncated, info)`. + + Args: + action (ActType): an action provided by the agent + + Returns: + observation (object): this will be an element of the environment's :attr:`observation_space`. + This may, for instance, be a numpy array containing the positions and velocities of certain objects. + reward (float): The amount of reward returned as a result of taking the action. + terminated (bool): whether a `terminal state` (as defined under the MDP of the task) is reached. + In this case further step() calls could return undefined results. + truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. + Typically a timelimit, but could also be used to indicate agent physically going out of bounds. + Can be used to end the episode prematurely before a `terminal state` is reached. + info (dictionary): `info` contains auxiliary diagnostic information (helpful for debugging, learning, and logging). 
+ This might, for instance, contain: metrics that describe the agent's performance state, variables that are + hidden from observations, or individual reward terms that are combined to produce the total reward. + It also can contain information that distinguishes truncation and termination, however this is deprecated in favour + of returning two booleans, and will be removed in a future version. + + (deprecated) + done (bool): A boolean value for if the episode has ended, in which case further :meth:`step` calls will return undefined results. + A done signal may be emitted for different reasons: Maybe the task underlying the environment was solved successfully, + a certain timelimit was exceeded, or the physics simulation has entered an invalid state. + """ + raise NotImplementedError + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ) -> Tuple[ObsType, dict]: + """Resets the environment to an initial state and returns the initial observation. + + This method can reset the environment's random number generator(s) if ``seed`` is an integer or + if the environment has not yet initialized a random number generator. + If the environment already has a random number generator and :meth:`reset` is called with ``seed=None``, + the RNG should not be reset. Moreover, :meth:`reset` should (in the typical use case) be called with an + integer seed right after initialization and then never again. + + Args: + seed (optional int): The seed that is used to initialize the environment's PRNG. + If the environment does not already have a PRNG and ``seed=None`` (the default option) is passed, + a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). + However, if the environment already has a PRNG and ``seed=None`` is passed, the PRNG will *not* be reset. + If you pass an integer, the PRNG will be reset even if it already exists. + Usually, you want to pass an integer *right after the environment has been initialized and then never again*. + Please refer to the minimal example above to see this paradigm in action. + options (optional dict): Additional information to specify how the environment is reset (optional, + depending on the specific environment) + + + Returns: + observation (object): Observation of the initial state. This will be an element of :attr:`observation_space` + (typically a numpy array) and is analogous to the observation returned by :meth:`step`. + info (dictionary): This dictionary contains auxiliary information complementing ``observation``. It should be analogous to + the ``info`` returned by :meth:`step`. + """ + # Initialize the RNG if the seed is manually passed + if seed is not None: + self._np_random, seed = seeding.np_random(seed) + + def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]: + """Compute the render frames as specified by render_mode attribute during initialization of the environment. + + The set of supported modes varies per environment. (And some + third-party environments may not support rendering at all.) + By convention, if render_mode is: + + - None (default): no render is computed. + - human: render return None. + The environment is continuously rendered in the current display or terminal. Usually for human consumption. + - rgb_array: return a single frame representing the current state of the environment. + A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image. 
+ - rgb_array_list: return a list of frames representing the states of the environment since the last reset. + Each frame is a numpy.ndarray with shape (x, y, 3), as with `rgb_array`. + - ansi: Return a strings (str) or StringIO.StringIO containing a + terminal-style text representation for each time step. + The text can include newlines and ANSI escape sequences (e.g. for colors). + + Note: + Make sure that your class's metadata 'render_modes' key includes + the list of supported modes. It's recommended to call super() + in implementations to use the functionality of this method. + """ + raise NotImplementedError + + def close(self): + """Override close in your subclass to perform any necessary cleanup. + + Environments will automatically :meth:`close()` themselves when + garbage collected or when the program exits. + """ + pass + + @property + def unwrapped(self) -> "Env": + """Returns the base non-wrapped environment. + + Returns: + Env: The base non-wrapped gym.Env instance + """ + return self + + def __str__(self): + """Returns a string of the environment with the spec id if specified.""" + if self.spec is None: + return f"<{type(self).__name__} instance>" + else: + return f"<{type(self).__name__}<{self.spec.id}>>" + + def __enter__(self): + """Support with-statement for the environment.""" + return self + + def __exit__(self, *args): + """Support with-statement for the environment.""" + self.close() + # propagate exception + return False + + +class Wrapper(Env[ObsType, ActType]): + """Wraps an environment to allow a modular transformation of the :meth:`step` and :meth:`reset` methods. + + This class is the base class for all wrappers. The subclass could override + some methods to change the behavior of the original environment without touching the + original code. + + Note: + Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`. + """ + + def __init__(self, env: Env): + """Wraps an environment to allow a modular transformation of the :meth:`step` and :meth:`reset` methods. 
+ + Args: + env: The environment to wrap + """ + self.env = env + + self._action_space: Optional[spaces.Space] = None + self._observation_space: Optional[spaces.Space] = None + self._reward_range: Optional[Tuple[SupportsFloat, SupportsFloat]] = None + self._metadata: Optional[dict] = None + + def __getattr__(self, name): + """Returns an attribute with ``name``, unless ``name`` starts with an underscore.""" + if name.startswith("_"): + raise AttributeError(f"accessing private attribute '{name}' is prohibited") + return getattr(self.env, name) + + @property + def spec(self): + """Returns the environment specification.""" + return self.env.spec + + @classmethod + def class_name(cls): + """Returns the class name of the wrapper.""" + return cls.__name__ + + @property + def action_space(self) -> spaces.Space[ActType]: + """Returns the action space of the environment.""" + if self._action_space is None: + return self.env.action_space + return self._action_space + + @action_space.setter + def action_space(self, space: spaces.Space): + self._action_space = space + + @property + def observation_space(self) -> spaces.Space: + """Returns the observation space of the environment.""" + if self._observation_space is None: + return self.env.observation_space + return self._observation_space + + @observation_space.setter + def observation_space(self, space: spaces.Space): + self._observation_space = space + + @property + def reward_range(self) -> Tuple[SupportsFloat, SupportsFloat]: + """Return the reward range of the environment.""" + if self._reward_range is None: + return self.env.reward_range + return self._reward_range + + @reward_range.setter + def reward_range(self, value: Tuple[SupportsFloat, SupportsFloat]): + self._reward_range = value + + @property + def metadata(self) -> dict: + """Returns the environment metadata.""" + if self._metadata is None: + return self.env.metadata + return self._metadata + + @metadata.setter + def metadata(self, value): + self._metadata = value + + @property + def render_mode(self) -> Optional[str]: + """Returns the environment render_mode.""" + return self.env.render_mode + + @property + def np_random(self) -> np.random.Generator: + """Returns the environment np_random.""" + return self.env.np_random + + @np_random.setter + def np_random(self, value): + self.env.np_random = value + + @property + def _np_random(self): + raise AttributeError( + "Can't access `_np_random` of a wrapper, use `.unwrapped._np_random` or `.np_random`." 
+ ) + + def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]: + """Steps through the environment with action.""" + return self.env.step(action) + + def reset(self, **kwargs) -> Tuple[ObsType, dict]: + """Resets the environment with kwargs.""" + return self.env.reset(**kwargs) + + def render( + self, *args, **kwargs + ) -> Optional[Union[RenderFrame, List[RenderFrame]]]: + """Renders the environment.""" + return self.env.render(*args, **kwargs) + + def close(self): + """Closes the environment.""" + return self.env.close() + + def __str__(self): + """Returns the wrapper name and the unwrapped environment string.""" + return f"<{type(self).__name__}{self.env}>" + + def __repr__(self): + """Returns the string representation of the wrapper.""" + return str(self) + + @property + def unwrapped(self) -> Env: + """Returns the base environment of the wrapper.""" + return self.env.unwrapped + + +class ObservationWrapper(Wrapper): + """Superclass of wrappers that can modify observations using :meth:`observation` for :meth:`reset` and :meth:`step`. + + If you would like to apply a function to the observation that is returned by the base environment before + passing it to learning code, you can simply inherit from :class:`ObservationWrapper` and overwrite the method + :meth:`observation` to implement that transformation. The transformation defined in that method must be + defined on the base environment’s observation space. However, it may take values in a different space. + In that case, you need to specify the new observation space of the wrapper by setting :attr:`self.observation_space` + in the :meth:`__init__` method of your wrapper. + + For example, you might have a 2D navigation task where the environment returns dictionaries as observations with + keys ``"agent_position"`` and ``"target_position"``. A common thing to do might be to throw away some degrees of + freedom and only consider the position of the target relative to the agent, i.e. + ``observation["target_position"] - observation["agent_position"]``. For this, you could implement an + observation wrapper like this:: + + class RelativePosition(gym.ObservationWrapper): + def __init__(self, env): + super().__init__(env) + self.observation_space = Box(shape=(2,), low=-np.inf, high=np.inf) + + def observation(self, obs): + return obs["target"] - obs["agent"] + + Among others, Gym provides the observation wrapper :class:`TimeAwareObservation`, which adds information about the + index of the timestep to the observation. + """ + + def reset(self, **kwargs): + """Resets the environment, returning a modified observation using :meth:`self.observation`.""" + obs, info = self.env.reset(**kwargs) + return self.observation(obs), info + + def step(self, action): + """Returns a modified observation using :meth:`self.observation` after calling :meth:`env.step`.""" + observation, reward, terminated, truncated, info = self.env.step(action) + return self.observation(observation), reward, terminated, truncated, info + + def observation(self, observation): + """Returns a modified observation.""" + raise NotImplementedError + + +class RewardWrapper(Wrapper): + """Superclass of wrappers that can modify the returning reward from a step. + + If you would like to apply a function to the reward that is returned by the base environment before + passing it to learning code, you can simply inherit from :class:`RewardWrapper` and overwrite the method + :meth:`reward` to implement that transformation. 
+ This transformation might change the reward range; to specify the reward range of your wrapper, + you can simply define :attr:`self.reward_range` in :meth:`__init__`. + + Let us look at an example: Sometimes (especially when we do not have control over the reward + because it is intrinsic), we want to clip the reward to a range to gain some numerical stability. + To do that, we could, for instance, implement the following wrapper:: + + class ClipReward(gym.RewardWrapper): + def __init__(self, env, min_reward, max_reward): + super().__init__(env) + self.min_reward = min_reward + self.max_reward = max_reward + self.reward_range = (min_reward, max_reward) + + def reward(self, reward): + return np.clip(reward, self.min_reward, self.max_reward) + """ + + def step(self, action): + """Modifies the reward using :meth:`self.reward` after the environment :meth:`env.step`.""" + observation, reward, terminated, truncated, info = self.env.step(action) + return observation, self.reward(reward), terminated, truncated, info + + def reward(self, reward): + """Returns a modified ``reward``.""" + raise NotImplementedError + + +class ActionWrapper(Wrapper): + """Superclass of wrappers that can modify the action before :meth:`env.step`. + + If you would like to apply a function to the action before passing it to the base environment, + you can simply inherit from :class:`ActionWrapper` and overwrite the method :meth:`action` to implement + that transformation. The transformation defined in that method must take values in the base environment’s + action space. However, its domain might differ from the original action space. + In that case, you need to specify the new action space of the wrapper by setting :attr:`self.action_space` in + the :meth:`__init__` method of your wrapper. + + Let’s say you have an environment with action space of type :class:`gym.spaces.Box`, but you would only like + to use a finite subset of actions. Then, you might want to implement the following wrapper:: + + class DiscreteActions(gym.ActionWrapper): + def __init__(self, env, disc_to_cont): + super().__init__(env) + self.disc_to_cont = disc_to_cont + self.action_space = Discrete(len(disc_to_cont)) + + def action(self, act): + return self.disc_to_cont[act] + + if __name__ == "__main__": + env = gym.make("LunarLanderContinuous-v2") + wrapped_env = DiscreteActions(env, [np.array([1,0]), np.array([-1,0]), + np.array([0,1]), np.array([0,-1])]) + print(wrapped_env.action_space) #Discrete(4) + + + Among others, Gym provides the action wrappers :class:`ClipAction` and :class:`RescaleAction`. 
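A short sketch of how the two wrappers named above are typically composed around a continuous-control environment; the id ``"MountainCarContinuous-v0"`` is only an illustrative choice (it is one of the ids registered in ``gym/envs/__init__.py`` below), and the stacking order shown is one reasonable convention rather than a requirement:

```python
# Sketch: stacking the built-in RescaleAction / ClipAction wrappers on a Box action space.
# The environment id and wrapper order are illustrative assumptions.
import gym
from gym.wrappers import ClipAction, RescaleAction

env = gym.make("MountainCarContinuous-v0")
env = RescaleAction(env, min_action=-1.0, max_action=1.0)  # agent now acts in [-1, 1]
env = ClipAction(env)                                      # out-of-range actions get clipped
print(env.action_space)                                    # inspect the resulting action space
```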
+ """ + + def step(self, action): + """Runs the environment :meth:`env.step` using the modified ``action`` from :meth:`self.action`.""" + return self.env.step(self.action(action)) + + def action(self, action): + """Returns a modified action before :meth:`env.step` is called.""" + raise NotImplementedError + + def reverse_action(self, action): + """Returns a reversed ``action``.""" + raise NotImplementedError diff --git a/MLPY/Lib/site-packages/gym/envs/__init__.py b/MLPY/Lib/site-packages/gym/envs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7281027697b37079622dd2f7997c87fee68300 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/__init__.py @@ -0,0 +1,320 @@ +from gym.envs.registration import load_env_plugins as _load_env_plugins +from gym.envs.registration import make, register, registry, spec + +# Hook to load plugins from entry points +_load_env_plugins() + + +# Classic +# ---------------------------------------- + +register( + id="CartPole-v0", + entry_point="gym.envs.classic_control.cartpole:CartPoleEnv", + max_episode_steps=200, + reward_threshold=195.0, +) + +register( + id="CartPole-v1", + entry_point="gym.envs.classic_control.cartpole:CartPoleEnv", + max_episode_steps=500, + reward_threshold=475.0, +) + +register( + id="MountainCar-v0", + entry_point="gym.envs.classic_control.mountain_car:MountainCarEnv", + max_episode_steps=200, + reward_threshold=-110.0, +) + +register( + id="MountainCarContinuous-v0", + entry_point="gym.envs.classic_control.continuous_mountain_car:Continuous_MountainCarEnv", + max_episode_steps=999, + reward_threshold=90.0, +) + +register( + id="Pendulum-v1", + entry_point="gym.envs.classic_control.pendulum:PendulumEnv", + max_episode_steps=200, +) + +register( + id="Acrobot-v1", + entry_point="gym.envs.classic_control.acrobot:AcrobotEnv", + reward_threshold=-100.0, + max_episode_steps=500, +) + +# Box2d +# ---------------------------------------- + +register( + id="LunarLander-v2", + entry_point="gym.envs.box2d.lunar_lander:LunarLander", + max_episode_steps=1000, + reward_threshold=200, +) + +register( + id="LunarLanderContinuous-v2", + entry_point="gym.envs.box2d.lunar_lander:LunarLander", + kwargs={"continuous": True}, + max_episode_steps=1000, + reward_threshold=200, +) + +register( + id="BipedalWalker-v3", + entry_point="gym.envs.box2d.bipedal_walker:BipedalWalker", + max_episode_steps=1600, + reward_threshold=300, +) + +register( + id="BipedalWalkerHardcore-v3", + entry_point="gym.envs.box2d.bipedal_walker:BipedalWalker", + kwargs={"hardcore": True}, + max_episode_steps=2000, + reward_threshold=300, +) + +register( + id="CarRacing-v2", + entry_point="gym.envs.box2d.car_racing:CarRacing", + max_episode_steps=1000, + reward_threshold=900, +) + +# Toy Text +# ---------------------------------------- + +register( + id="Blackjack-v1", + entry_point="gym.envs.toy_text.blackjack:BlackjackEnv", + kwargs={"sab": True, "natural": False}, +) + +register( + id="FrozenLake-v1", + entry_point="gym.envs.toy_text.frozen_lake:FrozenLakeEnv", + kwargs={"map_name": "4x4"}, + max_episode_steps=100, + reward_threshold=0.70, # optimum = 0.74 +) + +register( + id="FrozenLake8x8-v1", + entry_point="gym.envs.toy_text.frozen_lake:FrozenLakeEnv", + kwargs={"map_name": "8x8"}, + max_episode_steps=200, + reward_threshold=0.85, # optimum = 0.91 +) + +register( + id="CliffWalking-v0", + entry_point="gym.envs.toy_text.cliffwalking:CliffWalkingEnv", +) + +register( + id="Taxi-v3", + entry_point="gym.envs.toy_text.taxi:TaxiEnv", + 
reward_threshold=8, # optimum = 8.46 + max_episode_steps=200, +) + +# Mujoco +# ---------------------------------------- + +# 2D + +register( + id="Reacher-v2", + entry_point="gym.envs.mujoco:ReacherEnv", + max_episode_steps=50, + reward_threshold=-3.75, +) + +register( + id="Reacher-v4", + entry_point="gym.envs.mujoco.reacher_v4:ReacherEnv", + max_episode_steps=50, + reward_threshold=-3.75, +) + +register( + id="Pusher-v2", + entry_point="gym.envs.mujoco:PusherEnv", + max_episode_steps=100, + reward_threshold=0.0, +) + +register( + id="Pusher-v4", + entry_point="gym.envs.mujoco.pusher_v4:PusherEnv", + max_episode_steps=100, + reward_threshold=0.0, +) + +register( + id="InvertedPendulum-v2", + entry_point="gym.envs.mujoco:InvertedPendulumEnv", + max_episode_steps=1000, + reward_threshold=950.0, +) + +register( + id="InvertedPendulum-v4", + entry_point="gym.envs.mujoco.inverted_pendulum_v4:InvertedPendulumEnv", + max_episode_steps=1000, + reward_threshold=950.0, +) + +register( + id="InvertedDoublePendulum-v2", + entry_point="gym.envs.mujoco:InvertedDoublePendulumEnv", + max_episode_steps=1000, + reward_threshold=9100.0, +) + +register( + id="InvertedDoublePendulum-v4", + entry_point="gym.envs.mujoco.inverted_double_pendulum_v4:InvertedDoublePendulumEnv", + max_episode_steps=1000, + reward_threshold=9100.0, +) + +register( + id="HalfCheetah-v2", + entry_point="gym.envs.mujoco:HalfCheetahEnv", + max_episode_steps=1000, + reward_threshold=4800.0, +) + +register( + id="HalfCheetah-v3", + entry_point="gym.envs.mujoco.half_cheetah_v3:HalfCheetahEnv", + max_episode_steps=1000, + reward_threshold=4800.0, +) + +register( + id="HalfCheetah-v4", + entry_point="gym.envs.mujoco.half_cheetah_v4:HalfCheetahEnv", + max_episode_steps=1000, + reward_threshold=4800.0, +) + +register( + id="Hopper-v2", + entry_point="gym.envs.mujoco:HopperEnv", + max_episode_steps=1000, + reward_threshold=3800.0, +) + +register( + id="Hopper-v3", + entry_point="gym.envs.mujoco.hopper_v3:HopperEnv", + max_episode_steps=1000, + reward_threshold=3800.0, +) + +register( + id="Hopper-v4", + entry_point="gym.envs.mujoco.hopper_v4:HopperEnv", + max_episode_steps=1000, + reward_threshold=3800.0, +) + +register( + id="Swimmer-v2", + entry_point="gym.envs.mujoco:SwimmerEnv", + max_episode_steps=1000, + reward_threshold=360.0, +) + +register( + id="Swimmer-v3", + entry_point="gym.envs.mujoco.swimmer_v3:SwimmerEnv", + max_episode_steps=1000, + reward_threshold=360.0, +) + +register( + id="Swimmer-v4", + entry_point="gym.envs.mujoco.swimmer_v4:SwimmerEnv", + max_episode_steps=1000, + reward_threshold=360.0, +) + +register( + id="Walker2d-v2", + max_episode_steps=1000, + entry_point="gym.envs.mujoco:Walker2dEnv", +) + +register( + id="Walker2d-v3", + max_episode_steps=1000, + entry_point="gym.envs.mujoco.walker2d_v3:Walker2dEnv", +) + +register( + id="Walker2d-v4", + max_episode_steps=1000, + entry_point="gym.envs.mujoco.walker2d_v4:Walker2dEnv", +) + +register( + id="Ant-v2", + entry_point="gym.envs.mujoco:AntEnv", + max_episode_steps=1000, + reward_threshold=6000.0, +) + +register( + id="Ant-v3", + entry_point="gym.envs.mujoco.ant_v3:AntEnv", + max_episode_steps=1000, + reward_threshold=6000.0, +) + +register( + id="Ant-v4", + entry_point="gym.envs.mujoco.ant_v4:AntEnv", + max_episode_steps=1000, + reward_threshold=6000.0, +) + +register( + id="Humanoid-v2", + entry_point="gym.envs.mujoco:HumanoidEnv", + max_episode_steps=1000, +) + +register( + id="Humanoid-v3", + entry_point="gym.envs.mujoco.humanoid_v3:HumanoidEnv", + 
max_episode_steps=1000, +) + +register( + id="Humanoid-v4", + entry_point="gym.envs.mujoco.humanoid_v4:HumanoidEnv", + max_episode_steps=1000, +) + +register( + id="HumanoidStandup-v2", + entry_point="gym.envs.mujoco:HumanoidStandupEnv", + max_episode_steps=1000, +) + +register( + id="HumanoidStandup-v4", + entry_point="gym.envs.mujoco.humanoidstandup_v4:HumanoidStandupEnv", + max_episode_steps=1000, +) diff --git a/MLPY/Lib/site-packages/gym/envs/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57985a413acb584b573a566c1d306d3ae671ffe4 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/__pycache__/registration.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/__pycache__/registration.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..779921879ebc328c361ecb7cecda1eb5996ec887 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/__pycache__/registration.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__init__.py b/MLPY/Lib/site-packages/gym/envs/box2d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e702010b62148d130b0a3f142f964fbef6878fd0 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/box2d/__init__.py @@ -0,0 +1,3 @@ +from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore +from gym.envs.box2d.car_racing import CarRacing +from gym.envs.box2d.lunar_lander import LunarLander, LunarLanderContinuous diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547b00669e3b0459c7c2d3eefa84f045cf8ebb3e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/bipedal_walker.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/bipedal_walker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87e58cd08cccf5771172f72a71c16c1cfa85a663 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/bipedal_walker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_dynamics.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_dynamics.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aff7c60fb5e912000dc8a4b099ee059c4d8b0bc7 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_dynamics.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_racing.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_racing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b304fa97572d6bd15f9bef865c0d0f655f4e1f6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/car_racing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/lunar_lander.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/lunar_lander.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfcb74801199b038b000d00fa0051b6d8f050288 Binary files /dev/null and 
b/MLPY/Lib/site-packages/gym/envs/box2d/__pycache__/lunar_lander.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/bipedal_walker.py b/MLPY/Lib/site-packages/gym/envs/box2d/bipedal_walker.py new file mode 100644 index 0000000000000000000000000000000000000000..bea56f4c7f73a67a33dcb587f9e782aeeb8bb554 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/box2d/bipedal_walker.py @@ -0,0 +1,857 @@ +__credits__ = ["Andrea PIERRÉ"] + +import math +from typing import TYPE_CHECKING, List, Optional + +import numpy as np + +import gym +from gym import error, spaces +from gym.error import DependencyNotInstalled +from gym.utils import EzPickle + +try: + import Box2D + from Box2D.b2 import ( + circleShape, + contactListener, + edgeShape, + fixtureDef, + polygonShape, + revoluteJointDef, + ) +except ImportError: + raise DependencyNotInstalled("box2D is not installed, run `pip install gym[box2d]`") + + +if TYPE_CHECKING: + import pygame + +FPS = 50 +SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well + +MOTORS_TORQUE = 80 +SPEED_HIP = 4 +SPEED_KNEE = 6 +LIDAR_RANGE = 160 / SCALE + +INITIAL_RANDOM = 5 + +HULL_POLY = [(-30, +9), (+6, +9), (+34, +1), (+34, -8), (-30, -8)] +LEG_DOWN = -8 / SCALE +LEG_W, LEG_H = 8 / SCALE, 34 / SCALE + +VIEWPORT_W = 600 +VIEWPORT_H = 400 + +TERRAIN_STEP = 14 / SCALE +TERRAIN_LENGTH = 200 # in steps +TERRAIN_HEIGHT = VIEWPORT_H / SCALE / 4 +TERRAIN_GRASS = 10 # low long are grass spots, in steps +TERRAIN_STARTPAD = 20 # in steps +FRICTION = 2.5 + +HULL_FD = fixtureDef( + shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for x, y in HULL_POLY]), + density=5.0, + friction=0.1, + categoryBits=0x0020, + maskBits=0x001, # collide only with ground + restitution=0.0, +) # 0.99 bouncy + +LEG_FD = fixtureDef( + shape=polygonShape(box=(LEG_W / 2, LEG_H / 2)), + density=1.0, + restitution=0.0, + categoryBits=0x0020, + maskBits=0x001, +) + +LOWER_FD = fixtureDef( + shape=polygonShape(box=(0.8 * LEG_W / 2, LEG_H / 2)), + density=1.0, + restitution=0.0, + categoryBits=0x0020, + maskBits=0x001, +) + + +class ContactDetector(contactListener): + def __init__(self, env): + contactListener.__init__(self) + self.env = env + + def BeginContact(self, contact): + if ( + self.env.hull == contact.fixtureA.body + or self.env.hull == contact.fixtureB.body + ): + self.env.game_over = True + for leg in [self.env.legs[1], self.env.legs[3]]: + if leg in [contact.fixtureA.body, contact.fixtureB.body]: + leg.ground_contact = True + + def EndContact(self, contact): + for leg in [self.env.legs[1], self.env.legs[3]]: + if leg in [contact.fixtureA.body, contact.fixtureB.body]: + leg.ground_contact = False + + +class BipedalWalker(gym.Env, EzPickle): + """ + ### Description + This is a simple 4-joint walker robot environment. + There are two versions: + - Normal, with slightly uneven terrain. + - Hardcore, with ladders, stumps, pitfalls. + + To solve the normal version, you need to get 300 points in 1600 time steps. + To solve the hardcore version, you need 300 points in 2000 time steps. + + A heuristic is provided for testing. It's also useful to get demonstrations + to learn from. To run the heuristic: + ``` + python gym/envs/box2d/bipedal_walker.py + ``` + + ### Action Space + Actions are motor speed values in the [-1, 1] range for each of the + 4 joints at both hips and knees. 
+ + ### Observation Space + State consists of hull angle speed, angular velocity, horizontal speed, + vertical speed, position of joints and joints angular speed, legs contact + with ground, and 10 lidar rangefinder measurements. There are no coordinates + in the state vector. + + ### Rewards + Reward is given for moving forward, totaling 300+ points up to the far end. + If the robot falls, it gets -100. Applying motor torque costs a small + amount of points. A more optimal agent will get a better score. + + ### Starting State + The walker starts standing at the left end of the terrain with the hull + horizontal, and both legs in the same position with a slight knee angle. + + ### Episode Termination + The episode will terminate if the hull gets in contact with the ground or + if the walker exceeds the right end of the terrain length. + + ### Arguments + To use to the _hardcore_ environment, you need to specify the + `hardcore=True` argument like below: + ```python + import gym + env = gym.make("BipedalWalker-v3", hardcore=True) + ``` + + ### Version History + - v3: returns closest lidar trace instead of furthest; + faster video recording + - v2: Count energy spent + - v1: Legs now report contact with ground; motors have higher torque and + speed; ground has higher friction; lidar rendered less nervously. + - v0: Initial version + + + + + ### Credits + Created by Oleg Klimov + + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": FPS, + } + + def __init__(self, render_mode: Optional[str] = None, hardcore: bool = False): + EzPickle.__init__(self, render_mode, hardcore) + self.isopen = True + + self.world = Box2D.b2World() + self.terrain: List[Box2D.b2Body] = [] + self.hull: Optional[Box2D.b2Body] = None + + self.prev_shaping = None + + self.hardcore = hardcore + + self.fd_polygon = fixtureDef( + shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)]), + friction=FRICTION, + ) + + self.fd_edge = fixtureDef( + shape=edgeShape(vertices=[(0, 0), (1, 1)]), + friction=FRICTION, + categoryBits=0x0001, + ) + + # we use 5.0 to represent the joints moving at maximum + # 5 x the rated speed due to impulses from ground contact etc. + low = np.array( + [ + -math.pi, + -5.0, + -5.0, + -5.0, + -math.pi, + -5.0, + -math.pi, + -5.0, + -0.0, + -math.pi, + -5.0, + -math.pi, + -5.0, + -0.0, + ] + + [-1.0] * 10 + ).astype(np.float32) + high = np.array( + [ + math.pi, + 5.0, + 5.0, + 5.0, + math.pi, + 5.0, + math.pi, + 5.0, + 5.0, + math.pi, + 5.0, + math.pi, + 5.0, + 5.0, + ] + + [1.0] * 10 + ).astype(np.float32) + self.action_space = spaces.Box( + np.array([-1, -1, -1, -1]).astype(np.float32), + np.array([1, 1, 1, 1]).astype(np.float32), + ) + self.observation_space = spaces.Box(low, high) + + # state = [ + # self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible. 
+ # 2.0 * self.hull.angularVelocity / FPS, + # 0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS, # Normalized to get -1..1 range + # 0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS, + # self.joints[ + # 0 + # ].angle, # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too) + # self.joints[0].speed / SPEED_HIP, + # self.joints[1].angle + 1.0, + # self.joints[1].speed / SPEED_KNEE, + # 1.0 if self.legs[1].ground_contact else 0.0, + # self.joints[2].angle, + # self.joints[2].speed / SPEED_HIP, + # self.joints[3].angle + 1.0, + # self.joints[3].speed / SPEED_KNEE, + # 1.0 if self.legs[3].ground_contact else 0.0, + # ] + # state += [l.fraction for l in self.lidar] + + self.render_mode = render_mode + self.screen: Optional[pygame.Surface] = None + self.clock = None + + def _destroy(self): + if not self.terrain: + return + self.world.contactListener = None + for t in self.terrain: + self.world.DestroyBody(t) + self.terrain = [] + self.world.DestroyBody(self.hull) + self.hull = None + for leg in self.legs: + self.world.DestroyBody(leg) + self.legs = [] + self.joints = [] + + def _generate_terrain(self, hardcore): + GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5) + state = GRASS + velocity = 0.0 + y = TERRAIN_HEIGHT + counter = TERRAIN_STARTPAD + oneshot = False + self.terrain = [] + self.terrain_x = [] + self.terrain_y = [] + + stair_steps, stair_width, stair_height = 0, 0, 0 + original_y = 0 + for i in range(TERRAIN_LENGTH): + x = i * TERRAIN_STEP + self.terrain_x.append(x) + + if state == GRASS and not oneshot: + velocity = 0.8 * velocity + 0.01 * np.sign(TERRAIN_HEIGHT - y) + if i > TERRAIN_STARTPAD: + velocity += self.np_random.uniform(-1, 1) / SCALE # 1 + y += velocity + + elif state == PIT and oneshot: + counter = self.np_random.integers(3, 5) + poly = [ + (x, y), + (x + TERRAIN_STEP, y), + (x + TERRAIN_STEP, y - 4 * TERRAIN_STEP), + (x, y - 4 * TERRAIN_STEP), + ] + self.fd_polygon.shape.vertices = poly + t = self.world.CreateStaticBody(fixtures=self.fd_polygon) + t.color1, t.color2 = (255, 255, 255), (153, 153, 153) + self.terrain.append(t) + + self.fd_polygon.shape.vertices = [ + (p[0] + TERRAIN_STEP * counter, p[1]) for p in poly + ] + t = self.world.CreateStaticBody(fixtures=self.fd_polygon) + t.color1, t.color2 = (255, 255, 255), (153, 153, 153) + self.terrain.append(t) + counter += 2 + original_y = y + + elif state == PIT and not oneshot: + y = original_y + if counter > 1: + y -= 4 * TERRAIN_STEP + + elif state == STUMP and oneshot: + counter = self.np_random.integers(1, 3) + poly = [ + (x, y), + (x + counter * TERRAIN_STEP, y), + (x + counter * TERRAIN_STEP, y + counter * TERRAIN_STEP), + (x, y + counter * TERRAIN_STEP), + ] + self.fd_polygon.shape.vertices = poly + t = self.world.CreateStaticBody(fixtures=self.fd_polygon) + t.color1, t.color2 = (255, 255, 255), (153, 153, 153) + self.terrain.append(t) + + elif state == STAIRS and oneshot: + stair_height = +1 if self.np_random.random() > 0.5 else -1 + stair_width = self.np_random.integers(4, 5) + stair_steps = self.np_random.integers(3, 5) + original_y = y + for s in range(stair_steps): + poly = [ + ( + x + (s * stair_width) * TERRAIN_STEP, + y + (s * stair_height) * TERRAIN_STEP, + ), + ( + x + ((1 + s) * stair_width) * TERRAIN_STEP, + y + (s * stair_height) * TERRAIN_STEP, + ), + ( + x + ((1 + s) * stair_width) * TERRAIN_STEP, + y + (-1 + s * stair_height) * TERRAIN_STEP, + ), + ( + x + (s * stair_width) * TERRAIN_STEP, + y + (-1 + s * stair_height) * TERRAIN_STEP, + ), + ] + 
self.fd_polygon.shape.vertices = poly + t = self.world.CreateStaticBody(fixtures=self.fd_polygon) + t.color1, t.color2 = (255, 255, 255), (153, 153, 153) + self.terrain.append(t) + counter = stair_steps * stair_width + + elif state == STAIRS and not oneshot: + s = stair_steps * stair_width - counter - stair_height + n = s / stair_width + y = original_y + (n * stair_height) * TERRAIN_STEP + + oneshot = False + self.terrain_y.append(y) + counter -= 1 + if counter == 0: + counter = self.np_random.integers(TERRAIN_GRASS / 2, TERRAIN_GRASS) + if state == GRASS and hardcore: + state = self.np_random.integers(1, _STATES_) + oneshot = True + else: + state = GRASS + oneshot = True + + self.terrain_poly = [] + for i in range(TERRAIN_LENGTH - 1): + poly = [ + (self.terrain_x[i], self.terrain_y[i]), + (self.terrain_x[i + 1], self.terrain_y[i + 1]), + ] + self.fd_edge.shape.vertices = poly + t = self.world.CreateStaticBody(fixtures=self.fd_edge) + color = (76, 255 if i % 2 == 0 else 204, 76) + t.color1 = color + t.color2 = color + self.terrain.append(t) + color = (102, 153, 76) + poly += [(poly[1][0], 0), (poly[0][0], 0)] + self.terrain_poly.append((poly, color)) + self.terrain.reverse() + + def _generate_clouds(self): + # Sorry for the clouds, couldn't resist + self.cloud_poly = [] + for i in range(TERRAIN_LENGTH // 20): + x = self.np_random.uniform(0, TERRAIN_LENGTH) * TERRAIN_STEP + y = VIEWPORT_H / SCALE * 3 / 4 + poly = [ + ( + x + + 15 * TERRAIN_STEP * math.sin(3.14 * 2 * a / 5) + + self.np_random.uniform(0, 5 * TERRAIN_STEP), + y + + 5 * TERRAIN_STEP * math.cos(3.14 * 2 * a / 5) + + self.np_random.uniform(0, 5 * TERRAIN_STEP), + ) + for a in range(5) + ] + x1 = min(p[0] for p in poly) + x2 = max(p[0] for p in poly) + self.cloud_poly.append((poly, x1, x2)) + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self._destroy() + self.world.contactListener_bug_workaround = ContactDetector(self) + self.world.contactListener = self.world.contactListener_bug_workaround + self.game_over = False + self.prev_shaping = None + self.scroll = 0.0 + self.lidar_render = 0 + + self._generate_terrain(self.hardcore) + self._generate_clouds() + + init_x = TERRAIN_STEP * TERRAIN_STARTPAD / 2 + init_y = TERRAIN_HEIGHT + 2 * LEG_H + self.hull = self.world.CreateDynamicBody( + position=(init_x, init_y), fixtures=HULL_FD + ) + self.hull.color1 = (127, 51, 229) + self.hull.color2 = (76, 76, 127) + self.hull.ApplyForceToCenter( + (self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True + ) + + self.legs: List[Box2D.b2Body] = [] + self.joints: List[Box2D.b2RevoluteJoint] = [] + for i in [-1, +1]: + leg = self.world.CreateDynamicBody( + position=(init_x, init_y - LEG_H / 2 - LEG_DOWN), + angle=(i * 0.05), + fixtures=LEG_FD, + ) + leg.color1 = (153 - i * 25, 76 - i * 25, 127 - i * 25) + leg.color2 = (102 - i * 25, 51 - i * 25, 76 - i * 25) + rjd = revoluteJointDef( + bodyA=self.hull, + bodyB=leg, + localAnchorA=(0, LEG_DOWN), + localAnchorB=(0, LEG_H / 2), + enableMotor=True, + enableLimit=True, + maxMotorTorque=MOTORS_TORQUE, + motorSpeed=i, + lowerAngle=-0.8, + upperAngle=1.1, + ) + self.legs.append(leg) + self.joints.append(self.world.CreateJoint(rjd)) + + lower = self.world.CreateDynamicBody( + position=(init_x, init_y - LEG_H * 3 / 2 - LEG_DOWN), + angle=(i * 0.05), + fixtures=LOWER_FD, + ) + lower.color1 = (153 - i * 25, 76 - i * 25, 127 - i * 25) + lower.color2 = (102 - i * 25, 51 - i * 25, 76 - i * 25) + rjd = revoluteJointDef( + 
bodyA=leg, + bodyB=lower, + localAnchorA=(0, -LEG_H / 2), + localAnchorB=(0, LEG_H / 2), + enableMotor=True, + enableLimit=True, + maxMotorTorque=MOTORS_TORQUE, + motorSpeed=1, + lowerAngle=-1.6, + upperAngle=-0.1, + ) + lower.ground_contact = False + self.legs.append(lower) + self.joints.append(self.world.CreateJoint(rjd)) + + self.drawlist = self.terrain + self.legs + [self.hull] + + class LidarCallback(Box2D.b2.rayCastCallback): + def ReportFixture(self, fixture, point, normal, fraction): + if (fixture.filterData.categoryBits & 1) == 0: + return -1 + self.p2 = point + self.fraction = fraction + return fraction + + self.lidar = [LidarCallback() for _ in range(10)] + if self.render_mode == "human": + self.render() + return self.step(np.array([0, 0, 0, 0]))[0], {} + + def step(self, action: np.ndarray): + assert self.hull is not None + + # self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help + control_speed = False # Should be easier as well + if control_speed: + self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1)) + self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1)) + self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1)) + self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1)) + else: + self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0])) + self.joints[0].maxMotorTorque = float( + MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1) + ) + self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1])) + self.joints[1].maxMotorTorque = float( + MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1) + ) + self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2])) + self.joints[2].maxMotorTorque = float( + MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1) + ) + self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3])) + self.joints[3].maxMotorTorque = float( + MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1) + ) + + self.world.Step(1.0 / FPS, 6 * 30, 2 * 30) + + pos = self.hull.position + vel = self.hull.linearVelocity + + for i in range(10): + self.lidar[i].fraction = 1.0 + self.lidar[i].p1 = pos + self.lidar[i].p2 = ( + pos[0] + math.sin(1.5 * i / 10.0) * LIDAR_RANGE, + pos[1] - math.cos(1.5 * i / 10.0) * LIDAR_RANGE, + ) + self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2) + + state = [ + self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible. 
+ 2.0 * self.hull.angularVelocity / FPS, + 0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS, # Normalized to get -1..1 range + 0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS, + self.joints[0].angle, + # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too) + self.joints[0].speed / SPEED_HIP, + self.joints[1].angle + 1.0, + self.joints[1].speed / SPEED_KNEE, + 1.0 if self.legs[1].ground_contact else 0.0, + self.joints[2].angle, + self.joints[2].speed / SPEED_HIP, + self.joints[3].angle + 1.0, + self.joints[3].speed / SPEED_KNEE, + 1.0 if self.legs[3].ground_contact else 0.0, + ] + state += [l.fraction for l in self.lidar] + assert len(state) == 24 + + self.scroll = pos.x - VIEWPORT_W / SCALE / 5 + + shaping = ( + 130 * pos[0] / SCALE + ) # moving forward is a way to receive reward (normalized to get 300 on completion) + shaping -= 5.0 * abs( + state[0] + ) # keep head straight, other than that and falling, any behavior is unpunished + + reward = 0 + if self.prev_shaping is not None: + reward = shaping - self.prev_shaping + self.prev_shaping = shaping + + for a in action: + reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1) + # normalized to about -50.0 using heuristic, more optimal agent should spend less + + terminated = False + if self.game_over or pos[0] < 0: + reward = -100 + terminated = True + if pos[0] > (TERRAIN_LENGTH - TERRAIN_GRASS) * TERRAIN_STEP: + terminated = True + + if self.render_mode == "human": + self.render() + return np.array(state, dtype=np.float32), reward, terminated, False, {} + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[box2d]`" + ) + + if self.screen is None and self.render_mode == "human": + pygame.init() + pygame.display.init() + self.screen = pygame.display.set_mode((VIEWPORT_W, VIEWPORT_H)) + if self.clock is None: + self.clock = pygame.time.Clock() + + self.surf = pygame.Surface( + (VIEWPORT_W + max(0.0, self.scroll) * SCALE, VIEWPORT_H) + ) + + pygame.transform.scale(self.surf, (SCALE, SCALE)) + + pygame.draw.polygon( + self.surf, + color=(215, 215, 255), + points=[ + (self.scroll * SCALE, 0), + (self.scroll * SCALE + VIEWPORT_W, 0), + (self.scroll * SCALE + VIEWPORT_W, VIEWPORT_H), + (self.scroll * SCALE, VIEWPORT_H), + ], + ) + + for poly, x1, x2 in self.cloud_poly: + if x2 < self.scroll / 2: + continue + if x1 > self.scroll / 2 + VIEWPORT_W / SCALE: + continue + pygame.draw.polygon( + self.surf, + color=(255, 255, 255), + points=[ + (p[0] * SCALE + self.scroll * SCALE / 2, p[1] * SCALE) for p in poly + ], + ) + gfxdraw.aapolygon( + self.surf, + [(p[0] * SCALE + self.scroll * SCALE / 2, p[1] * SCALE) for p in poly], + (255, 255, 255), + ) + for poly, color in self.terrain_poly: + if poly[1][0] < self.scroll: + continue + if poly[0][0] > self.scroll + VIEWPORT_W / SCALE: + continue + scaled_poly = [] + for coord in poly: + scaled_poly.append([coord[0] * SCALE, coord[1] * SCALE]) + pygame.draw.polygon(self.surf, color=color, points=scaled_poly) + gfxdraw.aapolygon(self.surf, scaled_poly, color) + + self.lidar_render = (self.lidar_render + 1) % 100 + i = self.lidar_render + if i < 2 * len(self.lidar): + single_lidar = ( + self.lidar[i] + if i < len(self.lidar) + else self.lidar[len(self.lidar) - i - 1] + ) + if hasattr(single_lidar, "p1") and hasattr(single_lidar, "p2"): + pygame.draw.line( + self.surf, + color=(255, 0, 0), + start_pos=(single_lidar.p1[0] * SCALE, single_lidar.p1[1] * SCALE), + end_pos=(single_lidar.p2[0] * SCALE, single_lidar.p2[1] * SCALE), + width=1, + ) + + for obj in self.drawlist: + for f in obj.fixtures: + trans = f.body.transform + if type(f.shape) is circleShape: + pygame.draw.circle( + self.surf, + color=obj.color1, + center=trans * f.shape.pos * SCALE, + radius=f.shape.radius * SCALE, + ) + pygame.draw.circle( + self.surf, + color=obj.color2, + center=trans * f.shape.pos * SCALE, + radius=f.shape.radius * SCALE, + ) + else: + path = [trans * v * SCALE for v in f.shape.vertices] + if len(path) > 2: + pygame.draw.polygon(self.surf, color=obj.color1, points=path) + gfxdraw.aapolygon(self.surf, path, obj.color1) + path.append(path[0]) + pygame.draw.polygon( + self.surf, color=obj.color2, points=path, width=1 + ) + gfxdraw.aapolygon(self.surf, path, obj.color2) + else: + pygame.draw.aaline( + self.surf, + start_pos=path[0], + end_pos=path[1], + color=obj.color1, + ) + + flagy1 = TERRAIN_HEIGHT * SCALE + flagy2 = flagy1 + 50 + x = TERRAIN_STEP * 3 * SCALE + pygame.draw.aaline( + self.surf, color=(0, 0, 0), start_pos=(x, flagy1), end_pos=(x, flagy2) + ) + f = [ + (x, flagy2), + (x, flagy2 - 10), + (x + 25, flagy2 - 5), + ] + pygame.draw.polygon(self.surf, color=(230, 51, 0), points=f) + pygame.draw.lines( + self.surf, color=(0, 0, 0), points=f + [f[0]], width=1, closed=False + ) + + self.surf = pygame.transform.flip(self.surf, False, True) + + if self.render_mode == "human": + assert self.screen is not None + self.screen.blit(self.surf, (-self.scroll * SCALE, 0)) + 
pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.surf)), axes=(1, 0, 2) + )[:, -VIEWPORT_W:] + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False + + +class BipedalWalkerHardcore: + def __init__(self): + raise error.Error( + "Error initializing BipedalWalkerHardcore Environment.\n" + "Currently, we do not support initializing this mode of environment by calling the class directly.\n" + "To use this environment, instead create it by specifying the hardcore keyword in gym.make, i.e.\n" + 'gym.make("BipedalWalker-v3", hardcore=True)' + ) + + +if __name__ == "__main__": + # Heurisic: suboptimal, have no notion of balance. + env = BipedalWalker() + env.reset() + steps = 0 + total_reward = 0 + a = np.array([0.0, 0.0, 0.0, 0.0]) + STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1, 2, 3 + SPEED = 0.29 # Will fall forward on higher speed + state = STAY_ON_ONE_LEG + moving_leg = 0 + supporting_leg = 1 - moving_leg + SUPPORT_KNEE_ANGLE = +0.1 + supporting_knee_angle = SUPPORT_KNEE_ANGLE + while True: + s, r, terminated, truncated, info = env.step(a) + total_reward += r + if steps % 20 == 0 or terminated or truncated: + print("\naction " + str([f"{x:+0.2f}" for x in a])) + print(f"step {steps} total_reward {total_reward:+0.2f}") + print("hull " + str([f"{x:+0.2f}" for x in s[0:4]])) + print("leg0 " + str([f"{x:+0.2f}" for x in s[4:9]])) + print("leg1 " + str([f"{x:+0.2f}" for x in s[9:14]])) + steps += 1 + + contact0 = s[8] + contact1 = s[13] + moving_s_base = 4 + 5 * moving_leg + supporting_s_base = 4 + 5 * supporting_leg + + hip_targ = [None, None] # -0.8 .. +1.1 + knee_targ = [None, None] # -0.6 .. 
+0.9 + hip_todo = [0.0, 0.0] + knee_todo = [0.0, 0.0] + + if state == STAY_ON_ONE_LEG: + hip_targ[moving_leg] = 1.1 + knee_targ[moving_leg] = -0.6 + supporting_knee_angle += 0.03 + if s[2] > SPEED: + supporting_knee_angle += 0.03 + supporting_knee_angle = min(supporting_knee_angle, SUPPORT_KNEE_ANGLE) + knee_targ[supporting_leg] = supporting_knee_angle + if s[supporting_s_base + 0] < 0.10: # supporting leg is behind + state = PUT_OTHER_DOWN + if state == PUT_OTHER_DOWN: + hip_targ[moving_leg] = +0.1 + knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE + knee_targ[supporting_leg] = supporting_knee_angle + if s[moving_s_base + 4]: + state = PUSH_OFF + supporting_knee_angle = min(s[moving_s_base + 2], SUPPORT_KNEE_ANGLE) + if state == PUSH_OFF: + knee_targ[moving_leg] = supporting_knee_angle + knee_targ[supporting_leg] = +1.0 + if s[supporting_s_base + 2] > 0.88 or s[2] > 1.2 * SPEED: + state = STAY_ON_ONE_LEG + moving_leg = 1 - moving_leg + supporting_leg = 1 - moving_leg + + if hip_targ[0]: + hip_todo[0] = 0.9 * (hip_targ[0] - s[4]) - 0.25 * s[5] + if hip_targ[1]: + hip_todo[1] = 0.9 * (hip_targ[1] - s[9]) - 0.25 * s[10] + if knee_targ[0]: + knee_todo[0] = 4.0 * (knee_targ[0] - s[6]) - 0.25 * s[7] + if knee_targ[1]: + knee_todo[1] = 4.0 * (knee_targ[1] - s[11]) - 0.25 * s[12] + + hip_todo[0] -= 0.9 * (0 - s[0]) - 1.5 * s[1] # PID to keep head strait + hip_todo[1] -= 0.9 * (0 - s[0]) - 1.5 * s[1] + knee_todo[0] -= 15.0 * s[3] # vertical speed, to damp oscillations + knee_todo[1] -= 15.0 * s[3] + + a[0] = hip_todo[0] + a[1] = knee_todo[0] + a[2] = hip_todo[1] + a[3] = knee_todo[1] + a = np.clip(0.5 * a, -1.0, 1.0) + + if terminated or truncated: + break diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/car_dynamics.py b/MLPY/Lib/site-packages/gym/envs/box2d/car_dynamics.py new file mode 100644 index 0000000000000000000000000000000000000000..435a4dec2fd9afb781ee99744807da61983b88b9 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/box2d/car_dynamics.py @@ -0,0 +1,351 @@ +""" +Top-down car dynamics simulation. + +Some ideas are taken from this great tutorial http://www.iforce2d.net/b2dtut/top-down-car by Chris Campbell. +This simulation is a bit more detailed, with wheels rotation. 
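As a hedged usage sketch for the `Car` class defined below (not part of the original module; the import path simply mirrors the file location in this diff, and all values are illustrative):

```python
# Minimal sketch: drive the top-down Car body directly in a Box2D world,
# mirroring what CarRacing does each frame.
import Box2D
from gym.envs.box2d.car_dynamics import Car

FPS = 50
world = Box2D.b2World((0, 0))                  # top-down view, so no gravity
car = Car(world, init_angle=0.0, init_x=0.0, init_y=0.0)

for _ in range(FPS):                           # one simulated second
    car.steer(0.2)                             # target steering position in [-1, 1]
    car.gas(0.5)                               # rear-wheel throttle, clipped to [0, 1]
    car.brake(0.0)                             # >= 0.9 locks the wheels
    car.step(1.0 / FPS)                        # wheel/engine/friction update
    world.Step(1.0 / FPS, 6 * 30, 2 * 30)      # then advance the physics world
car.destroy()
```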
+ +Created by Oleg Klimov +""" + +import math + +import Box2D +import numpy as np + +from gym.error import DependencyNotInstalled + +try: + from Box2D.b2 import fixtureDef, polygonShape, revoluteJointDef +except ImportError: + raise DependencyNotInstalled("box2D is not installed, run `pip install gym[box2d]`") + + +SIZE = 0.02 +ENGINE_POWER = 100000000 * SIZE * SIZE +WHEEL_MOMENT_OF_INERTIA = 4000 * SIZE * SIZE +FRICTION_LIMIT = ( + 1000000 * SIZE * SIZE +) # friction ~= mass ~= size^2 (calculated implicitly using density) +WHEEL_R = 27 +WHEEL_W = 14 +WHEELPOS = [(-55, +80), (+55, +80), (-55, -82), (+55, -82)] +HULL_POLY1 = [(-60, +130), (+60, +130), (+60, +110), (-60, +110)] +HULL_POLY2 = [(-15, +120), (+15, +120), (+20, +20), (-20, 20)] +HULL_POLY3 = [ + (+25, +20), + (+50, -10), + (+50, -40), + (+20, -90), + (-20, -90), + (-50, -40), + (-50, -10), + (-25, +20), +] +HULL_POLY4 = [(-50, -120), (+50, -120), (+50, -90), (-50, -90)] +WHEEL_COLOR = (0, 0, 0) +WHEEL_WHITE = (77, 77, 77) +MUD_COLOR = (102, 102, 0) + + +class Car: + def __init__(self, world, init_angle, init_x, init_y): + self.world: Box2D.b2World = world + self.hull: Box2D.b2Body = self.world.CreateDynamicBody( + position=(init_x, init_y), + angle=init_angle, + fixtures=[ + fixtureDef( + shape=polygonShape( + vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY1] + ), + density=1.0, + ), + fixtureDef( + shape=polygonShape( + vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY2] + ), + density=1.0, + ), + fixtureDef( + shape=polygonShape( + vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY3] + ), + density=1.0, + ), + fixtureDef( + shape=polygonShape( + vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY4] + ), + density=1.0, + ), + ], + ) + self.hull.color = (0.8, 0.0, 0.0) + self.wheels = [] + self.fuel_spent = 0.0 + WHEEL_POLY = [ + (-WHEEL_W, +WHEEL_R), + (+WHEEL_W, +WHEEL_R), + (+WHEEL_W, -WHEEL_R), + (-WHEEL_W, -WHEEL_R), + ] + for wx, wy in WHEELPOS: + front_k = 1.0 if wy > 0 else 1.0 + w = self.world.CreateDynamicBody( + position=(init_x + wx * SIZE, init_y + wy * SIZE), + angle=init_angle, + fixtures=fixtureDef( + shape=polygonShape( + vertices=[ + (x * front_k * SIZE, y * front_k * SIZE) + for x, y in WHEEL_POLY + ] + ), + density=0.1, + categoryBits=0x0020, + maskBits=0x001, + restitution=0.0, + ), + ) + w.wheel_rad = front_k * WHEEL_R * SIZE + w.color = WHEEL_COLOR + w.gas = 0.0 + w.brake = 0.0 + w.steer = 0.0 + w.phase = 0.0 # wheel angle + w.omega = 0.0 # angular velocity + w.skid_start = None + w.skid_particle = None + rjd = revoluteJointDef( + bodyA=self.hull, + bodyB=w, + localAnchorA=(wx * SIZE, wy * SIZE), + localAnchorB=(0, 0), + enableMotor=True, + enableLimit=True, + maxMotorTorque=180 * 900 * SIZE * SIZE, + motorSpeed=0, + lowerAngle=-0.4, + upperAngle=+0.4, + ) + w.joint = self.world.CreateJoint(rjd) + w.tiles = set() + w.userData = w + self.wheels.append(w) + self.drawlist = self.wheels + [self.hull] + self.particles = [] + + def gas(self, gas): + """control: rear wheel drive + + Args: + gas (float): How much gas gets applied. Gets clipped between 0 and 1. + """ + gas = np.clip(gas, 0, 1) + for w in self.wheels[2:4]: + diff = gas - w.gas + if diff > 0.1: + diff = 0.1 # gradually increase, but stop immediately + w.gas += diff + + def brake(self, b): + """control: brake + + Args: + b (0..1): Degree to which the brakes are applied. 
More than 0.9 blocks the wheels to zero rotation""" + for w in self.wheels: + w.brake = b + + def steer(self, s): + """control: steer + + Args: + s (-1..1): target position, it takes time to rotate steering wheel from side-to-side""" + self.wheels[0].steer = s + self.wheels[1].steer = s + + def step(self, dt): + for w in self.wheels: + # Steer each wheel + dir = np.sign(w.steer - w.joint.angle) + val = abs(w.steer - w.joint.angle) + w.joint.motorSpeed = dir * min(50.0 * val, 3.0) + + # Position => friction_limit + grass = True + friction_limit = FRICTION_LIMIT * 0.6 # Grass friction if no tile + for tile in w.tiles: + friction_limit = max( + friction_limit, FRICTION_LIMIT * tile.road_friction + ) + grass = False + + # Force + forw = w.GetWorldVector((0, 1)) + side = w.GetWorldVector((1, 0)) + v = w.linearVelocity + vf = forw[0] * v[0] + forw[1] * v[1] # forward speed + vs = side[0] * v[0] + side[1] * v[1] # side speed + + # WHEEL_MOMENT_OF_INERTIA*np.square(w.omega)/2 = E -- energy + # WHEEL_MOMENT_OF_INERTIA*w.omega * domega/dt = dE/dt = W -- power + # domega = dt*W/WHEEL_MOMENT_OF_INERTIA/w.omega + + # add small coef not to divide by zero + w.omega += ( + dt + * ENGINE_POWER + * w.gas + / WHEEL_MOMENT_OF_INERTIA + / (abs(w.omega) + 5.0) + ) + self.fuel_spent += dt * ENGINE_POWER * w.gas + + if w.brake >= 0.9: + w.omega = 0 + elif w.brake > 0: + BRAKE_FORCE = 15 # radians per second + dir = -np.sign(w.omega) + val = BRAKE_FORCE * w.brake + if abs(val) > abs(w.omega): + val = abs(w.omega) # low speed => same as = 0 + w.omega += dir * val + w.phase += w.omega * dt + + vr = w.omega * w.wheel_rad # rotating wheel speed + f_force = -vf + vr # force direction is direction of speed difference + p_force = -vs + + # Physically correct is to always apply friction_limit until speed is equal. + # But dt is finite, that will lead to oscillations if difference is already near zero. 
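A hedged numeric illustration of the engine update a few lines above (constants copied from this module, the scenario is invented): one Euler step of `dω = dt · ENGINE_POWER · gas / (I · (|ω| + 5))`, where the `+ 5` keeps the division finite when the wheel is at rest.

```python
# Illustrative only: a single omega update from standstill at full gas.
SIZE = 0.02
ENGINE_POWER = 100000000 * SIZE * SIZE
WHEEL_MOMENT_OF_INERTIA = 4000 * SIZE * SIZE

dt, gas, omega = 1.0 / 50, 1.0, 0.0
omega += dt * ENGINE_POWER * gas / WHEEL_MOMENT_OF_INERTIA / (abs(omega) + 5.0)
print(omega)   # 0.02 * 25000 / 5 = 100.0 rad/s, before tyre friction (applied further down) pulls it back
```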
+ + # Random coefficient to cut oscillations in few steps (have no effect on friction_limit) + f_force *= 205000 * SIZE * SIZE + p_force *= 205000 * SIZE * SIZE + force = np.sqrt(np.square(f_force) + np.square(p_force)) + + # Skid trace + if abs(force) > 2.0 * friction_limit: + if ( + w.skid_particle + and w.skid_particle.grass == grass + and len(w.skid_particle.poly) < 30 + ): + w.skid_particle.poly.append((w.position[0], w.position[1])) + elif w.skid_start is None: + w.skid_start = w.position + else: + w.skid_particle = self._create_particle( + w.skid_start, w.position, grass + ) + w.skid_start = None + else: + w.skid_start = None + w.skid_particle = None + + if abs(force) > friction_limit: + f_force /= force + p_force /= force + force = friction_limit # Correct physics here + f_force *= force + p_force *= force + + w.omega -= dt * f_force * w.wheel_rad / WHEEL_MOMENT_OF_INERTIA + + w.ApplyForceToCenter( + ( + p_force * side[0] + f_force * forw[0], + p_force * side[1] + f_force * forw[1], + ), + True, + ) + + def draw(self, surface, zoom, translation, angle, draw_particles=True): + import pygame.draw + + if draw_particles: + for p in self.particles: + poly = [pygame.math.Vector2(c).rotate_rad(angle) for c in p.poly] + poly = [ + ( + coords[0] * zoom + translation[0], + coords[1] * zoom + translation[1], + ) + for coords in poly + ] + pygame.draw.lines( + surface, color=p.color, points=poly, width=2, closed=False + ) + + for obj in self.drawlist: + for f in obj.fixtures: + trans = f.body.transform + path = [trans * v for v in f.shape.vertices] + path = [(coords[0], coords[1]) for coords in path] + path = [pygame.math.Vector2(c).rotate_rad(angle) for c in path] + path = [ + ( + coords[0] * zoom + translation[0], + coords[1] * zoom + translation[1], + ) + for coords in path + ] + color = [int(c * 255) for c in obj.color] + + pygame.draw.polygon(surface, color=color, points=path) + + if "phase" not in obj.__dict__: + continue + a1 = obj.phase + a2 = obj.phase + 1.2 # radians + s1 = math.sin(a1) + s2 = math.sin(a2) + c1 = math.cos(a1) + c2 = math.cos(a2) + if s1 > 0 and s2 > 0: + continue + if s1 > 0: + c1 = np.sign(c1) + if s2 > 0: + c2 = np.sign(c2) + white_poly = [ + (-WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE), + (+WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE), + (+WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE), + (-WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE), + ] + white_poly = [trans * v for v in white_poly] + + white_poly = [(coords[0], coords[1]) for coords in white_poly] + white_poly = [ + pygame.math.Vector2(c).rotate_rad(angle) for c in white_poly + ] + white_poly = [ + ( + coords[0] * zoom + translation[0], + coords[1] * zoom + translation[1], + ) + for coords in white_poly + ] + pygame.draw.polygon(surface, color=WHEEL_WHITE, points=white_poly) + + def _create_particle(self, point1, point2, grass): + class Particle: + pass + + p = Particle() + p.color = WHEEL_COLOR if not grass else MUD_COLOR + p.ttl = 1 + p.poly = [(point1[0], point1[1]), (point2[0], point2[1])] + p.grass = grass + self.particles.append(p) + while len(self.particles) > 30: + self.particles.pop(0) + return p + + def destroy(self): + self.world.DestroyBody(self.hull) + self.hull = None + for w in self.wheels: + self.world.DestroyBody(w) + self.wheels = [] diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/car_racing.py b/MLPY/Lib/site-packages/gym/envs/box2d/car_racing.py new file mode 100644 index 0000000000000000000000000000000000000000..bee7f3acd2f884f2b3ab728d94bae2ac4057d98b --- /dev/null +++ 
b/MLPY/Lib/site-packages/gym/envs/box2d/car_racing.py @@ -0,0 +1,828 @@ +__credits__ = ["Andrea PIERRÉ"] + +import math +from typing import Optional, Union + +import numpy as np + +import gym +from gym import spaces +from gym.envs.box2d.car_dynamics import Car +from gym.error import DependencyNotInstalled, InvalidAction +from gym.utils import EzPickle + +try: + import Box2D + from Box2D.b2 import contactListener, fixtureDef, polygonShape +except ImportError: + raise DependencyNotInstalled("box2D is not installed, run `pip install gym[box2d]`") + +try: + # As pygame is necessary for using the environment (reset and step) even without a render mode + # therefore, pygame is a necessary import for the environment. + import pygame + from pygame import gfxdraw +except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[box2d]`" + ) + + +STATE_W = 96 # less than Atari 160x192 +STATE_H = 96 +VIDEO_W = 600 +VIDEO_H = 400 +WINDOW_W = 1000 +WINDOW_H = 800 + +SCALE = 6.0 # Track scale +TRACK_RAD = 900 / SCALE # Track is heavily morphed circle with this radius +PLAYFIELD = 2000 / SCALE # Game over boundary +FPS = 50 # Frames per second +ZOOM = 2.7 # Camera zoom +ZOOM_FOLLOW = True # Set to False for fixed view (don't use zoom) + + +TRACK_DETAIL_STEP = 21 / SCALE +TRACK_TURN_RATE = 0.31 +TRACK_WIDTH = 40 / SCALE +BORDER = 8 / SCALE +BORDER_MIN_COUNT = 4 +GRASS_DIM = PLAYFIELD / 20.0 +MAX_SHAPE_DIM = ( + max(GRASS_DIM, TRACK_WIDTH, TRACK_DETAIL_STEP) * math.sqrt(2) * ZOOM * SCALE +) + + +class FrictionDetector(contactListener): + def __init__(self, env, lap_complete_percent): + contactListener.__init__(self) + self.env = env + self.lap_complete_percent = lap_complete_percent + + def BeginContact(self, contact): + self._contact(contact, True) + + def EndContact(self, contact): + self._contact(contact, False) + + def _contact(self, contact, begin): + tile = None + obj = None + u1 = contact.fixtureA.body.userData + u2 = contact.fixtureB.body.userData + if u1 and "road_friction" in u1.__dict__: + tile = u1 + obj = u2 + if u2 and "road_friction" in u2.__dict__: + tile = u2 + obj = u1 + if not tile: + return + + # inherit tile color from env + tile.color[:] = self.env.road_color + if not obj or "tiles" not in obj.__dict__: + return + if begin: + obj.tiles.add(tile) + if not tile.road_visited: + tile.road_visited = True + self.env.reward += 1000.0 / len(self.env.track) + self.env.tile_visited_count += 1 + + # Lap is considered completed if enough % of the track was covered + if ( + tile.idx == 0 + and self.env.tile_visited_count / len(self.env.track) + > self.lap_complete_percent + ): + self.env.new_lap = True + else: + obj.tiles.remove(tile) + + +class CarRacing(gym.Env, EzPickle): + """ + ### Description + The easiest control task to learn from pixels - a top-down + racing environment. The generated track is random every episode. + + Some indicators are shown at the bottom of the window along with the + state RGB buffer. From left to right: true speed, four ABS sensors, + steering wheel position, and gyroscope. + To play yourself (it's rather fast for humans), type: + ``` + python gym/envs/box2d/car_racing.py + ``` + Remember: it's a powerful rear-wheel drive car - don't press the accelerator + and turn at the same time. + + ### Action Space + If continuous: + There are 3 actions: steering (-1 is full left, +1 is full right), gas, and breaking. + If discrete: + There are 5 actions: do nothing, steer left, steer right, gas, brake. 
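As a hedged sketch of the two action interfaces described above (constructing the `CarRacing` class from this file directly rather than through a registered id; values are illustrative):

```python
import numpy as np

# Continuous control: a Box action of [steer, gas, brake].
env = CarRacing(continuous=True)
obs, info = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(np.array([0.0, 0.5, 0.0], dtype=np.float32))
env.close()

# Discrete control: Discrete(5) = do nothing, left, right, gas, brake.
env = CarRacing(continuous=False)
obs, info = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(3)   # 3 = gas
env.close()
```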
+ + ### Observation Space + State consists of 96x96 pixels. + + ### Rewards + The reward is -0.1 every frame and +1000/N for every track tile visited, + where N is the total number of tiles visited in the track. For example, + if you have finished in 732 frames, your reward is + 1000 - 0.1*732 = 926.8 points. + + ### Starting State + The car starts at rest in the center of the road. + + ### Episode Termination + The episode finishes when all of the tiles are visited. The car can also go + outside of the playfield - that is, far off the track, in which case it will + receive -100 reward and die. + + ### Arguments + `lap_complete_percent` dictates the percentage of tiles that must be visited by + the agent before a lap is considered complete. + + Passing `domain_randomize=True` enables the domain randomized variant of the environment. + In this scenario, the background and track colours are different on every reset. + + Passing `continuous=False` converts the environment to use discrete action space. + The discrete action space has 5 actions: [do nothing, left, right, gas, brake]. + + ### Reset Arguments + Passing the option `options["randomize"] = True` will change the current colour of the environment on demand. + Correspondingly, passing the option `options["randomize"] = False` will not change the current colour of the environment. + `domain_randomize` must be `True` on init for this argument to work. + Example usage: + ```py + env = gym.make("CarRacing-v1", domain_randomize=True) + + # normal reset, this changes the colour scheme by default + env.reset() + + # reset with colour scheme change + env.reset(options={"randomize": True}) + + # reset with no colour scheme change + env.reset(options={"randomize": False}) + ``` + + ### Version History + - v1: Change track completion logic and add domain randomization (0.24.0) + - v0: Original version + + ### References + - Chris Campbell (2014), http://www.iforce2d.net/b2dtut/top-down-car. 
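A hedged back-of-the-envelope check of the reward formula above (the frame count is the one used in the description; the tile count is invented):

```python
# Visiting all N tiles is worth exactly 1000 in total; every frame costs 0.1.
N = 300                       # hypothetical number of track tiles
frames = 732                  # the example frame count quoted above
total = N * (1000.0 / N) - 0.1 * frames
assert abs(total - 926.8) < 1e-6   # matches the 926.8 points quoted above
```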
+ + ### Credits + Created by Oleg Klimov + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "state_pixels", + ], + "render_fps": FPS, + } + + def __init__( + self, + render_mode: Optional[str] = None, + verbose: bool = False, + lap_complete_percent: float = 0.95, + domain_randomize: bool = False, + continuous: bool = True, + ): + EzPickle.__init__( + self, + render_mode, + verbose, + lap_complete_percent, + domain_randomize, + continuous, + ) + self.continuous = continuous + self.domain_randomize = domain_randomize + self.lap_complete_percent = lap_complete_percent + self._init_colors() + + self.contactListener_keepref = FrictionDetector(self, self.lap_complete_percent) + self.world = Box2D.b2World((0, 0), contactListener=self.contactListener_keepref) + self.screen: Optional[pygame.Surface] = None + self.surf = None + self.clock = None + self.isopen = True + self.invisible_state_window = None + self.invisible_video_window = None + self.road = None + self.car: Optional[Car] = None + self.reward = 0.0 + self.prev_reward = 0.0 + self.verbose = verbose + self.new_lap = False + self.fd_tile = fixtureDef( + shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)]) + ) + + # This will throw a warning in tests/envs/test_envs in utils/env_checker.py as the space is not symmetric + # or normalised however this is not possible here so ignore + if self.continuous: + self.action_space = spaces.Box( + np.array([-1, 0, 0]).astype(np.float32), + np.array([+1, +1, +1]).astype(np.float32), + ) # steer, gas, brake + else: + self.action_space = spaces.Discrete(5) + # do nothing, left, right, gas, brake + + self.observation_space = spaces.Box( + low=0, high=255, shape=(STATE_H, STATE_W, 3), dtype=np.uint8 + ) + + self.render_mode = render_mode + + def _destroy(self): + if not self.road: + return + for t in self.road: + self.world.DestroyBody(t) + self.road = [] + assert self.car is not None + self.car.destroy() + + def _init_colors(self): + if self.domain_randomize: + # domain randomize the bg and grass colour + self.road_color = self.np_random.uniform(0, 210, size=3) + + self.bg_color = self.np_random.uniform(0, 210, size=3) + + self.grass_color = np.copy(self.bg_color) + idx = self.np_random.integers(3) + self.grass_color[idx] += 20 + else: + # default colours + self.road_color = np.array([102, 102, 102]) + self.bg_color = np.array([102, 204, 102]) + self.grass_color = np.array([102, 230, 102]) + + def _reinit_colors(self, randomize): + assert ( + self.domain_randomize + ), "domain_randomize must be True to use this function." 
+ + if randomize: + # domain randomize the bg and grass colour + self.road_color = self.np_random.uniform(0, 210, size=3) + + self.bg_color = self.np_random.uniform(0, 210, size=3) + + self.grass_color = np.copy(self.bg_color) + idx = self.np_random.integers(3) + self.grass_color[idx] += 20 + + def _create_track(self): + CHECKPOINTS = 12 + + # Create checkpoints + checkpoints = [] + for c in range(CHECKPOINTS): + noise = self.np_random.uniform(0, 2 * math.pi * 1 / CHECKPOINTS) + alpha = 2 * math.pi * c / CHECKPOINTS + noise + rad = self.np_random.uniform(TRACK_RAD / 3, TRACK_RAD) + + if c == 0: + alpha = 0 + rad = 1.5 * TRACK_RAD + if c == CHECKPOINTS - 1: + alpha = 2 * math.pi * c / CHECKPOINTS + self.start_alpha = 2 * math.pi * (-0.5) / CHECKPOINTS + rad = 1.5 * TRACK_RAD + + checkpoints.append((alpha, rad * math.cos(alpha), rad * math.sin(alpha))) + self.road = [] + + # Go from one checkpoint to another to create track + x, y, beta = 1.5 * TRACK_RAD, 0, 0 + dest_i = 0 + laps = 0 + track = [] + no_freeze = 2500 + visited_other_side = False + while True: + alpha = math.atan2(y, x) + if visited_other_side and alpha > 0: + laps += 1 + visited_other_side = False + if alpha < 0: + visited_other_side = True + alpha += 2 * math.pi + + while True: # Find destination from checkpoints + failed = True + + while True: + dest_alpha, dest_x, dest_y = checkpoints[dest_i % len(checkpoints)] + if alpha <= dest_alpha: + failed = False + break + dest_i += 1 + if dest_i % len(checkpoints) == 0: + break + + if not failed: + break + + alpha -= 2 * math.pi + continue + + r1x = math.cos(beta) + r1y = math.sin(beta) + p1x = -r1y + p1y = r1x + dest_dx = dest_x - x # vector towards destination + dest_dy = dest_y - y + # destination vector projected on rad: + proj = r1x * dest_dx + r1y * dest_dy + while beta - alpha > 1.5 * math.pi: + beta -= 2 * math.pi + while beta - alpha < -1.5 * math.pi: + beta += 2 * math.pi + prev_beta = beta + proj *= SCALE + if proj > 0.3: + beta -= min(TRACK_TURN_RATE, abs(0.001 * proj)) + if proj < -0.3: + beta += min(TRACK_TURN_RATE, abs(0.001 * proj)) + x += p1x * TRACK_DETAIL_STEP + y += p1y * TRACK_DETAIL_STEP + track.append((alpha, prev_beta * 0.5 + beta * 0.5, x, y)) + if laps > 4: + break + no_freeze -= 1 + if no_freeze == 0: + break + + # Find closed loop range i1..i2, first loop should be ignored, second is OK + i1, i2 = -1, -1 + i = len(track) + while True: + i -= 1 + if i == 0: + return False # Failed + pass_through_start = ( + track[i][0] > self.start_alpha and track[i - 1][0] <= self.start_alpha + ) + if pass_through_start and i2 == -1: + i2 = i + elif pass_through_start and i1 == -1: + i1 = i + break + if self.verbose: + print("Track generation: %i..%i -> %i-tiles track" % (i1, i2, i2 - i1)) + assert i1 != -1 + assert i2 != -1 + + track = track[i1 : i2 - 1] + + first_beta = track[0][1] + first_perp_x = math.cos(first_beta) + first_perp_y = math.sin(first_beta) + # Length of perpendicular jump to put together head and tail + well_glued_together = np.sqrt( + np.square(first_perp_x * (track[0][2] - track[-1][2])) + + np.square(first_perp_y * (track[0][3] - track[-1][3])) + ) + if well_glued_together > TRACK_DETAIL_STEP: + return False + + # Red-white border on hard turns + border = [False] * len(track) + for i in range(len(track)): + good = True + oneside = 0 + for neg in range(BORDER_MIN_COUNT): + beta1 = track[i - neg - 0][1] + beta2 = track[i - neg - 1][1] + good &= abs(beta1 - beta2) > TRACK_TURN_RATE * 0.2 + oneside += np.sign(beta1 - beta2) + good &= abs(oneside) == 
BORDER_MIN_COUNT + border[i] = good + for i in range(len(track)): + for neg in range(BORDER_MIN_COUNT): + border[i - neg] |= border[i] + + # Create tiles + for i in range(len(track)): + alpha1, beta1, x1, y1 = track[i] + alpha2, beta2, x2, y2 = track[i - 1] + road1_l = ( + x1 - TRACK_WIDTH * math.cos(beta1), + y1 - TRACK_WIDTH * math.sin(beta1), + ) + road1_r = ( + x1 + TRACK_WIDTH * math.cos(beta1), + y1 + TRACK_WIDTH * math.sin(beta1), + ) + road2_l = ( + x2 - TRACK_WIDTH * math.cos(beta2), + y2 - TRACK_WIDTH * math.sin(beta2), + ) + road2_r = ( + x2 + TRACK_WIDTH * math.cos(beta2), + y2 + TRACK_WIDTH * math.sin(beta2), + ) + vertices = [road1_l, road1_r, road2_r, road2_l] + self.fd_tile.shape.vertices = vertices + t = self.world.CreateStaticBody(fixtures=self.fd_tile) + t.userData = t + c = 0.01 * (i % 3) * 255 + t.color = self.road_color + c + t.road_visited = False + t.road_friction = 1.0 + t.idx = i + t.fixtures[0].sensor = True + self.road_poly.append(([road1_l, road1_r, road2_r, road2_l], t.color)) + self.road.append(t) + if border[i]: + side = np.sign(beta2 - beta1) + b1_l = ( + x1 + side * TRACK_WIDTH * math.cos(beta1), + y1 + side * TRACK_WIDTH * math.sin(beta1), + ) + b1_r = ( + x1 + side * (TRACK_WIDTH + BORDER) * math.cos(beta1), + y1 + side * (TRACK_WIDTH + BORDER) * math.sin(beta1), + ) + b2_l = ( + x2 + side * TRACK_WIDTH * math.cos(beta2), + y2 + side * TRACK_WIDTH * math.sin(beta2), + ) + b2_r = ( + x2 + side * (TRACK_WIDTH + BORDER) * math.cos(beta2), + y2 + side * (TRACK_WIDTH + BORDER) * math.sin(beta2), + ) + self.road_poly.append( + ( + [b1_l, b1_r, b2_r, b2_l], + (255, 255, 255) if i % 2 == 0 else (255, 0, 0), + ) + ) + self.track = track + return True + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self._destroy() + self.world.contactListener_bug_workaround = FrictionDetector( + self, self.lap_complete_percent + ) + self.world.contactListener = self.world.contactListener_bug_workaround + self.reward = 0.0 + self.prev_reward = 0.0 + self.tile_visited_count = 0 + self.t = 0.0 + self.new_lap = False + self.road_poly = [] + + if self.domain_randomize: + randomize = True + if isinstance(options, dict): + if "randomize" in options: + randomize = options["randomize"] + + self._reinit_colors(randomize) + + while True: + success = self._create_track() + if success: + break + if self.verbose: + print( + "retry to generate track (normal if there are not many" + "instances of this message)" + ) + self.car = Car(self.world, *self.track[0][1:4]) + + if self.render_mode == "human": + self.render() + return self.step(None)[0], {} + + def step(self, action: Union[np.ndarray, int]): + assert self.car is not None + if action is not None: + if self.continuous: + self.car.steer(-action[0]) + self.car.gas(action[1]) + self.car.brake(action[2]) + else: + if not self.action_space.contains(action): + raise InvalidAction( + f"you passed the invalid action `{action}`. 
" + f"The supported action_space is `{self.action_space}`" + ) + self.car.steer(-0.6 * (action == 1) + 0.6 * (action == 2)) + self.car.gas(0.2 * (action == 3)) + self.car.brake(0.8 * (action == 4)) + + self.car.step(1.0 / FPS) + self.world.Step(1.0 / FPS, 6 * 30, 2 * 30) + self.t += 1.0 / FPS + + self.state = self._render("state_pixels") + + step_reward = 0 + terminated = False + truncated = False + if action is not None: # First step without action, called from reset() + self.reward -= 0.1 + # We actually don't want to count fuel spent, we want car to be faster. + # self.reward -= 10 * self.car.fuel_spent / ENGINE_POWER + self.car.fuel_spent = 0.0 + step_reward = self.reward - self.prev_reward + self.prev_reward = self.reward + if self.tile_visited_count == len(self.track) or self.new_lap: + # Truncation due to finishing lap + # This should not be treated as a failure + # but like a timeout + truncated = True + x, y = self.car.hull.position + if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD: + terminated = True + step_reward = -100 + + if self.render_mode == "human": + self.render() + return self.state, step_reward, terminated, truncated, {} + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + else: + return self._render(self.render_mode) + + def _render(self, mode: str): + assert mode in self.metadata["render_modes"] + + pygame.font.init() + if self.screen is None and mode == "human": + pygame.init() + pygame.display.init() + self.screen = pygame.display.set_mode((WINDOW_W, WINDOW_H)) + if self.clock is None: + self.clock = pygame.time.Clock() + + if "t" not in self.__dict__: + return # reset() not called yet + + self.surf = pygame.Surface((WINDOW_W, WINDOW_H)) + + assert self.car is not None + # computing transformations + angle = -self.car.hull.angle + # Animating first second zoom. 
+ zoom = 0.1 * SCALE * max(1 - self.t, 0) + ZOOM * SCALE * min(self.t, 1) + scroll_x = -(self.car.hull.position[0]) * zoom + scroll_y = -(self.car.hull.position[1]) * zoom + trans = pygame.math.Vector2((scroll_x, scroll_y)).rotate_rad(angle) + trans = (WINDOW_W / 2 + trans[0], WINDOW_H / 4 + trans[1]) + + self._render_road(zoom, trans, angle) + self.car.draw( + self.surf, + zoom, + trans, + angle, + mode not in ["state_pixels_list", "state_pixels"], + ) + + self.surf = pygame.transform.flip(self.surf, False, True) + + # showing stats + self._render_indicators(WINDOW_W, WINDOW_H) + + font = pygame.font.Font(pygame.font.get_default_font(), 42) + text = font.render("%04i" % self.reward, True, (255, 255, 255), (0, 0, 0)) + text_rect = text.get_rect() + text_rect.center = (60, WINDOW_H - WINDOW_H * 2.5 / 40.0) + self.surf.blit(text, text_rect) + + if mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + assert self.screen is not None + self.screen.fill(0) + self.screen.blit(self.surf, (0, 0)) + pygame.display.flip() + + if mode == "rgb_array": + return self._create_image_array(self.surf, (VIDEO_W, VIDEO_H)) + elif mode == "state_pixels": + return self._create_image_array(self.surf, (STATE_W, STATE_H)) + else: + return self.isopen + + def _render_road(self, zoom, translation, angle): + bounds = PLAYFIELD + field = [ + (bounds, bounds), + (bounds, -bounds), + (-bounds, -bounds), + (-bounds, bounds), + ] + + # draw background + self._draw_colored_polygon( + self.surf, field, self.bg_color, zoom, translation, angle, clip=False + ) + + # draw grass patches + grass = [] + for x in range(-20, 20, 2): + for y in range(-20, 20, 2): + grass.append( + [ + (GRASS_DIM * x + GRASS_DIM, GRASS_DIM * y + 0), + (GRASS_DIM * x + 0, GRASS_DIM * y + 0), + (GRASS_DIM * x + 0, GRASS_DIM * y + GRASS_DIM), + (GRASS_DIM * x + GRASS_DIM, GRASS_DIM * y + GRASS_DIM), + ] + ) + for poly in grass: + self._draw_colored_polygon( + self.surf, poly, self.grass_color, zoom, translation, angle + ) + + # draw road + for poly, color in self.road_poly: + # converting to pixel coordinates + poly = [(p[0], p[1]) for p in poly] + color = [int(c) for c in color] + self._draw_colored_polygon(self.surf, poly, color, zoom, translation, angle) + + def _render_indicators(self, W, H): + s = W / 40.0 + h = H / 40.0 + color = (0, 0, 0) + polygon = [(W, H), (W, H - 5 * h), (0, H - 5 * h), (0, H)] + pygame.draw.polygon(self.surf, color=color, points=polygon) + + def vertical_ind(place, val): + return [ + (place * s, H - (h + h * val)), + ((place + 1) * s, H - (h + h * val)), + ((place + 1) * s, H - h), + ((place + 0) * s, H - h), + ] + + def horiz_ind(place, val): + return [ + ((place + 0) * s, H - 4 * h), + ((place + val) * s, H - 4 * h), + ((place + val) * s, H - 2 * h), + ((place + 0) * s, H - 2 * h), + ] + + assert self.car is not None + true_speed = np.sqrt( + np.square(self.car.hull.linearVelocity[0]) + + np.square(self.car.hull.linearVelocity[1]) + ) + + # simple wrapper to render if the indicator value is above a threshold + def render_if_min(value, points, color): + if abs(value) > 1e-4: + pygame.draw.polygon(self.surf, points=points, color=color) + + render_if_min(true_speed, vertical_ind(5, 0.02 * true_speed), (255, 255, 255)) + # ABS sensors + render_if_min( + self.car.wheels[0].omega, + vertical_ind(7, 0.01 * self.car.wheels[0].omega), + (0, 0, 255), + ) + render_if_min( + self.car.wheels[1].omega, + vertical_ind(8, 0.01 * self.car.wheels[1].omega), + (0, 0, 255), + ) + render_if_min( + 
self.car.wheels[2].omega, + vertical_ind(9, 0.01 * self.car.wheels[2].omega), + (51, 0, 255), + ) + render_if_min( + self.car.wheels[3].omega, + vertical_ind(10, 0.01 * self.car.wheels[3].omega), + (51, 0, 255), + ) + + render_if_min( + self.car.wheels[0].joint.angle, + horiz_ind(20, -10.0 * self.car.wheels[0].joint.angle), + (0, 255, 0), + ) + render_if_min( + self.car.hull.angularVelocity, + horiz_ind(30, -0.8 * self.car.hull.angularVelocity), + (255, 0, 0), + ) + + def _draw_colored_polygon( + self, surface, poly, color, zoom, translation, angle, clip=True + ): + poly = [pygame.math.Vector2(c).rotate_rad(angle) for c in poly] + poly = [ + (c[0] * zoom + translation[0], c[1] * zoom + translation[1]) for c in poly + ] + # This checks if the polygon is out of bounds of the screen, and we skip drawing if so. + # Instead of calculating exactly if the polygon and screen overlap, + # we simply check if the polygon is in a larger bounding box whose dimension + # is greater than the screen by MAX_SHAPE_DIM, which is the maximum + # diagonal length of an environment object + if not clip or any( + (-MAX_SHAPE_DIM <= coord[0] <= WINDOW_W + MAX_SHAPE_DIM) + and (-MAX_SHAPE_DIM <= coord[1] <= WINDOW_H + MAX_SHAPE_DIM) + for coord in poly + ): + gfxdraw.aapolygon(self.surf, poly, color) + gfxdraw.filled_polygon(self.surf, poly, color) + + def _create_image_array(self, screen, size): + scaled_screen = pygame.transform.smoothscale(screen, size) + return np.transpose( + np.array(pygame.surfarray.pixels3d(scaled_screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + pygame.display.quit() + self.isopen = False + pygame.quit() + + +if __name__ == "__main__": + a = np.array([0.0, 0.0, 0.0]) + + def register_input(): + global quit, restart + for event in pygame.event.get(): + if event.type == pygame.KEYDOWN: + if event.key == pygame.K_LEFT: + a[0] = -1.0 + if event.key == pygame.K_RIGHT: + a[0] = +1.0 + if event.key == pygame.K_UP: + a[1] = +1.0 + if event.key == pygame.K_DOWN: + a[2] = +0.8 # set 1.0 for wheels to block to zero rotation + if event.key == pygame.K_RETURN: + restart = True + if event.key == pygame.K_ESCAPE: + quit = True + + if event.type == pygame.KEYUP: + if event.key == pygame.K_LEFT: + a[0] = 0 + if event.key == pygame.K_RIGHT: + a[0] = 0 + if event.key == pygame.K_UP: + a[1] = 0 + if event.key == pygame.K_DOWN: + a[2] = 0 + + if event.type == pygame.QUIT: + quit = True + + env = CarRacing(render_mode="human") + + quit = False + while not quit: + env.reset() + total_reward = 0.0 + steps = 0 + restart = False + while True: + register_input() + s, r, terminated, truncated, info = env.step(a) + total_reward += r + if steps % 200 == 0 or terminated or truncated: + print("\naction " + str([f"{x:+0.2f}" for x in a])) + print(f"step {steps} total_reward {total_reward:+0.2f}") + steps += 1 + if terminated or truncated or restart or quit: + break + env.close() diff --git a/MLPY/Lib/site-packages/gym/envs/box2d/lunar_lander.py b/MLPY/Lib/site-packages/gym/envs/box2d/lunar_lander.py new file mode 100644 index 0000000000000000000000000000000000000000..fb8e8e0f93469e5d5d570703b89f0b082e4d57a5 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/box2d/lunar_lander.py @@ -0,0 +1,817 @@ +__credits__ = ["Andrea PIERRÉ"] + +import math +import warnings +from typing import TYPE_CHECKING, Optional + +import numpy as np + +import gym +from gym import error, spaces +from gym.error import DependencyNotInstalled +from gym.utils import EzPickle, colorize +from 
gym.utils.step_api_compatibility import step_api_compatibility + +try: + import Box2D + from Box2D.b2 import ( + circleShape, + contactListener, + edgeShape, + fixtureDef, + polygonShape, + revoluteJointDef, + ) +except ImportError: + raise DependencyNotInstalled("box2d is not installed, run `pip install gym[box2d]`") + + +if TYPE_CHECKING: + import pygame + + +FPS = 50 +SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well + +MAIN_ENGINE_POWER = 13.0 +SIDE_ENGINE_POWER = 0.6 + +INITIAL_RANDOM = 1000.0 # Set 1500 to make game harder + +LANDER_POLY = [(-14, +17), (-17, 0), (-17, -10), (+17, -10), (+17, 0), (+14, +17)] +LEG_AWAY = 20 +LEG_DOWN = 18 +LEG_W, LEG_H = 2, 8 +LEG_SPRING_TORQUE = 40 + +SIDE_ENGINE_HEIGHT = 14.0 +SIDE_ENGINE_AWAY = 12.0 + +VIEWPORT_W = 600 +VIEWPORT_H = 400 + + +class ContactDetector(contactListener): + def __init__(self, env): + contactListener.__init__(self) + self.env = env + + def BeginContact(self, contact): + if ( + self.env.lander == contact.fixtureA.body + or self.env.lander == contact.fixtureB.body + ): + self.env.game_over = True + for i in range(2): + if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]: + self.env.legs[i].ground_contact = True + + def EndContact(self, contact): + for i in range(2): + if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]: + self.env.legs[i].ground_contact = False + + +class LunarLander(gym.Env, EzPickle): + """ + ### Description + This environment is a classic rocket trajectory optimization problem. + According to Pontryagin's maximum principle, it is optimal to fire the + engine at full throttle or turn it off. This is the reason why this + environment has discrete actions: engine on or off. + + There are two environment versions: discrete or continuous. + The landing pad is always at coordinates (0,0). The coordinates are the + first two numbers in the state vector. + Landing outside of the landing pad is possible. Fuel is infinite, so an agent + can learn to fly and then land on its first attempt. + + To see a heuristic landing, run: + ``` + python gym/envs/box2d/lunar_lander.py + ``` + + + + ### Action Space + There are four discrete actions available: do nothing, fire left + orientation engine, fire main engine, fire right orientation engine. + + ### Observation Space + The state is an 8-dimensional vector: the coordinates of the lander in `x` & `y`, its linear + velocities in `x` & `y`, its angle, its angular velocity, and two booleans + that represent whether each leg is in contact with the ground or not. + + ### Rewards + After every step a reward is granted. The total reward of an episode is the + sum of the rewards for all the steps within that episode. + + For each step, the reward: + - is increased/decreased the closer/further the lander is to the landing pad. + - is increased/decreased the slower/faster the lander is moving. + - is decreased the more the lander is tilted (angle not horizontal). + - is increased by 10 points for each leg that is in contact with the ground. + - is decreased by 0.03 points each frame a side engine is firing. + - is decreased by 0.3 points each frame the main engine is firing. + + The episode receive an additional reward of -100 or +100 points for crashing or landing safely respectively. + + An episode is considered a solution if it scores at least 200 points. + + ### Starting State + The lander starts at the top center of the viewport with a random initial + force applied to its center of mass. 
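As a hedged usage sketch of the interface described above (constructing the `LunarLander` class from this file directly; the action index follows the docstring's ordering and all values are illustrative):

```python
# Minimal sketch: one step of the default discrete LunarLander and the
# 8-dimensional state layout described above.
env = LunarLander()
s, info = env.reset(seed=0)
x, y, vx, vy, angle, angular_vel, leg1_contact, leg2_contact = s
s, reward, terminated, truncated, info = env.step(2)   # 2 = fire main engine
env.close()
```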
+ + ### Episode Termination + The episode finishes if: + 1) the lander crashes (the lander body gets in contact with the moon); + 2) the lander gets outside of the viewport (`x` coordinate is greater than 1); + 3) the lander is not awake. From the [Box2D docs](https://box2d.org/documentation/md__d_1__git_hub_box2d_docs_dynamics.html#autotoc_md61), + a body which is not awake is a body which doesn't move and doesn't + collide with any other body: + > When Box2D determines that a body (or group of bodies) has come to rest, + > the body enters a sleep state which has very little CPU overhead. If a + > body is awake and collides with a sleeping body, then the sleeping body + > wakes up. Bodies will also wake up if a joint or contact attached to + > them is destroyed. + + ### Arguments + To use to the _continuous_ environment, you need to specify the + `continuous=True` argument like below: + ```python + import gym + env = gym.make( + "LunarLander-v2", + continuous: bool = False, + gravity: float = -10.0, + enable_wind: bool = False, + wind_power: float = 15.0, + turbulence_power: float = 1.5, + ) + ``` + If `continuous=True` is passed, continuous actions (corresponding to the throttle of the engines) will be used and the + action space will be `Box(-1, +1, (2,), dtype=np.float32)`. + The first coordinate of an action determines the throttle of the main engine, while the second + coordinate specifies the throttle of the lateral boosters. + Given an action `np.array([main, lateral])`, the main engine will be turned off completely if + `main < 0` and the throttle scales affinely from 50% to 100% for `0 <= main <= 1` (in particular, the + main engine doesn't work with less than 50% power). + Similarly, if `-0.5 < lateral < 0.5`, the lateral boosters will not fire at all. If `lateral < -0.5`, the left + booster will fire, and if `lateral > 0.5`, the right booster will fire. Again, the throttle scales affinely + from 50% to 100% between -1 and -0.5 (and 0.5 and 1, respectively). + + `gravity` dictates the gravitational constant, this is bounded to be within 0 and -12. + + If `enable_wind=True` is passed, there will be wind effects applied to the lander. + The wind is generated using the function `tanh(sin(2 k (t+C)) + sin(pi k (t+C)))`. + `k` is set to 0.01. + `C` is sampled randomly between -9999 and 9999. + + `wind_power` dictates the maximum magnitude of linear wind applied to the craft. The recommended value for `wind_power` is between 0.0 and 20.0. + `turbulence_power` dictates the maximum magnitude of rotational wind applied to the craft. The recommended value for `turbulence_power` is between 0.0 and 2.0. + + ### Version History + - v2: Count energy spent and in v0.24, added turbulance with wind power and turbulence_power parameters + - v1: Legs contact with ground added in state vector; contact with ground + give +10 reward points, and -10 if then lose contact; reward + renormalized to 200; harder initial random push. 
+ - v0: Initial version + + + + ### Credits + Created by Oleg Klimov + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": FPS, + } + + def __init__( + self, + render_mode: Optional[str] = None, + continuous: bool = False, + gravity: float = -10.0, + enable_wind: bool = False, + wind_power: float = 15.0, + turbulence_power: float = 1.5, + ): + EzPickle.__init__( + self, + render_mode, + continuous, + gravity, + enable_wind, + wind_power, + turbulence_power, + ) + + assert ( + -12.0 < gravity and gravity < 0.0 + ), f"gravity (current value: {gravity}) must be between -12 and 0" + self.gravity = gravity + + if 0.0 > wind_power or wind_power > 20.0: + warnings.warn( + colorize( + f"WARN: wind_power value is recommended to be between 0.0 and 20.0, (current value: {wind_power})", + "yellow", + ), + ) + self.wind_power = wind_power + + if 0.0 > turbulence_power or turbulence_power > 2.0: + warnings.warn( + colorize( + f"WARN: turbulence_power value is recommended to be between 0.0 and 2.0, (current value: {turbulence_power})", + "yellow", + ), + ) + self.turbulence_power = turbulence_power + + self.enable_wind = enable_wind + self.wind_idx = np.random.randint(-9999, 9999) + self.torque_idx = np.random.randint(-9999, 9999) + + self.screen: pygame.Surface = None + self.clock = None + self.isopen = True + self.world = Box2D.b2World(gravity=(0, gravity)) + self.moon = None + self.lander: Optional[Box2D.b2Body] = None + self.particles = [] + + self.prev_reward = None + + self.continuous = continuous + + low = np.array( + [ + # these are bounds for position + # realistically the environment should have ended + # long before we reach more than 50% outside + -1.5, + -1.5, + # velocity bounds is 5x rated speed + -5.0, + -5.0, + -math.pi, + -5.0, + -0.0, + -0.0, + ] + ).astype(np.float32) + high = np.array( + [ + # these are bounds for position + # realistically the environment should have ended + # long before we reach more than 50% outside + 1.5, + 1.5, + # velocity bounds is 5x rated speed + 5.0, + 5.0, + math.pi, + 5.0, + 1.0, + 1.0, + ] + ).astype(np.float32) + + # useful range is -1 .. +1, but spikes can be higher + self.observation_space = spaces.Box(low, high) + + if self.continuous: + # Action is two floats [main engine, left-right engines]. + # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power. 
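A hedged illustration of the comment above, mirroring the main-engine throttle mapping that `step()` applies later in this file (the helper function and its inputs are invented for illustration):

```python
import numpy as np

def main_engine_power(a0: float) -> float:
    # Illustrative stand-in for the throttle mapping used in step():
    # off for a0 <= 0, otherwise power scales from 50% to 100%.
    if a0 <= 0.0:
        return 0.0
    return (np.clip(a0, 0.0, 1.0) + 1.0) * 0.5

print(main_engine_power(-0.3))   # 0.0  (main engine off)
print(main_engine_power(0.2))    # 0.6  (60% power)
print(main_engine_power(1.0))    # 1.0  (full power)
```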
+ # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off + self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32) + else: + # Nop, fire left engine, main engine, right engine + self.action_space = spaces.Discrete(4) + + self.render_mode = render_mode + + def _destroy(self): + if not self.moon: + return + self.world.contactListener = None + self._clean_particles(True) + self.world.DestroyBody(self.moon) + self.moon = None + self.world.DestroyBody(self.lander) + self.lander = None + self.world.DestroyBody(self.legs[0]) + self.world.DestroyBody(self.legs[1]) + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self._destroy() + self.world.contactListener_keepref = ContactDetector(self) + self.world.contactListener = self.world.contactListener_keepref + self.game_over = False + self.prev_shaping = None + + W = VIEWPORT_W / SCALE + H = VIEWPORT_H / SCALE + + # terrain + CHUNKS = 11 + height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,)) + chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)] + self.helipad_x1 = chunk_x[CHUNKS // 2 - 1] + self.helipad_x2 = chunk_x[CHUNKS // 2 + 1] + self.helipad_y = H / 4 + height[CHUNKS // 2 - 2] = self.helipad_y + height[CHUNKS // 2 - 1] = self.helipad_y + height[CHUNKS // 2 + 0] = self.helipad_y + height[CHUNKS // 2 + 1] = self.helipad_y + height[CHUNKS // 2 + 2] = self.helipad_y + smooth_y = [ + 0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) + for i in range(CHUNKS) + ] + + self.moon = self.world.CreateStaticBody( + shapes=edgeShape(vertices=[(0, 0), (W, 0)]) + ) + self.sky_polys = [] + for i in range(CHUNKS - 1): + p1 = (chunk_x[i], smooth_y[i]) + p2 = (chunk_x[i + 1], smooth_y[i + 1]) + self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1) + self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)]) + + self.moon.color1 = (0.0, 0.0, 0.0) + self.moon.color2 = (0.0, 0.0, 0.0) + + initial_y = VIEWPORT_H / SCALE + self.lander: Box2D.b2Body = self.world.CreateDynamicBody( + position=(VIEWPORT_W / SCALE / 2, initial_y), + angle=0.0, + fixtures=fixtureDef( + shape=polygonShape( + vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY] + ), + density=5.0, + friction=0.1, + categoryBits=0x0010, + maskBits=0x001, # collide only with ground + restitution=0.0, + ), # 0.99 bouncy + ) + self.lander.color1 = (128, 102, 230) + self.lander.color2 = (77, 77, 128) + self.lander.ApplyForceToCenter( + ( + self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), + self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), + ), + True, + ) + + self.legs = [] + for i in [-1, +1]: + leg = self.world.CreateDynamicBody( + position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y), + angle=(i * 0.05), + fixtures=fixtureDef( + shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)), + density=1.0, + restitution=0.0, + categoryBits=0x0020, + maskBits=0x001, + ), + ) + leg.ground_contact = False + leg.color1 = (128, 102, 230) + leg.color2 = (77, 77, 128) + rjd = revoluteJointDef( + bodyA=self.lander, + bodyB=leg, + localAnchorA=(0, 0), + localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE), + enableMotor=True, + enableLimit=True, + maxMotorTorque=LEG_SPRING_TORQUE, + motorSpeed=+0.3 * i, # low enough not to jump back into the sky + ) + if i == -1: + rjd.lowerAngle = ( + +0.9 - 0.5 + ) # The most esoteric numbers here, angled legs have freedom to travel within + rjd.upperAngle = +0.9 + else: + rjd.lowerAngle = -0.9 + rjd.upperAngle = 
-0.9 + 0.5 + leg.joint = self.world.CreateJoint(rjd) + self.legs.append(leg) + + self.drawlist = [self.lander] + self.legs + + if self.render_mode == "human": + self.render() + return self.step(np.array([0, 0]) if self.continuous else 0)[0], {} + + def _create_particle(self, mass, x, y, ttl): + p = self.world.CreateDynamicBody( + position=(x, y), + angle=0.0, + fixtures=fixtureDef( + shape=circleShape(radius=2 / SCALE, pos=(0, 0)), + density=mass, + friction=0.1, + categoryBits=0x0100, + maskBits=0x001, # collide only with ground + restitution=0.3, + ), + ) + p.ttl = ttl + self.particles.append(p) + self._clean_particles(False) + return p + + def _clean_particles(self, all): + while self.particles and (all or self.particles[0].ttl < 0): + self.world.DestroyBody(self.particles.pop(0)) + + def step(self, action): + assert self.lander is not None + + # Update wind + assert self.lander is not None, "You forgot to call reset()" + if self.enable_wind and not ( + self.legs[0].ground_contact or self.legs[1].ground_contact + ): + # the function used for wind is tanh(sin(2 k x) + sin(pi k x)), + # which is proven to never be periodic, k = 0.01 + wind_mag = ( + math.tanh( + math.sin(0.02 * self.wind_idx) + + (math.sin(math.pi * 0.01 * self.wind_idx)) + ) + * self.wind_power + ) + self.wind_idx += 1 + self.lander.ApplyForceToCenter( + (wind_mag, 0.0), + True, + ) + + # the function used for torque is tanh(sin(2 k x) + sin(pi k x)), + # which is proven to never be periodic, k = 0.01 + torque_mag = math.tanh( + math.sin(0.02 * self.torque_idx) + + (math.sin(math.pi * 0.01 * self.torque_idx)) + ) * (self.turbulence_power) + self.torque_idx += 1 + self.lander.ApplyTorque( + (torque_mag), + True, + ) + + if self.continuous: + action = np.clip(action, -1, +1).astype(np.float32) + else: + assert self.action_space.contains( + action + ), f"{action!r} ({type(action)}) invalid " + + # Engines + tip = (math.sin(self.lander.angle), math.cos(self.lander.angle)) + side = (-tip[1], tip[0]) + dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)] + + m_power = 0.0 + if (self.continuous and action[0] > 0.0) or ( + not self.continuous and action == 2 + ): + # Main engine + if self.continuous: + m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0 + assert m_power >= 0.5 and m_power <= 1.0 + else: + m_power = 1.0 + # 4 is move a bit downwards, +-2 for randomness + ox = tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[1] + oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1] + impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy) + p = self._create_particle( + 3.5, # 3.5 is here to make particle speed adequate + impulse_pos[0], + impulse_pos[1], + m_power, + ) # particles are just a decoration + p.ApplyLinearImpulse( + (ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power), + impulse_pos, + True, + ) + self.lander.ApplyLinearImpulse( + (-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power), + impulse_pos, + True, + ) + + s_power = 0.0 + if (self.continuous and np.abs(action[1]) > 0.5) or ( + not self.continuous and action in [1, 3] + ): + # Orientation engines + if self.continuous: + direction = np.sign(action[1]) + s_power = np.clip(np.abs(action[1]), 0.5, 1.0) + assert s_power >= 0.5 and s_power <= 1.0 + else: + direction = action - 2 + s_power = 1.0 + ox = tip[0] * dispersion[0] + side[0] * ( + 3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE + ) + oy = -tip[1] * dispersion[0] - side[1] * ( + 3 
* dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE + ) + impulse_pos = ( + self.lander.position[0] + ox - tip[0] * 17 / SCALE, + self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE, + ) + p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power) + p.ApplyLinearImpulse( + (ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power), + impulse_pos, + True, + ) + self.lander.ApplyLinearImpulse( + (-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power), + impulse_pos, + True, + ) + + self.world.Step(1.0 / FPS, 6 * 30, 2 * 30) + + pos = self.lander.position + vel = self.lander.linearVelocity + state = [ + (pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2), + (pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_H / SCALE / 2), + vel.x * (VIEWPORT_W / SCALE / 2) / FPS, + vel.y * (VIEWPORT_H / SCALE / 2) / FPS, + self.lander.angle, + 20.0 * self.lander.angularVelocity / FPS, + 1.0 if self.legs[0].ground_contact else 0.0, + 1.0 if self.legs[1].ground_contact else 0.0, + ] + assert len(state) == 8 + + reward = 0 + shaping = ( + -100 * np.sqrt(state[0] * state[0] + state[1] * state[1]) + - 100 * np.sqrt(state[2] * state[2] + state[3] * state[3]) + - 100 * abs(state[4]) + + 10 * state[6] + + 10 * state[7] + ) # And ten points for legs contact, the idea is if you + # lose contact again after landing, you get negative reward + if self.prev_shaping is not None: + reward = shaping - self.prev_shaping + self.prev_shaping = shaping + + reward -= ( + m_power * 0.30 + ) # less fuel spent is better, about -30 for heuristic landing + reward -= s_power * 0.03 + + terminated = False + if self.game_over or abs(state[0]) >= 1.0: + terminated = True + reward = -100 + if not self.lander.awake: + terminated = True + reward = +100 + + if self.render_mode == "human": + self.render() + return np.array(state, dtype=np.float32), reward, terminated, False, {} + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[box2d]`" + ) + + if self.screen is None and self.render_mode == "human": + pygame.init() + pygame.display.init() + self.screen = pygame.display.set_mode((VIEWPORT_W, VIEWPORT_H)) + if self.clock is None: + self.clock = pygame.time.Clock() + + self.surf = pygame.Surface((VIEWPORT_W, VIEWPORT_H)) + + pygame.transform.scale(self.surf, (SCALE, SCALE)) + pygame.draw.rect(self.surf, (255, 255, 255), self.surf.get_rect()) + + for obj in self.particles: + obj.ttl -= 0.15 + obj.color1 = ( + int(max(0.2, 0.15 + obj.ttl) * 255), + int(max(0.2, 0.5 * obj.ttl) * 255), + int(max(0.2, 0.5 * obj.ttl) * 255), + ) + obj.color2 = ( + int(max(0.2, 0.15 + obj.ttl) * 255), + int(max(0.2, 0.5 * obj.ttl) * 255), + int(max(0.2, 0.5 * obj.ttl) * 255), + ) + + self._clean_particles(False) + + for p in self.sky_polys: + scaled_poly = [] + for coord in p: + scaled_poly.append((coord[0] * SCALE, coord[1] * SCALE)) + pygame.draw.polygon(self.surf, (0, 0, 0), scaled_poly) + gfxdraw.aapolygon(self.surf, scaled_poly, (0, 0, 0)) + + for obj in self.particles + self.drawlist: + for f in obj.fixtures: + trans = f.body.transform + if type(f.shape) is circleShape: + pygame.draw.circle( + self.surf, + color=obj.color1, + center=trans * f.shape.pos * SCALE, + radius=f.shape.radius * SCALE, + ) + pygame.draw.circle( + self.surf, + color=obj.color2, + center=trans * f.shape.pos * SCALE, + radius=f.shape.radius * SCALE, + ) + + else: + path = [trans * v * SCALE for v in f.shape.vertices] + pygame.draw.polygon(self.surf, color=obj.color1, points=path) + gfxdraw.aapolygon(self.surf, path, obj.color1) + pygame.draw.aalines( + self.surf, color=obj.color2, points=path, closed=True + ) + + for x in [self.helipad_x1, self.helipad_x2]: + x = x * SCALE + flagy1 = self.helipad_y * SCALE + flagy2 = flagy1 + 50 + pygame.draw.line( + self.surf, + color=(255, 255, 255), + start_pos=(x, flagy1), + end_pos=(x, flagy2), + width=1, + ) + pygame.draw.polygon( + self.surf, + color=(204, 204, 0), + points=[ + (x, flagy2), + (x, flagy2 - 10), + (x + 25, flagy2 - 5), + ], + ) + gfxdraw.aapolygon( + self.surf, + [(x, flagy2), (x, flagy2 - 10), (x + 25, flagy2 - 5)], + (204, 204, 0), + ) + + self.surf = pygame.transform.flip(self.surf, False, True) + + if self.render_mode == "human": + assert self.screen is not None + self.screen.blit(self.surf, (0, 0)) + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.surf)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False + + +def heuristic(env, s): + """ + The heuristic for + 1. Testing + 2. Demonstration rollout. + + Args: + env: The environment + s (list): The state. Attributes: + s[0] is the horizontal coordinate + s[1] is the vertical coordinate + s[2] is the horizontal speed + s[3] is the vertical speed + s[4] is the angle + s[5] is the angular speed + s[6] 1 if first leg has contact, else 0 + s[7] 1 if second leg has contact, else 0 + + Returns: + a: The heuristic to be fed into the step function defined above to determine the next step and reward. 
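+
+    Example (editorial sketch, not part of the original source; it assumes gym
+    is installed with the Box2D extra so that "LunarLander-v2" is registered):
+
+    ```
+    import gym
+
+    env = gym.make("LunarLander-v2")
+    s, info = env.reset(seed=42)
+    a = heuristic(env.unwrapped, s)  # discrete action in {0, 1, 2, 3}
+    s, r, terminated, truncated, info = env.step(a)
+    ```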
+ """ + + angle_targ = s[0] * 0.5 + s[2] * 1.0 # angle should point towards center + if angle_targ > 0.4: + angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad + if angle_targ < -0.4: + angle_targ = -0.4 + hover_targ = 0.55 * np.abs( + s[0] + ) # target y should be proportional to horizontal offset + + angle_todo = (angle_targ - s[4]) * 0.5 - (s[5]) * 1.0 + hover_todo = (hover_targ - s[1]) * 0.5 - (s[3]) * 0.5 + + if s[6] or s[7]: # legs have contact + angle_todo = 0 + hover_todo = ( + -(s[3]) * 0.5 + ) # override to reduce fall speed, that's all we need after contact + + if env.continuous: + a = np.array([hover_todo * 20 - 1, -angle_todo * 20]) + a = np.clip(a, -1, +1) + else: + a = 0 + if hover_todo > np.abs(angle_todo) and hover_todo > 0.05: + a = 2 + elif angle_todo < -0.05: + a = 3 + elif angle_todo > +0.05: + a = 1 + return a + + +def demo_heuristic_lander(env, seed=None, render=False): + + total_reward = 0 + steps = 0 + s, info = env.reset(seed=seed) + while True: + a = heuristic(env, s) + s, r, terminated, truncated, info = step_api_compatibility(env.step(a), True) + total_reward += r + + if render: + still_open = env.render() + if still_open is False: + break + + if steps % 20 == 0 or terminated or truncated: + print("observations:", " ".join([f"{x:+0.2f}" for x in s])) + print(f"step {steps} total_reward {total_reward:+0.2f}") + steps += 1 + if terminated or truncated: + break + if render: + env.close() + return total_reward + + +class LunarLanderContinuous: + def __init__(self): + raise error.Error( + "Error initializing LunarLanderContinuous Environment.\n" + "Currently, we do not support initializing this mode of environment by calling the class directly.\n" + "To use this environment, instead create it by specifying the continuous keyword in gym.make, i.e.\n" + 'gym.make("LunarLander-v2", continuous=True)' + ) + + +if __name__ == "__main__": + demo_heuristic_lander(LunarLander(), render=True) diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__init__.py b/MLPY/Lib/site-packages/gym/envs/classic_control/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..308185f395f67749146c21e8c606b805593335c4 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/__init__.py @@ -0,0 +1,5 @@ +from gym.envs.classic_control.acrobot import AcrobotEnv +from gym.envs.classic_control.cartpole import CartPoleEnv +from gym.envs.classic_control.continuous_mountain_car import Continuous_MountainCarEnv +from gym.envs.classic_control.mountain_car import MountainCarEnv +from gym.envs.classic_control.pendulum import PendulumEnv diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07dbefa6ce72fce300dae9eec8faed02aa830f38 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/acrobot.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/acrobot.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ddf439638caf947d35553adbc8942492d0a31a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/acrobot.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/cartpole.cpython-39.pyc 
b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/cartpole.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8654b3ea9be178eb3b320a8e4d6058537dd362cf Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/cartpole.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/continuous_mountain_car.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/continuous_mountain_car.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34815dd7cd4f27dc3f619128ffa14a5e8843432a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/continuous_mountain_car.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/mountain_car.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/mountain_car.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc763cdb7ff2144a53f78faa036039758440807c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/mountain_car.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/pendulum.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/pendulum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4af246f70f5a4a514d6e8a18922d25425fb240d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/pendulum.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1450c2539309027fd92d6e01f5a302dcc1ec5e8 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/acrobot.py b/MLPY/Lib/site-packages/gym/envs/classic_control/acrobot.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca31ca138fa4611c68999c5da6dd11721112bc1 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/acrobot.py @@ -0,0 +1,465 @@ +"""classic Acrobot task""" +from typing import Optional + +import numpy as np +from numpy import cos, pi, sin + +from gym import core, logger, spaces +from gym.error import DependencyNotInstalled + +__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" +__credits__ = [ + "Alborz Geramifard", + "Robert H. Klein", + "Christoph Dann", + "William Dabney", + "Jonathan P. How", +] +__license__ = "BSD 3-Clause" +__author__ = "Christoph Dann " + +# SOURCE: +# https://github.com/rlpy/rlpy/blob/master/rlpy/Domains/Acrobot.py +from gym.envs.classic_control import utils + + +class AcrobotEnv(core.Env): + """ + ### Description + + The Acrobot environment is based on Sutton's work in + ["Generalization in Reinforcement Learning: Successful Examples Using Sparse Coarse Coding"](https://papers.nips.cc/paper/1995/hash/8f1d43620bc6bb580df6e80b0dc05c48-Abstract.html) + and [Sutton and Barto's book](http://www.incompleteideas.net/book/the-book-2nd.html). + The system consists of two links connected linearly to form a chain, with one end of + the chain fixed. The joint between the two links is actuated. 
The goal is to apply + torques on the actuated joint to swing the free end of the linear chain above a + given height while starting from the initial state of hanging downwards. + + As seen in the **Gif**: two blue links connected by two green joints. The joint in + between the two links is actuated. The goal is to swing the free end of the outer-link + to reach the target height (black horizontal line above system) by applying torque on + the actuator. + + ### Action Space + + The action is discrete, deterministic, and represents the torque applied on the actuated + joint between the two links. + + | Num | Action | Unit | + |-----|---------------------------------------|--------------| + | 0 | apply -1 torque to the actuated joint | torque (N m) | + | 1 | apply 0 torque to the actuated joint | torque (N m) | + | 2 | apply 1 torque to the actuated joint | torque (N m) | + + ### Observation Space + + The observation is a `ndarray` with shape `(6,)` that provides information about the + two rotational joint angles as well as their angular velocities: + + | Num | Observation | Min | Max | + |-----|------------------------------|---------------------|-------------------| + | 0 | Cosine of `theta1` | -1 | 1 | + | 1 | Sine of `theta1` | -1 | 1 | + | 2 | Cosine of `theta2` | -1 | 1 | + | 3 | Sine of `theta2` | -1 | 1 | + | 4 | Angular velocity of `theta1` | ~ -12.567 (-4 * pi) | ~ 12.567 (4 * pi) | + | 5 | Angular velocity of `theta2` | ~ -28.274 (-9 * pi) | ~ 28.274 (9 * pi) | + + where + - `theta1` is the angle of the first joint, where an angle of 0 indicates the first link is pointing directly + downwards. + - `theta2` is ***relative to the angle of the first link.*** + An angle of 0 corresponds to having the same angle between the two links. + + The angular velocities of `theta1` and `theta2` are bounded at ±4π, and ±9π rad/s respectively. + A state of `[1, 0, 1, 0, ..., ...]` indicates that both links are pointing downwards. + + ### Rewards + + The goal is to have the free end reach a designated target height in as few steps as possible, + and as such all steps that do not reach the goal incur a reward of -1. + Achieving the target height results in termination with a reward of 0. The reward threshold is -100. + + ### Starting State + + Each parameter in the underlying state (`theta1`, `theta2`, and the two angular velocities) is initialized + uniformly between -0.1 and 0.1. This means both links are pointing downwards with some initial stochasticity. + + ### Episode End + + The episode ends if one of the following occurs: + 1. Termination: The free end reaches the target height, which is constructed as: + `-cos(theta1) - cos(theta2 + theta1) > 1.0` + 2. Truncation: Episode length is greater than 500 (200 for v0) + + ### Arguments + + No additional arguments are currently supported. + + ``` + env = gym.make('Acrobot-v1') + ``` + + By default, the dynamics of the acrobot follow those described in Sutton and Barto's book + [Reinforcement Learning: An Introduction](http://incompleteideas.net/book/11/node4.html). + However, a `book_or_nips` parameter can be modified to change the pendulum dynamics to those described + in the original [NeurIPS paper](https://papers.nips.cc/paper/1995/hash/8f1d43620bc6bb580df6e80b0dc05c48-Abstract.html). 
+ + ``` + # To change the dynamics as described above + env.env.book_or_nips = 'nips' + ``` + + See the following note and + the [implementation](https://github.com/openai/gym/blob/master/gym/envs/classic_control/acrobot.py) for details: + + > The dynamics equations were missing some terms in the NIPS paper which + are present in the book. R. Sutton confirmed in personal correspondence + that the experimental results shown in the paper and the book were + generated with the equations shown in the book. + However, there is the option to run the domain with the paper equations + by setting `book_or_nips = 'nips'` + + + ### Version History + + - v1: Maximum number of steps increased from 200 to 500. The observation space for v0 provided direct readings of + `theta1` and `theta2` in radians, having a range of `[-pi, pi]`. The v1 observation space as described here provides the + sine and cosine of each angle instead. + - v0: Initial versions release (1.0.0) (removed from gym for v1) + + ### References + - Sutton, R. S. (1996). Generalization in Reinforcement Learning: Successful Examples Using Sparse Coarse Coding. + In D. Touretzky, M. C. Mozer, & M. Hasselmo (Eds.), Advances in Neural Information Processing Systems (Vol. 8). + MIT Press. https://proceedings.neurips.cc/paper/1995/file/8f1d43620bc6bb580df6e80b0dc05c48-Paper.pdf + - Sutton, R. S., Barto, A. G. (2018 ). Reinforcement Learning: An Introduction. The MIT Press. + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 15, + } + + dt = 0.2 + + LINK_LENGTH_1 = 1.0 # [m] + LINK_LENGTH_2 = 1.0 # [m] + LINK_MASS_1 = 1.0 #: [kg] mass of link 1 + LINK_MASS_2 = 1.0 #: [kg] mass of link 2 + LINK_COM_POS_1 = 0.5 #: [m] position of the center of mass of link 1 + LINK_COM_POS_2 = 0.5 #: [m] position of the center of mass of link 2 + LINK_MOI = 1.0 #: moments of inertia for both links + + MAX_VEL_1 = 4 * pi + MAX_VEL_2 = 9 * pi + + AVAIL_TORQUE = [-1.0, 0.0, +1] + + torque_noise_max = 0.0 + + SCREEN_DIM = 500 + + #: use dynamics equations from the nips paper or the book + book_or_nips = "book" + action_arrow = None + domain_fig = None + actions_num = 3 + + def __init__(self, render_mode: Optional[str] = None): + self.render_mode = render_mode + self.screen = None + self.clock = None + self.isopen = True + high = np.array( + [1.0, 1.0, 1.0, 1.0, self.MAX_VEL_1, self.MAX_VEL_2], dtype=np.float32 + ) + low = -high + self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32) + self.action_space = spaces.Discrete(3) + self.state = None + + def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None): + super().reset(seed=seed) + # Note that if you use custom reset bounds, it may lead to out-of-bound + # state/observations. + low, high = utils.maybe_parse_reset_bounds( + options, -0.1, 0.1 # default low + ) # default high + self.state = self.np_random.uniform(low=low, high=high, size=(4,)).astype( + np.float32 + ) + + if self.render_mode == "human": + self.render() + return self._get_ob(), {} + + def step(self, a): + s = self.state + assert s is not None, "Call reset before using AcrobotEnv object." 
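+
+        # Editorial comments (not original code): the block below looks up the
+        # torque for the chosen action, optionally perturbs it with uniform
+        # noise, appends it to the state vector, integrates the augmented
+        # state for one `dt` with rk4(), then wraps both joint angles back
+        # into [-pi, pi] and clips the angular velocities to MAX_VEL_1 and
+        # MAX_VEL_2.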
+ torque = self.AVAIL_TORQUE[a] + + # Add noise to the force action + if self.torque_noise_max > 0: + torque += self.np_random.uniform( + -self.torque_noise_max, self.torque_noise_max + ) + + # Now, augment the state with our force action so it can be passed to + # _dsdt + s_augmented = np.append(s, torque) + + ns = rk4(self._dsdt, s_augmented, [0, self.dt]) + + ns[0] = wrap(ns[0], -pi, pi) + ns[1] = wrap(ns[1], -pi, pi) + ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1) + ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2) + self.state = ns + terminated = self._terminal() + reward = -1.0 if not terminated else 0.0 + + if self.render_mode == "human": + self.render() + return (self._get_ob(), reward, terminated, False, {}) + + def _get_ob(self): + s = self.state + assert s is not None, "Call reset before using AcrobotEnv object." + return np.array( + [cos(s[0]), sin(s[0]), cos(s[1]), sin(s[1]), s[2], s[3]], dtype=np.float32 + ) + + def _terminal(self): + s = self.state + assert s is not None, "Call reset before using AcrobotEnv object." + return bool(-cos(s[0]) - cos(s[1] + s[0]) > 1.0) + + def _dsdt(self, s_augmented): + m1 = self.LINK_MASS_1 + m2 = self.LINK_MASS_2 + l1 = self.LINK_LENGTH_1 + lc1 = self.LINK_COM_POS_1 + lc2 = self.LINK_COM_POS_2 + I1 = self.LINK_MOI + I2 = self.LINK_MOI + g = 9.8 + a = s_augmented[-1] + s = s_augmented[:-1] + theta1 = s[0] + theta2 = s[1] + dtheta1 = s[2] + dtheta2 = s[3] + d1 = ( + m1 * lc1**2 + + m2 * (l1**2 + lc2**2 + 2 * l1 * lc2 * cos(theta2)) + + I1 + + I2 + ) + d2 = m2 * (lc2**2 + l1 * lc2 * cos(theta2)) + I2 + phi2 = m2 * lc2 * g * cos(theta1 + theta2 - pi / 2.0) + phi1 = ( + -m2 * l1 * lc2 * dtheta2**2 * sin(theta2) + - 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * sin(theta2) + + (m1 * lc1 + m2 * l1) * g * cos(theta1 - pi / 2) + + phi2 + ) + if self.book_or_nips == "nips": + # the following line is consistent with the description in the + # paper + ddtheta2 = (a + d2 / d1 * phi1 - phi2) / (m2 * lc2**2 + I2 - d2**2 / d1) + else: + # the following line is consistent with the java implementation and the + # book + ddtheta2 = ( + a + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1**2 * sin(theta2) - phi2 + ) / (m2 * lc2**2 + I2 - d2**2 / d1) + ddtheta1 = -(d2 * ddtheta2 + phi1) / d1 + return dtheta1, dtheta2, ddtheta1, ddtheta2, 0.0 + + def render(self): + if self.render_mode is None: + logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.SCREEN_DIM, self.SCREEN_DIM) + ) + else: # mode in "rgb_array" + self.screen = pygame.Surface((self.SCREEN_DIM, self.SCREEN_DIM)) + if self.clock is None: + self.clock = pygame.time.Clock() + + surf = pygame.Surface((self.SCREEN_DIM, self.SCREEN_DIM)) + surf.fill((255, 255, 255)) + s = self.state + + bound = self.LINK_LENGTH_1 + self.LINK_LENGTH_2 + 0.2 # 2.2 for default + scale = self.SCREEN_DIM / (bound * 2) + offset = self.SCREEN_DIM / 2 + + if s is None: + return None + + p1 = [ + -self.LINK_LENGTH_1 * cos(s[0]) * scale, + self.LINK_LENGTH_1 * sin(s[0]) * scale, + ] + + p2 = [ + p1[0] - self.LINK_LENGTH_2 * cos(s[0] + s[1]) * scale, + p1[1] + self.LINK_LENGTH_2 * sin(s[0] + s[1]) * scale, + ] + + xys = np.array([[0, 0], p1, p2])[:, ::-1] + thetas = [s[0] - pi / 2, s[0] + s[1] - pi / 2] + link_lengths = [self.LINK_LENGTH_1 * scale, self.LINK_LENGTH_2 * scale] + + pygame.draw.line( + surf, + start_pos=(-2.2 * scale + offset, 1 * scale + offset), + end_pos=(2.2 * scale + offset, 1 * scale + offset), + color=(0, 0, 0), + ) + + for ((x, y), th, llen) in zip(xys, thetas, link_lengths): + x = x + offset + y = y + offset + l, r, t, b = 0, llen, 0.1 * scale, -0.1 * scale + coords = [(l, b), (l, t), (r, t), (r, b)] + transformed_coords = [] + for coord in coords: + coord = pygame.math.Vector2(coord).rotate_rad(th) + coord = (coord[0] + x, coord[1] + y) + transformed_coords.append(coord) + gfxdraw.aapolygon(surf, transformed_coords, (0, 204, 204)) + gfxdraw.filled_polygon(surf, transformed_coords, (0, 204, 204)) + + gfxdraw.aacircle(surf, int(x), int(y), int(0.1 * scale), (204, 204, 0)) + gfxdraw.filled_circle(surf, int(x), int(y), int(0.1 * scale), (204, 204, 0)) + + surf = pygame.transform.flip(surf, False, True) + self.screen.blit(surf, (0, 0)) + + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False + + +def wrap(x, m, M): + """Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which + truncates, ``wrap()`` wraps x around the coordinate system defined by m,M.\n + For example, m = -180, M = 180 (degrees), x = 360 --> returns 0. + + Args: + x: a scalar + m: minimum possible value in range + M: maximum possible value in range + + Returns: + x: a scalar, wrapped + """ + diff = M - m + while x > M: + x = x - diff + while x < m: + x = x + diff + return x + + +def bound(x, m, M=None): + """Either have m as scalar, so bound(x,m,M) which returns m <= x <= M *OR* + have m as length 2 vector, bound(x,m, ) returns m[0] <= x <= m[1]. + + Args: + x: scalar + m: The lower bound + M: The upper bound + + Returns: + x: scalar, bound between min (m) and Max (M) + """ + if M is None: + M = m[1] + m = m[0] + # bound x between min (m) and Max (M) + return min(max(x, m), M) + + +def rk4(derivs, y0, t): + """ + Integrate 1-D or N-D system of ODEs using 4-th order Runge-Kutta. 
+ + Example for 2D system: + + >>> def derivs(x): + ... d1 = x[0] + 2*x[1] + ... d2 = -3*x[0] + 4*x[1] + ... return d1, d2 + + >>> dt = 0.0005 + >>> t = np.arange(0.0, 2.0, dt) + >>> y0 = (1,2) + >>> yout = rk4(derivs, y0, t) + + Args: + derivs: the derivative of the system and has the signature ``dy = derivs(yi)`` + y0: initial state vector + t: sample times + + Returns: + yout: Runge-Kutta approximation of the ODE + """ + + try: + Ny = len(y0) + except TypeError: + yout = np.zeros((len(t),), np.float_) + else: + yout = np.zeros((len(t), Ny), np.float_) + + yout[0] = y0 + + for i in np.arange(len(t) - 1): + + this = t[i] + dt = t[i + 1] - this + dt2 = dt / 2.0 + y0 = yout[i] + + k1 = np.asarray(derivs(y0)) + k2 = np.asarray(derivs(y0 + dt2 * k1)) + k3 = np.asarray(derivs(y0 + dt2 * k2)) + k4 = np.asarray(derivs(y0 + dt * k3)) + yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4) + # We only care about the final timestep and we cleave off action value which will be zero + return yout[-1][:4] diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/assets/clockwise.png b/MLPY/Lib/site-packages/gym/envs/classic_control/assets/clockwise.png new file mode 100644 index 0000000000000000000000000000000000000000..1aa423652b279e6bf31cd1f06bed6a9c421f9323 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/classic_control/assets/clockwise.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/cartpole.py b/MLPY/Lib/site-packages/gym/envs/classic_control/cartpole.py new file mode 100644 index 0000000000000000000000000000000000000000..39005d7f8772903a924295250847539da3dfe902 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/cartpole.py @@ -0,0 +1,312 @@ +""" +Classic cart-pole system implemented by Rich Sutton et al. +Copied from http://incompleteideas.net/sutton/book/code/pole.c +permalink: https://perma.cc/C9ZM-652R +""" +import math +from typing import Optional, Union + +import numpy as np + +import gym +from gym import logger, spaces +from gym.envs.classic_control import utils +from gym.error import DependencyNotInstalled + + +class CartPoleEnv(gym.Env[np.ndarray, Union[int, np.ndarray]]): + """ + ### Description + + This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson in + ["Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem"](https://ieeexplore.ieee.org/document/6313077). + A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. + The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces + in the left and right direction on the cart. + + ### Action Space + + The action is a `ndarray` with shape `(1,)` which can take values `{0, 1}` indicating the direction + of the fixed force the cart is pushed with. + + | Num | Action | + |-----|------------------------| + | 0 | Push cart to the left | + | 1 | Push cart to the right | + + **Note**: The velocity that is reduced or increased by the applied force is not fixed and it depends on the angle + the pole is pointing. 
The center of gravity of the pole varies the amount of energy needed to move the cart underneath it + + ### Observation Space + + The observation is a `ndarray` with shape `(4,)` with the values corresponding to the following positions and velocities: + + | Num | Observation | Min | Max | + |-----|-----------------------|---------------------|-------------------| + | 0 | Cart Position | -4.8 | 4.8 | + | 1 | Cart Velocity | -Inf | Inf | + | 2 | Pole Angle | ~ -0.418 rad (-24°) | ~ 0.418 rad (24°) | + | 3 | Pole Angular Velocity | -Inf | Inf | + + **Note:** While the ranges above denote the possible values for observation space of each element, + it is not reflective of the allowed values of the state space in an unterminated episode. Particularly: + - The cart x-position (index 0) can be take values between `(-4.8, 4.8)`, but the episode terminates + if the cart leaves the `(-2.4, 2.4)` range. + - The pole angle can be observed between `(-.418, .418)` radians (or **±24°**), but the episode terminates + if the pole angle is not in the range `(-.2095, .2095)` (or **±12°**) + + ### Rewards + + Since the goal is to keep the pole upright for as long as possible, a reward of `+1` for every step taken, + including the termination step, is allotted. The threshold for rewards is 475 for v1. + + ### Starting State + + All observations are assigned a uniformly random value in `(-0.05, 0.05)` + + ### Episode End + + The episode ends if any one of the following occurs: + + 1. Termination: Pole Angle is greater than ±12° + 2. Termination: Cart Position is greater than ±2.4 (center of the cart reaches the edge of the display) + 3. Truncation: Episode length is greater than 500 (200 for v0) + + ### Arguments + + ``` + gym.make('CartPole-v1') + ``` + + No additional arguments are currently supported. + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 50, + } + + def __init__(self, render_mode: Optional[str] = None): + self.gravity = 9.8 + self.masscart = 1.0 + self.masspole = 0.1 + self.total_mass = self.masspole + self.masscart + self.length = 0.5 # actually half the pole's length + self.polemass_length = self.masspole * self.length + self.force_mag = 10.0 + self.tau = 0.02 # seconds between state updates + self.kinematics_integrator = "euler" + + # Angle at which to fail the episode + self.theta_threshold_radians = 12 * 2 * math.pi / 360 + self.x_threshold = 2.4 + + # Angle limit set to 2 * theta_threshold_radians so failing observation + # is still within bounds. + high = np.array( + [ + self.x_threshold * 2, + np.finfo(np.float32).max, + self.theta_threshold_radians * 2, + np.finfo(np.float32).max, + ], + dtype=np.float32, + ) + + self.action_space = spaces.Discrete(2) + self.observation_space = spaces.Box(-high, high, dtype=np.float32) + + self.render_mode = render_mode + + self.screen_width = 600 + self.screen_height = 400 + self.screen = None + self.clock = None + self.isopen = True + self.state = None + + self.steps_beyond_terminated = None + + def step(self, action): + err_msg = f"{action!r} ({type(action)}) invalid" + assert self.action_space.contains(action), err_msg + assert self.state is not None, "Call reset before using step method." 
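+
+        # Editorial comments (not original code): the block below evaluates
+        # the cart-pole equations of motion from the paper referenced a few
+        # lines further down; `temp` combines the applied force with the
+        # centrifugal term of the pole, `thetaacc` is the pole's angular
+        # acceleration and `xacc` the cart's linear acceleration. The state is
+        # then advanced with explicit Euler (the default) or semi-implicit
+        # Euler integration, depending on `kinematics_integrator`.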
+ x, x_dot, theta, theta_dot = self.state + force = self.force_mag if action == 1 else -self.force_mag + costheta = math.cos(theta) + sintheta = math.sin(theta) + + # For the interested reader: + # https://coneural.org/florian/papers/05_cart_pole.pdf + temp = ( + force + self.polemass_length * theta_dot**2 * sintheta + ) / self.total_mass + thetaacc = (self.gravity * sintheta - costheta * temp) / ( + self.length * (4.0 / 3.0 - self.masspole * costheta**2 / self.total_mass) + ) + xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass + + if self.kinematics_integrator == "euler": + x = x + self.tau * x_dot + x_dot = x_dot + self.tau * xacc + theta = theta + self.tau * theta_dot + theta_dot = theta_dot + self.tau * thetaacc + else: # semi-implicit euler + x_dot = x_dot + self.tau * xacc + x = x + self.tau * x_dot + theta_dot = theta_dot + self.tau * thetaacc + theta = theta + self.tau * theta_dot + + self.state = (x, x_dot, theta, theta_dot) + + terminated = bool( + x < -self.x_threshold + or x > self.x_threshold + or theta < -self.theta_threshold_radians + or theta > self.theta_threshold_radians + ) + + if not terminated: + reward = 1.0 + elif self.steps_beyond_terminated is None: + # Pole just fell! + self.steps_beyond_terminated = 0 + reward = 1.0 + else: + if self.steps_beyond_terminated == 0: + logger.warn( + "You are calling 'step()' even though this " + "environment has already returned terminated = True. You " + "should always call 'reset()' once you receive 'terminated = " + "True' -- any further steps are undefined behavior." + ) + self.steps_beyond_terminated += 1 + reward = 0.0 + + if self.render_mode == "human": + self.render() + return np.array(self.state, dtype=np.float32), reward, terminated, False, {} + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + # Note that if you use custom reset bounds, it may lead to out-of-bound + # state/observations. + low, high = utils.maybe_parse_reset_bounds( + options, -0.05, 0.05 # default low + ) # default high + self.state = self.np_random.uniform(low=low, high=high, size=(4,)) + self.steps_beyond_terminated = None + + if self.render_mode == "human": + self.render() + return np.array(self.state, dtype=np.float32), {} + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.screen_width, self.screen_height) + ) + else: # mode == "rgb_array" + self.screen = pygame.Surface((self.screen_width, self.screen_height)) + if self.clock is None: + self.clock = pygame.time.Clock() + + world_width = self.x_threshold * 2 + scale = self.screen_width / world_width + polewidth = 10.0 + polelen = scale * (2 * self.length) + cartwidth = 50.0 + cartheight = 30.0 + + if self.state is None: + return None + + x = self.state + + self.surf = pygame.Surface((self.screen_width, self.screen_height)) + self.surf.fill((255, 255, 255)) + + l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2 + axleoffset = cartheight / 4.0 + cartx = x[0] * scale + self.screen_width / 2.0 # MIDDLE OF CART + carty = 100 # TOP OF CART + cart_coords = [(l, b), (l, t), (r, t), (r, b)] + cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords] + gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0)) + gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0)) + + l, r, t, b = ( + -polewidth / 2, + polewidth / 2, + polelen - polewidth / 2, + -polewidth / 2, + ) + + pole_coords = [] + for coord in [(l, b), (l, t), (r, t), (r, b)]: + coord = pygame.math.Vector2(coord).rotate_rad(-x[2]) + coord = (coord[0] + cartx, coord[1] + carty + axleoffset) + pole_coords.append(coord) + gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101)) + gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101)) + + gfxdraw.aacircle( + self.surf, + int(cartx), + int(carty + axleoffset), + int(polewidth / 2), + (129, 132, 203), + ) + gfxdraw.filled_circle( + self.surf, + int(cartx), + int(carty + axleoffset), + int(polewidth / 2), + (129, 132, 203), + ) + + gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0)) + + self.surf = pygame.transform.flip(self.surf, False, True) + self.screen.blit(self.surf, (0, 0)) + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/continuous_mountain_car.py b/MLPY/Lib/site-packages/gym/envs/classic_control/continuous_mountain_car.py new file mode 100644 index 0000000000000000000000000000000000000000..0995b6232ae3a30b982169de64f5b0b623e366a9 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/continuous_mountain_car.py @@ -0,0 +1,300 @@ +""" +@author: Olivier Sigaud + +A merge between two sources: + +* Adaptation of the MountainCar Environment from the "FAReinforcement" library +of Jose Antonio Martin H. 
(version 1.0), adapted by 'Tom Schaul, tom@idsia.ch' +and then modified by Arnaud de Broissia + +* the gym MountainCar environment +itself from +http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp +permalink: https://perma.cc/6Z2N-PFWC +""" + +import math +from typing import Optional + +import numpy as np + +import gym +from gym import spaces +from gym.envs.classic_control import utils +from gym.error import DependencyNotInstalled + + +class Continuous_MountainCarEnv(gym.Env): + """ + ### Description + + The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically + at the bottom of a sinusoidal valley, with the only possible actions being the accelerations + that can be applied to the car in either direction. The goal of the MDP is to strategically + accelerate the car to reach the goal state on top of the right hill. There are two versions + of the mountain car domain in gym: one with discrete actions and one with continuous. + This version is the one with continuous actions. + + This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf) + + ``` + @TECHREPORT{Moore90efficientmemory-based, + author = {Andrew William Moore}, + title = {Efficient Memory-based Learning for Robot Control}, + institution = {University of Cambridge}, + year = {1990} + } + ``` + + ### Observation Space + + The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Unit | + |-----|--------------------------------------|------|-----|--------------| + | 0 | position of the car along the x-axis | -Inf | Inf | position (m) | + | 1 | velocity of the car | -Inf | Inf | position (m) | + + ### Action Space + + The action is a `ndarray` with shape `(1,)`, representing the directional force applied on the car. + The action is clipped in the range `[-1,1]` and multiplied by a power of 0.0015. + + ### Transition Dynamics: + + Given an action, the mountain car follows the following transition dynamics: + + *velocityt+1 = velocityt+1 + force * self.power - 0.0025 * cos(3 * positiont)* + + *positiont+1 = positiont + velocityt+1* + + where force is the action clipped to the range `[-1,1]` and power is a constant 0.0015. + The collisions at either end are inelastic with the velocity set to 0 upon collision with the wall. + The position is clipped to the range [-1.2, 0.6] and velocity is clipped to the range [-0.07, 0.07]. + + ### Reward + + A negative reward of *-0.1 * action2* is received at each timestep to penalise for + taking actions of large magnitude. If the mountain car reaches the goal then a positive reward of +100 + is added to the negative reward for that timestep. + + ### Starting State + + The position of the car is assigned a uniform random value in `[-0.6 , -0.4]`. + The starting velocity of the car is always assigned to 0. + + ### Episode End + + The episode ends if either of the following happens: + 1. Termination: The position of the car is greater than or equal to 0.45 (the goal position on top of the right hill) + 2. Truncation: The length of the episode is 999. 
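+
+    ### Transition sketch
+
+    The update above can be restated in plain Python. This is an editorial
+    sketch, not part of the original source; note that the velocity on the
+    right-hand side of the velocity update is the current velocity, and the
+    clipping ranges are the ones documented in this section:
+
+    ```
+    import math
+
+    def continuous_mountain_car_step(position, velocity, action, power=0.0015):
+        force = min(max(action, -1.0), 1.0)
+        velocity += force * power - 0.0025 * math.cos(3 * position)
+        velocity = min(max(velocity, -0.07), 0.07)
+        position += velocity
+        position = min(max(position, -1.2), 0.6)
+        if position == -1.2 and velocity < 0:
+            velocity = 0.0  # inelastic collision with the left wall
+        return position, velocity
+    ```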
+ + ### Arguments + + ``` + gym.make('MountainCarContinuous-v0') + ``` + + ### Version History + + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 30, + } + + def __init__(self, render_mode: Optional[str] = None, goal_velocity=0): + self.min_action = -1.0 + self.max_action = 1.0 + self.min_position = -1.2 + self.max_position = 0.6 + self.max_speed = 0.07 + self.goal_position = ( + 0.45 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version + ) + self.goal_velocity = goal_velocity + self.power = 0.0015 + + self.low_state = np.array( + [self.min_position, -self.max_speed], dtype=np.float32 + ) + self.high_state = np.array( + [self.max_position, self.max_speed], dtype=np.float32 + ) + + self.render_mode = render_mode + + self.screen_width = 600 + self.screen_height = 400 + self.screen = None + self.clock = None + self.isopen = True + + self.action_space = spaces.Box( + low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32 + ) + self.observation_space = spaces.Box( + low=self.low_state, high=self.high_state, dtype=np.float32 + ) + + def step(self, action: np.ndarray): + + position = self.state[0] + velocity = self.state[1] + force = min(max(action[0], self.min_action), self.max_action) + + velocity += force * self.power - 0.0025 * math.cos(3 * position) + if velocity > self.max_speed: + velocity = self.max_speed + if velocity < -self.max_speed: + velocity = -self.max_speed + position += velocity + if position > self.max_position: + position = self.max_position + if position < self.min_position: + position = self.min_position + if position == self.min_position and velocity < 0: + velocity = 0 + + # Convert a possible numpy bool to a Python bool. + terminated = bool( + position >= self.goal_position and velocity >= self.goal_velocity + ) + + reward = 0 + if terminated: + reward = 100.0 + reward -= math.pow(action[0], 2) * 0.1 + + self.state = np.array([position, velocity], dtype=np.float32) + + if self.render_mode == "human": + self.render() + return self.state, reward, terminated, False, {} + + def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None): + super().reset(seed=seed) + # Note that if you use custom reset bounds, it may lead to out-of-bound + # state/observations. + low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4) + self.state = np.array([self.np_random.uniform(low=low, high=high), 0]) + + if self.render_mode == "human": + self.render() + return np.array(self.state, dtype=np.float32), {} + + def _height(self, xs): + return np.sin(3 * xs) * 0.45 + 0.55 + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.screen_width, self.screen_height) + ) + else: # mode == "rgb_array": + self.screen = pygame.Surface((self.screen_width, self.screen_height)) + if self.clock is None: + self.clock = pygame.time.Clock() + + world_width = self.max_position - self.min_position + scale = self.screen_width / world_width + carwidth = 40 + carheight = 20 + + self.surf = pygame.Surface((self.screen_width, self.screen_height)) + self.surf.fill((255, 255, 255)) + + pos = self.state[0] + + xs = np.linspace(self.min_position, self.max_position, 100) + ys = self._height(xs) + xys = list(zip((xs - self.min_position) * scale, ys * scale)) + + pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0)) + + clearance = 10 + + l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0 + coords = [] + for c in [(l, b), (l, t), (r, t), (r, b)]: + c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos)) + coords.append( + ( + c[0] + (pos - self.min_position) * scale, + c[1] + clearance + self._height(pos) * scale, + ) + ) + + gfxdraw.aapolygon(self.surf, coords, (0, 0, 0)) + gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0)) + + for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]: + c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos)) + wheel = ( + int(c[0] + (pos - self.min_position) * scale), + int(c[1] + clearance + self._height(pos) * scale), + ) + + gfxdraw.aacircle( + self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128) + ) + gfxdraw.filled_circle( + self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128) + ) + + flagx = int((self.goal_position - self.min_position) * scale) + flagy1 = int(self._height(self.goal_position) * scale) + flagy2 = flagy1 + 50 + gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0)) + + gfxdraw.aapolygon( + self.surf, + [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)], + (204, 204, 0), + ) + gfxdraw.filled_polygon( + self.surf, + [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)], + (204, 204, 0), + ) + + self.surf = pygame.transform.flip(self.surf, False, True) + self.screen.blit(self.surf, (0, 0)) + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/mountain_car.py b/MLPY/Lib/site-packages/gym/envs/classic_control/mountain_car.py new file mode 100644 index 0000000000000000000000000000000000000000..1bae60fe497e5b8a0ef78b67932942960d998e0f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/mountain_car.py @@ -0,0 +1,282 @@ +""" +http://incompleteideas.net/MountainCar/MountainCar1.cp +permalink: https://perma.cc/6Z2N-PFWC +""" +import math +from typing import Optional + +import numpy as np + +import gym +from gym import spaces +from gym.envs.classic_control import utils +from gym.error import DependencyNotInstalled + + +class 
MountainCarEnv(gym.Env): + """ + ### Description + + The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically + at the bottom of a sinusoidal valley, with the only possible actions being the accelerations + that can be applied to the car in either direction. The goal of the MDP is to strategically + accelerate the car to reach the goal state on top of the right hill. There are two versions + of the mountain car domain in gym: one with discrete actions and one with continuous. + This version is the one with discrete actions. + + This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf) + + ``` + @TECHREPORT{Moore90efficientmemory-based, + author = {Andrew William Moore}, + title = {Efficient Memory-based Learning for Robot Control}, + institution = {University of Cambridge}, + year = {1990} + } + ``` + + ### Observation Space + + The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Unit | + |-----|--------------------------------------|------|-----|--------------| + | 0 | position of the car along the x-axis | -Inf | Inf | position (m) | + | 1 | velocity of the car | -Inf | Inf | position (m) | + + ### Action Space + + There are 3 discrete deterministic actions: + + | Num | Observation | Value | Unit | + |-----|-------------------------|-------|--------------| + | 0 | Accelerate to the left | Inf | position (m) | + | 1 | Don't accelerate | Inf | position (m) | + | 2 | Accelerate to the right | Inf | position (m) | + + ### Transition Dynamics: + + Given an action, the mountain car follows the following transition dynamics: + + *velocityt+1 = velocityt + (action - 1) * force - cos(3 * positiont) * gravity* + + *positiont+1 = positiont + velocityt+1* + + where force = 0.001 and gravity = 0.0025. The collisions at either end are inelastic with the velocity set to 0 + upon collision with the wall. The position is clipped to the range `[-1.2, 0.6]` and + velocity is clipped to the range `[-0.07, 0.07]`. + + + ### Reward: + + The goal is to reach the flag placed on top of the right hill as quickly as possible, as such the agent is + penalised with a reward of -1 for each timestep. + + ### Starting State + + The position of the car is assigned a uniform random value in *[-0.6 , -0.4]*. + The starting velocity of the car is always assigned to 0. + + ### Episode End + + The episode ends if either of the following happens: + 1. Termination: The position of the car is greater than or equal to 0.5 (the goal position on top of the right hill) + 2. Truncation: The length of the episode is 200. 
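+
+    A short usage sketch (editorial, not part of the original source): the
+    classic bang-bang strategy of always accelerating in the direction of the
+    current velocity pumps energy into the car and typically reaches the flag
+    before the 200-step truncation:
+
+    ```
+    import gym
+
+    env = gym.make("MountainCar-v0")
+    obs, info = env.reset(seed=0)
+    terminated = truncated = False
+    while not (terminated or truncated):
+        action = 2 if obs[1] > 0 else 0  # obs[1] is the velocity
+        obs, reward, terminated, truncated, info = env.step(action)
+    ```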
+ + + ### Arguments + + ``` + gym.make('MountainCar-v0') + ``` + + ### Version History + + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 30, + } + + def __init__(self, render_mode: Optional[str] = None, goal_velocity=0): + self.min_position = -1.2 + self.max_position = 0.6 + self.max_speed = 0.07 + self.goal_position = 0.5 + self.goal_velocity = goal_velocity + + self.force = 0.001 + self.gravity = 0.0025 + + self.low = np.array([self.min_position, -self.max_speed], dtype=np.float32) + self.high = np.array([self.max_position, self.max_speed], dtype=np.float32) + + self.render_mode = render_mode + + self.screen_width = 600 + self.screen_height = 400 + self.screen = None + self.clock = None + self.isopen = True + + self.action_space = spaces.Discrete(3) + self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32) + + def step(self, action: int): + assert self.action_space.contains( + action + ), f"{action!r} ({type(action)}) invalid" + + position, velocity = self.state + velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity) + velocity = np.clip(velocity, -self.max_speed, self.max_speed) + position += velocity + position = np.clip(position, self.min_position, self.max_position) + if position == self.min_position and velocity < 0: + velocity = 0 + + terminated = bool( + position >= self.goal_position and velocity >= self.goal_velocity + ) + reward = -1.0 + + self.state = (position, velocity) + if self.render_mode == "human": + self.render() + return np.array(self.state, dtype=np.float32), reward, terminated, False, {} + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + # Note that if you use custom reset bounds, it may lead to out-of-bound + # state/observations. + low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4) + self.state = np.array([self.np_random.uniform(low=low, high=high), 0]) + + if self.render_mode == "human": + self.render() + return np.array(self.state, dtype=np.float32), {} + + def _height(self, xs): + return np.sin(3 * xs) * 0.45 + 0.55 + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.screen_width, self.screen_height) + ) + else: # mode in "rgb_array" + self.screen = pygame.Surface((self.screen_width, self.screen_height)) + if self.clock is None: + self.clock = pygame.time.Clock() + + world_width = self.max_position - self.min_position + scale = self.screen_width / world_width + carwidth = 40 + carheight = 20 + + self.surf = pygame.Surface((self.screen_width, self.screen_height)) + self.surf.fill((255, 255, 255)) + + pos = self.state[0] + + xs = np.linspace(self.min_position, self.max_position, 100) + ys = self._height(xs) + xys = list(zip((xs - self.min_position) * scale, ys * scale)) + + pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0)) + + clearance = 10 + + l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0 + coords = [] + for c in [(l, b), (l, t), (r, t), (r, b)]: + c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos)) + coords.append( + ( + c[0] + (pos - self.min_position) * scale, + c[1] + clearance + self._height(pos) * scale, + ) + ) + + gfxdraw.aapolygon(self.surf, coords, (0, 0, 0)) + gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0)) + + for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]: + c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos)) + wheel = ( + int(c[0] + (pos - self.min_position) * scale), + int(c[1] + clearance + self._height(pos) * scale), + ) + + gfxdraw.aacircle( + self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128) + ) + gfxdraw.filled_circle( + self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128) + ) + + flagx = int((self.goal_position - self.min_position) * scale) + flagy1 = int(self._height(self.goal_position) * scale) + flagy2 = flagy1 + 50 + gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0)) + + gfxdraw.aapolygon( + self.surf, + [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)], + (204, 204, 0), + ) + gfxdraw.filled_polygon( + self.surf, + [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)], + (204, 204, 0), + ) + + self.surf = pygame.transform.flip(self.surf, False, True) + self.screen.blit(self.surf, (0, 0)) + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + elif self.render_mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def get_keys_to_action(self): + # Control with left and right arrow keys. 
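+        # Editorial comment (not original code): 275 and 276 are the classic
+        # SDL1/pygame key codes for the right and left arrow keys, so this
+        # maps: no key or both keys -> don't accelerate (1), left arrow ->
+        # accelerate left (0), right arrow -> accelerate right (2).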
+ return {(): 1, (276,): 0, (275,): 2, (275, 276): 1} + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/pendulum.py b/MLPY/Lib/site-packages/gym/envs/classic_control/pendulum.py new file mode 100644 index 0000000000000000000000000000000000000000..536d57e909d597dd1cd238c48ba5a396f326f41e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/pendulum.py @@ -0,0 +1,271 @@ +__credits__ = ["Carlos Luis"] + +from os import path +from typing import Optional + +import numpy as np + +import gym +from gym import spaces +from gym.envs.classic_control import utils +from gym.error import DependencyNotInstalled + +DEFAULT_X = np.pi +DEFAULT_Y = 1.0 + + +class PendulumEnv(gym.Env): + """ + ### Description + + The inverted pendulum swingup problem is based on the classic problem in control theory. + The system consists of a pendulum attached at one end to a fixed point, and the other end being free. + The pendulum starts in a random position and the goal is to apply torque on the free end to swing it + into an upright position, with its center of gravity right above the fixed point. + + The diagram below specifies the coordinate system used for the implementation of the pendulum's + dynamic equations. + + ![Pendulum Coordinate System](./diagrams/pendulum.png) + + - `x-y`: cartesian coordinates of the pendulum's end in meters. + - `theta` : angle in radians. + - `tau`: torque in `N m`. Defined as positive _counter-clockwise_. + + ### Action Space + + The action is a `ndarray` with shape `(1,)` representing the torque applied to free end of the pendulum. + + | Num | Action | Min | Max | + |-----|--------|------|-----| + | 0 | Torque | -2.0 | 2.0 | + + + ### Observation Space + + The observation is a `ndarray` with shape `(3,)` representing the x-y coordinates of the pendulum's free + end and its angular velocity. + + | Num | Observation | Min | Max | + |-----|------------------|------|-----| + | 0 | x = cos(theta) | -1.0 | 1.0 | + | 1 | y = sin(theta) | -1.0 | 1.0 | + | 2 | Angular Velocity | -8.0 | 8.0 | + + ### Rewards + + The reward function is defined as: + + *r = -(theta2 + 0.1 * theta_dt2 + 0.001 * torque2)* + + where `$\theta$` is the pendulum's angle normalized between *[-pi, pi]* (with 0 being in the upright position). + Based on the above equation, the minimum reward that can be obtained is + *-(pi2 + 0.1 * 82 + 0.001 * 22) = -16.2736044*, + while the maximum reward is zero (pendulum is upright with zero velocity and no torque applied). + + ### Starting State + + The starting state is a random angle in *[-pi, pi]* and a random angular velocity in *[-1,1]*. + + ### Episode Truncation + + The episode truncates at 200 time steps. + + ### Arguments + + - `g`: acceleration of gravity measured in *(m s-2)* used to calculate the pendulum dynamics. + The default value is g = 10.0 . + + ``` + gym.make('Pendulum-v1', g=9.81) + ``` + + ### Version History + + * v1: Simplify the math equations, no difference in behavior. 
+ * v0: Initial versions release (1.0.0) + + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 30, + } + + def __init__(self, render_mode: Optional[str] = None, g=10.0): + self.max_speed = 8 + self.max_torque = 2.0 + self.dt = 0.05 + self.g = g + self.m = 1.0 + self.l = 1.0 + + self.render_mode = render_mode + + self.screen_dim = 500 + self.screen = None + self.clock = None + self.isopen = True + + high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32) + # This will throw a warning in tests/envs/test_envs in utils/env_checker.py as the space is not symmetric + # or normalised as max_torque == 2 by default. Ignoring the issue here as the default settings are too old + # to update to follow the openai gym api + self.action_space = spaces.Box( + low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32 + ) + self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32) + + def step(self, u): + th, thdot = self.state # th := theta + + g = self.g + m = self.m + l = self.l + dt = self.dt + + u = np.clip(u, -self.max_torque, self.max_torque)[0] + self.last_u = u # for rendering + costs = angle_normalize(th) ** 2 + 0.1 * thdot**2 + 0.001 * (u**2) + + newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / (m * l**2) * u) * dt + newthdot = np.clip(newthdot, -self.max_speed, self.max_speed) + newth = th + newthdot * dt + + self.state = np.array([newth, newthdot]) + + if self.render_mode == "human": + self.render() + return self._get_obs(), -costs, False, False, {} + + def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None): + super().reset(seed=seed) + if options is None: + high = np.array([DEFAULT_X, DEFAULT_Y]) + else: + # Note that if you use custom reset bounds, it may lead to out-of-bound + # state/observations. + x = options.get("x_init") if "x_init" in options else DEFAULT_X + y = options.get("y_init") if "y_init" in options else DEFAULT_Y + x = utils.verify_number_and_cast(x) + y = utils.verify_number_and_cast(y) + high = np.array([x, y]) + low = -high # We enforce symmetric limits. + self.state = self.np_random.uniform(low=low, high=high) + self.last_u = None + + if self.render_mode == "human": + self.render() + return self._get_obs(), {} + + def _get_obs(self): + theta, thetadot = self.state + return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.screen_dim, self.screen_dim) + ) + else: # mode in "rgb_array" + self.screen = pygame.Surface((self.screen_dim, self.screen_dim)) + if self.clock is None: + self.clock = pygame.time.Clock() + + self.surf = pygame.Surface((self.screen_dim, self.screen_dim)) + self.surf.fill((255, 255, 255)) + + bound = 2.2 + scale = self.screen_dim / (bound * 2) + offset = self.screen_dim // 2 + + rod_length = 1 * scale + rod_width = 0.2 * scale + l, r, t, b = 0, rod_length, rod_width / 2, -rod_width / 2 + coords = [(l, b), (l, t), (r, t), (r, b)] + transformed_coords = [] + for c in coords: + c = pygame.math.Vector2(c).rotate_rad(self.state[0] + np.pi / 2) + c = (c[0] + offset, c[1] + offset) + transformed_coords.append(c) + gfxdraw.aapolygon(self.surf, transformed_coords, (204, 77, 77)) + gfxdraw.filled_polygon(self.surf, transformed_coords, (204, 77, 77)) + + gfxdraw.aacircle(self.surf, offset, offset, int(rod_width / 2), (204, 77, 77)) + gfxdraw.filled_circle( + self.surf, offset, offset, int(rod_width / 2), (204, 77, 77) + ) + + rod_end = (rod_length, 0) + rod_end = pygame.math.Vector2(rod_end).rotate_rad(self.state[0] + np.pi / 2) + rod_end = (int(rod_end[0] + offset), int(rod_end[1] + offset)) + gfxdraw.aacircle( + self.surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77) + ) + gfxdraw.filled_circle( + self.surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77) + ) + + fname = path.join(path.dirname(__file__), "assets/clockwise.png") + img = pygame.image.load(fname) + if self.last_u is not None: + scale_img = pygame.transform.smoothscale( + img, + (scale * np.abs(self.last_u) / 2, scale * np.abs(self.last_u) / 2), + ) + is_flip = bool(self.last_u > 0) + scale_img = pygame.transform.flip(scale_img, is_flip, True) + self.surf.blit( + scale_img, + ( + offset - scale_img.get_rect().centerx, + offset - scale_img.get_rect().centery, + ), + ) + + # drawing axle + gfxdraw.aacircle(self.surf, offset, offset, int(0.05 * scale), (0, 0, 0)) + gfxdraw.filled_circle(self.surf, offset, offset, int(0.05 * scale), (0, 0, 0)) + + self.surf = pygame.transform.flip(self.surf, False, True) + self.screen.blit(self.surf, (0, 0)) + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + else: # mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False + + +def angle_normalize(x): + return ((x + np.pi) % (2 * np.pi)) - np.pi diff --git a/MLPY/Lib/site-packages/gym/envs/classic_control/utils.py b/MLPY/Lib/site-packages/gym/envs/classic_control/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2aa514b15a0a8dab213d71d9b7a80e18ca945f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/classic_control/utils.py @@ -0,0 +1,46 @@ +""" +Utility functions used for classic control environments. 
+""" + +from typing import Optional, SupportsFloat, Tuple + + +def verify_number_and_cast(x: SupportsFloat) -> float: + """Verify parameter is a single number and cast to a float.""" + try: + x = float(x) + except (ValueError, TypeError): + raise ValueError(f"An option ({x}) could not be converted to a float.") + return x + + +def maybe_parse_reset_bounds( + options: Optional[dict], default_low: float, default_high: float +) -> Tuple[float, float]: + """ + This function can be called during a reset() to customize the sampling + ranges for setting the initial state distributions. + + Args: + options: Options passed in to reset(). + default_low: Default lower limit to use, if none specified in options. + default_high: Default upper limit to use, if none specified in options. + + Returns: + Tuple of the lower and upper limits. + """ + if options is None: + return default_low, default_high + + low = options.get("low") if "low" in options else default_low + high = options.get("high") if "high" in options else default_high + + # We expect only numerical inputs. + low = verify_number_and_cast(low) + high = verify_number_and_cast(high) + if low > high: + raise ValueError( + f"Lower bound ({low}) must be lower than higher bound ({high})." + ) + + return low, high diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__init__.py b/MLPY/Lib/site-packages/gym/envs/mujoco/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..286143be6ba0511665a27a750d247a899eb24e1d --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/__init__.py @@ -0,0 +1,13 @@ +from gym.envs.mujoco.mujoco_env import MujocoEnv, MuJocoPyEnv # isort:skip + +from gym.envs.mujoco.ant import AntEnv +from gym.envs.mujoco.half_cheetah import HalfCheetahEnv +from gym.envs.mujoco.hopper import HopperEnv +from gym.envs.mujoco.humanoid import HumanoidEnv +from gym.envs.mujoco.humanoidstandup import HumanoidStandupEnv +from gym.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv +from gym.envs.mujoco.inverted_pendulum import InvertedPendulumEnv +from gym.envs.mujoco.pusher import PusherEnv +from gym.envs.mujoco.reacher import ReacherEnv +from gym.envs.mujoco.swimmer import SwimmerEnv +from gym.envs.mujoco.walker2d import Walker2dEnv diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0351d8f9c33dc0311da77e4f6582343d7875492 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b77b0cde2ff62a405edef89e773fe4739aa659c0 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..316c82aef8f6891834faa3a21839de7e04451715 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v4.cpython-39.pyc 
b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb1d3de05168489a5f801ac6fc037e40e4e587fb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/ant_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f17ff186ce8054e7ace06477d1881e9bf6603619 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..283db7a1f8c72269269110880bef81455806825c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5049e94280095be5a112f148bfca20b37cc85191 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/half_cheetah_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b4c5e77c00cbe62d5b8258ab79486ba8dec170 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8ce6d2f67d5e99bd7688204e523b9ba84a013b0 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e46681289a5cca032f538469a0bfedc9ae57d8ec Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/hopper_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7a529fe81ddfecf0aa87c0142c8160bb3b314f2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe669d59de434f2f220c223b727b0aca9cf5df40 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v4.cpython-39.pyc 
b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06a9d827776a3158a776124b1f0b2c32a806f2c6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoid_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ba5a0a8d131e05d3cfd56a7a2418dbde7651435 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb09ab2fded8f842827bde5fa7d100e6ed2ab65a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/humanoidstandup_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0efb0b57017142464ac52ca576b7022db6d1bdbe Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..163f5405f7f681bb1ae0714e9e7c0e909d545a9d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_double_pendulum_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbe72b5aff0f90234d31454126783fb28c3c1a5c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3828c664c0f6864e01566b211c9987613961f0fd Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/inverted_pendulum_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_env.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_env.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2635458d1f1334298afd2a13a4fb979d49c86cf5 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_env.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_rendering.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_rendering.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0762eba9b2f3fd073ce4e1d4842ea9bd55f9312 Binary files /dev/null and 
b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/mujoco_rendering.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9d128b4826566b02dfd382fbc9508424ff26d0f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1089b60500ba0b129b0d7b60a121dc432d12094a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/pusher_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fb5000e7f95a44e7ce44060850cccbd4f618c51 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4dec49a9051e0b11c8ebf9a418c3b2179a9627 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/reacher_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90024f4f08f6264e7de68e46bcf439241d28d197 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ffbd068b54b23b4f9d362a605d9ae06bfd9f15c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1c5f2cb5c2752b308d18da611200008b2e37b26 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/swimmer_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f1c447989e281011ffb91b5cd1a7f8bd13b9050 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v3.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9346bd85270b34a77a825432caeb2842f749742f Binary files /dev/null and 
b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v4.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v4.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec4c154092a7cfb5d5b9493fa1cd8125b5d42876 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/mujoco/__pycache__/walker2d_v4.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/ant.py b/MLPY/Lib/site-packages/gym/envs/mujoco/ant.py new file mode 100644 index 0000000000000000000000000000000000000000..09e006be6b4c74b2189efc7bcf31b8a8ec36edf8 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/ant.py @@ -0,0 +1,80 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class AntEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + observation_space = Box( + low=-np.inf, high=np.inf, shape=(111,), dtype=np.float64 + ) + MuJocoPyEnv.__init__( + self, "ant.xml", 5, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, a): + xposbefore = self.get_body_com("torso")[0] + self.do_simulation(a, self.frame_skip) + xposafter = self.get_body_com("torso")[0] + + forward_reward = (xposafter - xposbefore) / self.dt + ctrl_cost = 0.5 * np.square(a).sum() + contact_cost = ( + 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) + ) + survive_reward = 1.0 + reward = forward_reward - ctrl_cost - contact_cost + survive_reward + state = self.state_vector() + not_terminated = ( + np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0 + ) + terminated = not not_terminated + ob = self._get_obs() + + if self.render_mode == "human": + self.render() + return ( + ob, + reward, + terminated, + False, + dict( + reward_forward=forward_reward, + reward_ctrl=-ctrl_cost, + reward_contact=-contact_cost, + reward_survive=survive_reward, + ), + ) + + def _get_obs(self): + return np.concatenate( + [ + self.sim.data.qpos.flat[2:], + self.sim.data.qvel.flat, + np.clip(self.sim.data.cfrc_ext, -1, 1).flat, + ] + ) + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.model.nq, low=-0.1, high=0.1 + ) + qvel = self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1 + self.set_state(qpos, qvel) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.distance = self.model.stat.extent * 0.5 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..751efabdffcaf41115a9ce295ffba4f74b53b198 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v3.py @@ -0,0 +1,186 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "distance": 4.0, +} + + +class AntEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__( + self, + xml_file="ant.xml", + ctrl_cost_weight=0.5, + contact_cost_weight=5e-4, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_z_range=(0.2, 1.0), + contact_force_range=(-1.0, 
1.0), + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + ctrl_cost_weight, + contact_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + contact_force_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._ctrl_cost_weight = ctrl_cost_weight + self._contact_cost_weight = contact_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + self._healthy_z_range = healthy_z_range + + self._contact_force_range = contact_force_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(111,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(113,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 5, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def contact_forces(self): + raw_contact_forces = self.sim.data.cfrc_ext + min_value, max_value = self._contact_force_range + contact_forces = np.clip(raw_contact_forces, min_value, max_value) + return contact_forces + + @property + def contact_cost(self): + contact_cost = self._contact_cost_weight * np.sum( + np.square(self.contact_forces) + ) + return contact_cost + + @property + def is_healthy(self): + state = self.state_vector() + min_z, max_z = self._healthy_z_range + is_healthy = np.isfinite(state).all() and min_z <= state[2] <= max_z + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def step(self, action): + xy_position_before = self.get_body_com("torso")[:2].copy() + self.do_simulation(action, self.frame_skip) + xy_position_after = self.get_body_com("torso")[:2].copy() + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + ctrl_cost = self.control_cost(action) + contact_cost = self.contact_cost + + forward_reward = x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + contact_cost + + reward = rewards - costs + terminated = self.terminated + observation = self._get_obs() + info = { + "reward_forward": forward_reward, + "reward_ctrl": -ctrl_cost, + "reward_contact": -contact_cost, + "reward_survive": healthy_reward, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = self.sim.data.qvel.flat.copy() + contact_force = self.contact_forces.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + observations = np.concatenate((position, velocity, 
contact_force)) + + return observations + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = ( + self.init_qvel + + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv) + ) + self.set_state(qpos, qvel) + + observation = self._get_obs() + + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..93782641e63c8d3473f6c402d665f590baa9603c --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/ant_v4.py @@ -0,0 +1,356 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "distance": 4.0, +} + + +class AntEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is based on the environment introduced by Schulman, + Moritz, Levine, Jordan and Abbeel in ["High-Dimensional Continuous Control + Using Generalized Advantage Estimation"](https://arxiv.org/abs/1506.02438). + The ant is a 3D robot consisting of one torso (free rotational body) with + four legs attached to it with each leg having two links. The goal is to + coordinate the four legs to move in the forward (right) direction by applying + torques on the eight hinges connecting the two links of each leg and the torso + (nine parts and eight hinges). + + ### Action Space + The action space is a `Box(-1, 1, (8,), float32)`. An action represents the torques applied at the hinge joints. + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ----------------------------------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ | + | 0 | Torque applied on the rotor between the torso and front left hip | -1 | 1 | hip_1 (front_left_leg) | hinge | torque (N m) | + | 1 | Torque applied on the rotor between the front left two links | -1 | 1 | angle_1 (front_left_leg) | hinge | torque (N m) | + | 2 | Torque applied on the rotor between the torso and front right hip | -1 | 1 | hip_2 (front_right_leg) | hinge | torque (N m) | + | 3 | Torque applied on the rotor between the front right two links | -1 | 1 | angle_2 (front_right_leg) | hinge | torque (N m) | + | 4 | Torque applied on the rotor between the torso and back left hip | -1 | 1 | hip_3 (back_leg) | hinge | torque (N m) | + | 5 | Torque applied on the rotor between the back left two links | -1 | 1 | angle_3 (back_leg) | hinge | torque (N m) | + | 6 | Torque applied on the rotor between the torso and back right hip | -1 | 1 | hip_4 (right_back_leg) | hinge | torque (N m) | + | 7 | Torque applied on the rotor between the back right two links | -1 | 1 | angle_4 (right_back_leg) | hinge | torque (N m) | + + ### Observation Space + + Observations consist of positional values of different body parts of the ant, + followed by the velocities of those individual parts (their derivatives) with all + the positions ordered before all the velocities. 
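    The exact observation size depends on the constructor flags described below. As a minimal
    sketch (assuming `gym` and the MuJoCo bindings are installed), the resulting sizes can be
    printed directly from the registered environment:

    ```
    import gym

    # Minimal sketch (assumes gym with the MuJoCo bindings installed): print the
    # observation sizes produced by the constructor flags discussed in this section.
    env_default = gym.make("Ant-v4")
    env_with_xy = gym.make("Ant-v4", exclude_current_positions_from_observation=False)
    env_with_forces = gym.make("Ant-v4", use_contact_forces=True)

    print(env_default.observation_space.shape)      # qpos (without x/y) followed by qvel
    print(env_with_xy.observation_space.shape)      # two extra leading position entries
    print(env_with_forces.observation_space.shape)  # plus the clipped contact forces
    ```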
+ + By default, observations do not include the x- and y-coordinates of the ant's torso. These may + be included by passing `exclude_current_positions_from_observation=False` during construction. + In that case, the observation space will have 113 dimensions where the first two dimensions + represent the x- and y- coordinates of the ant's torso. + Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x- and y-coordinates + of the torso will be returned in `info` with keys `"x_position"` and `"y_position"`, respectively. + + However, by default, an observation is a `ndarray` with shape `(111,)` + where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + |-----|--------------------------------------------------------------|--------|--------|----------------------------------------|-------|--------------------------| + | 0 | z-coordinate of the torso (centre) | -Inf | Inf | torso | free | position (m) | + | 1 | x-orientation of the torso (centre) | -Inf | Inf | torso | free | angle (rad) | + | 2 | y-orientation of the torso (centre) | -Inf | Inf | torso | free | angle (rad) | + | 3 | z-orientation of the torso (centre) | -Inf | Inf | torso | free | angle (rad) | + | 4 | w-orientation of the torso (centre) | -Inf | Inf | torso | free | angle (rad) | + | 5 | angle between torso and first link on front left | -Inf | Inf | hip_1 (front_left_leg) | hinge | angle (rad) | + | 6 | angle between the two links on the front left | -Inf | Inf | ankle_1 (front_left_leg) | hinge | angle (rad) | + | 7 | angle between torso and first link on front right | -Inf | Inf | hip_2 (front_right_leg) | hinge | angle (rad) | + | 8 | angle between the two links on the front right | -Inf | Inf | ankle_2 (front_right_leg) | hinge | angle (rad) | + | 9 | angle between torso and first link on back left | -Inf | Inf | hip_3 (back_leg) | hinge | angle (rad) | + | 10 | angle between the two links on the back left | -Inf | Inf | ankle_3 (back_leg) | hinge | angle (rad) | + | 11 | angle between torso and first link on back right | -Inf | Inf | hip_4 (right_back_leg) | hinge | angle (rad) | + | 12 | angle between the two links on the back right | -Inf | Inf | ankle_4 (right_back_leg) | hinge | angle (rad) | + | 13 | x-coordinate velocity of the torso | -Inf | Inf | torso | free | velocity (m/s) | + | 14 | y-coordinate velocity of the torso | -Inf | Inf | torso | free | velocity (m/s) | + | 15 | z-coordinate velocity of the torso | -Inf | Inf | torso | free | velocity (m/s) | + | 16 | x-coordinate angular velocity of the torso | -Inf | Inf | torso | free | angular velocity (rad/s) | + | 17 | y-coordinate angular velocity of the torso | -Inf | Inf | torso | free | angular velocity (rad/s) | + | 18 | z-coordinate angular velocity of the torso | -Inf | Inf | torso | free | angular velocity (rad/s) | + | 19 | angular velocity of angle between torso and front left link | -Inf | Inf | hip_1 (front_left_leg) | hinge | angle (rad) | + | 20 | angular velocity of the angle between front left links | -Inf | Inf | ankle_1 (front_left_leg) | hinge | angle (rad) | + | 21 | angular velocity of angle between torso and front right link | -Inf | Inf | hip_2 (front_right_leg) | hinge | angle (rad) | + | 22 | angular velocity of the angle between front right links | -Inf | Inf | ankle_2 (front_right_leg) | hinge | angle (rad) | + | 23 | angular velocity of angle between torso and back left link | -Inf | Inf | hip_3 (back_leg) | 
hinge | angle (rad) | + | 24 | angular velocity of the angle between back left links | -Inf | Inf | ankle_3 (back_leg) | hinge | angle (rad) | + | 25 | angular velocity of angle between torso and back right link | -Inf | Inf | hip_4 (right_back_leg) | hinge | angle (rad) | + | 26 |angular velocity of the angle between back right links | -Inf | Inf | ankle_4 (right_back_leg) | hinge | angle (rad) | + + + The remaining 14*6 = 84 elements of the observation are contact forces + (external forces - force x, y, z and torque x, y, z) applied to the + center of mass of each of the links. The 14 links are: the ground link, + the torso link, and 3 links for each leg (1 + 1 + 12) with the 6 external forces. + + The (x,y,z) coordinates are translational DOFs while the orientations are rotational + DOFs expressed as quaternions. One can read more about free joints on the [Mujoco Documentation](https://mujoco.readthedocs.io/en/latest/XMLreference.html). + + + **Note:** Ant-v4 environment no longer has the following contact forces issue. + If using previous Humanoid versions from v4, there have been reported issues that using a Mujoco-Py version > 2.0 results + in the contact forces always being 0. As such we recommend to use a Mujoco-Py version < 2.0 + when using the Ant environment if you would like to report results with contact forces (if + contact forces are not used in your experiments, you can use version > 2.0). + + ### Rewards + The reward consists of three parts: + - *healthy_reward*: Every timestep that the ant is healthy (see definition in section "Episode Termination"), it gets a reward of fixed value `healthy_reward` + - *forward_reward*: A reward of moving forward which is measured as + *(x-coordinate before action - x-coordinate after action)/dt*. *dt* is the time + between actions and is dependent on the `frame_skip` parameter (default is 5), + where the frametime is 0.01 - making the default *dt = 5 * 0.01 = 0.05*. + This reward would be positive if the ant moves forward (in positive x direction). + - *ctrl_cost*: A negative reward for penalising the ant if it takes actions + that are too large. It is measured as *`ctrl_cost_weight` * sum(action2)* + where *`ctr_cost_weight`* is a parameter set for the control and has a default value of 0.5. + - *contact_cost*: A negative reward for penalising the ant if the external contact + force is too large. It is calculated *`contact_cost_weight` * sum(clip(external contact + force to `contact_force_range`)2)*. + + The total reward returned is ***reward*** *=* *healthy_reward + forward_reward - ctrl_cost - contact_cost* and `info` will also contain the individual reward terms. + + ### Starting State + All observations start in state + (0.0, 0.0, 0.75, 1.0, 0.0 ... 0.0) with a uniform noise in the range + of [-`reset_noise_scale`, `reset_noise_scale`] added to the positional values and standard normal noise + with mean 0 and standard deviation `reset_noise_scale` added to the velocity values for + stochasticity. Note that the initial z coordinate is intentionally selected + to be slightly high, thereby indicating a standing up ant. The initial orientation + is designed to make it face forward as well. + + ### Episode End + The ant is said to be unhealthy if any of the following happens: + + 1. Any of the state space values is no longer finite + 2. 
The z-coordinate of the torso is **not** in the closed interval given by `healthy_z_range` (defaults to [0.2, 1.0]) + + If `terminate_when_unhealthy=True` is passed during construction (which is the default), + the episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 1000 timesteps + 2. Termination: The ant is unhealthy + + If `terminate_when_unhealthy=False` is passed, the episode is ended only when 1000 timesteps are exceeded. + + ### Arguments + + No additional arguments are currently supported in v2 and lower. + + ``` + env = gym.make('Ant-v2') + ``` + + v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('Ant-v4', ctrl_cost_weight=0.1, ...) + ``` + + | Parameter | Type | Default |Description | + |-------------------------|------------|--------------|-------------------------------| + | `xml_file` | **str** | `"ant.xml"` | Path to a MuJoCo model | + | `ctrl_cost_weight` | **float** | `0.5` | Weight for *ctrl_cost* term (see section on reward) | + | `contact_cost_weight` | **float** | `5e-4` | Weight for *contact_cost* term (see section on reward) | + | `healthy_reward` | **float** | `1` | Constant reward given if the ant is "healthy" after timestep | + | `terminate_when_unhealthy` | **bool**| `True` | If true, issue a done signal if the z-coordinate of the torso is no longer in the `healthy_z_range` | + | `healthy_z_range` | **tuple** | `(0.2, 1)` | The ant is considered healthy if the z-coordinate of the torso is in this range | + | `contact_force_range` | **tuple** | `(-1, 1)` | Contact forces are clipped to this range in the computation of *contact_cost* | + | `reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation`| **bool** | `True`| Whether or not to omit the x- and y-coordinates from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + ### Version History + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. 
+ * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__( + self, + xml_file="ant.xml", + ctrl_cost_weight=0.5, + use_contact_forces=False, + contact_cost_weight=5e-4, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_z_range=(0.2, 1.0), + contact_force_range=(-1.0, 1.0), + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + ctrl_cost_weight, + use_contact_forces, + contact_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + contact_force_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._ctrl_cost_weight = ctrl_cost_weight + self._contact_cost_weight = contact_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + self._healthy_z_range = healthy_z_range + + self._contact_force_range = contact_force_range + + self._reset_noise_scale = reset_noise_scale + + self._use_contact_forces = use_contact_forces + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + obs_shape = 27 + if not exclude_current_positions_from_observation: + obs_shape += 2 + if use_contact_forces: + obs_shape += 84 + + observation_space = Box( + low=-np.inf, high=np.inf, shape=(obs_shape,), dtype=np.float64 + ) + + MujocoEnv.__init__( + self, xml_file, 5, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def contact_forces(self): + raw_contact_forces = self.data.cfrc_ext + min_value, max_value = self._contact_force_range + contact_forces = np.clip(raw_contact_forces, min_value, max_value) + return contact_forces + + @property + def contact_cost(self): + contact_cost = self._contact_cost_weight * np.sum( + np.square(self.contact_forces) + ) + return contact_cost + + @property + def is_healthy(self): + state = self.state_vector() + min_z, max_z = self._healthy_z_range + is_healthy = np.isfinite(state).all() and min_z <= state[2] <= max_z + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def step(self, action): + xy_position_before = self.get_body_com("torso")[:2].copy() + self.do_simulation(action, self.frame_skip) + xy_position_after = self.get_body_com("torso")[:2].copy() + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + forward_reward = x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + + costs = ctrl_cost = self.control_cost(action) + + terminated = self.terminated + observation = self._get_obs() + info = { + "reward_forward": forward_reward, + "reward_ctrl": -ctrl_cost, + "reward_survive": healthy_reward, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + if self._use_contact_forces: + contact_cost = self.contact_cost + costs += 
contact_cost + info["reward_ctrl"] = -contact_cost + + reward = rewards - costs + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def _get_obs(self): + position = self.data.qpos.flat.copy() + velocity = self.data.qvel.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + if self._use_contact_forces: + contact_force = self.contact_forces.flat.copy() + return np.concatenate((position, velocity, contact_force)) + else: + return np.concatenate((position, velocity)) + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = ( + self.init_qvel + + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv) + ) + self.set_state(qpos, qvel) + + observation = self._get_obs() + + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/ant.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/ant.xml new file mode 100644 index 0000000000000000000000000000000000000000..ee4d679981c136b949ce278f39ddbd16e42e5de3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/ant.xml @@ -0,0 +1,81 @@ + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/half_cheetah.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/half_cheetah.xml new file mode 100644 index 0000000000000000000000000000000000000000..338c2e87a705c425ab7c2ff733d5c40c96fcc918 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/half_cheetah.xml @@ -0,0 +1,96 @@ + + + + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/hopper.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/hopper.xml new file mode 100644 index 0000000000000000000000000000000000000000..34803032fa5a4bf94534227718836f91c4694f79 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/hopper.xml @@ -0,0 +1,48 @@ + + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoid.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoid.xml new file mode 100644 index 0000000000000000000000000000000000000000..c8d5fc61a1829cfef4f1ef1fc752dc5d4fc3c5d3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoid.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoidstandup.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoidstandup.xml new file mode 100644 index 0000000000000000000000000000000000000000..8dd36ba87a851290ebf4c42d58cfbd65eb54e268 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/humanoidstandup.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_double_pendulum.xml 
b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_double_pendulum.xml new file mode 100644 index 0000000000000000000000000000000000000000..a274e8c5d5b7e13127c52d7cb1dcdff80fbe2222 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_double_pendulum.xml @@ -0,0 +1,47 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_pendulum.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_pendulum.xml new file mode 100644 index 0000000000000000000000000000000000000000..85145042985eb4a1015cd253c84a3bdc5afb9944 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/inverted_pendulum.xml @@ -0,0 +1,27 @@ + + + + + + + + + \ No newline at end of file diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/point.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/point.xml new file mode 100644 index 0000000000000000000000000000000000000000..e35ef3de807585cf5dc586990b608ec1ef30c056 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/point.xml @@ -0,0 +1,31 @@ + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/pusher.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/pusher.xml new file mode 100644 index 0000000000000000000000000000000000000000..31a5ef706065f42c6fa62201edf800cc005b50e7 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/pusher.xml @@ -0,0 +1,91 @@ + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/reacher.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/reacher.xml new file mode 100644 index 0000000000000000000000000000000000000000..151d1f851135eab45c9bce6217bfaa95f8766405 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/reacher.xml @@ -0,0 +1,39 @@ + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/swimmer.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/swimmer.xml new file mode 100644 index 0000000000000000000000000000000000000000..a12287722b9d82cfd9a37636bf93637a63df075f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/swimmer.xml @@ -0,0 +1,39 @@ + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/assets/walker2d.xml b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/walker2d.xml new file mode 100644 index 0000000000000000000000000000000000000000..254ffedbb06edd8eebba2c92ebe150bc34c95328 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/assets/walker2d.xml @@ -0,0 +1,62 @@ + + + + + + + diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah.py b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah.py new file mode 100644 index 0000000000000000000000000000000000000000..d29422a1a50c3ba5d85a8e785536804d8a8c6654 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah.py @@ -0,0 +1,64 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class HalfCheetahEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "half_cheetah.xml", 5, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, action): + xposbefore = self.sim.data.qpos[0] + self.do_simulation(action, self.frame_skip) + xposafter = self.sim.data.qpos[0] + + ob = self._get_obs() + reward_ctrl = -0.1 * 
np.square(action).sum() + reward_run = (xposafter - xposbefore) / self.dt + reward = reward_ctrl + reward_run + terminated = False + + if self.render_mode == "human": + self.render() + return ( + ob, + reward, + terminated, + False, + dict(reward_run=reward_run, reward_ctrl=reward_ctrl), + ) + + def _get_obs(self): + return np.concatenate( + [ + self.sim.data.qpos.flat[1:], + self.sim.data.qvel.flat, + ] + ) + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + low=-0.1, high=0.1, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1 + self.set_state(qpos, qvel) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.distance = self.model.stat.extent * 0.5 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..665ad5fcca5de978965f24ef7d5a0c90251b890a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v3.py @@ -0,0 +1,127 @@ +__credits__ = ["Rushiv Arora"] + +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "distance": 4.0, +} + + +class HalfCheetahEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__( + self, + xml_file="half_cheetah.xml", + forward_reward_weight=1.0, + ctrl_cost_weight=0.1, + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + forward_reward_weight, + ctrl_cost_weight, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + + self._ctrl_cost_weight = ctrl_cost_weight + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 5, observation_space=observation_space, **kwargs + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + def step(self, action): + x_position_before = self.sim.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.sim.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + + observation = self._get_obs() + reward = forward_reward - ctrl_cost + terminated = False + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + "reward_run": forward_reward, + "reward_ctrl": -ctrl_cost, + } + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = self.sim.data.qvel.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def reset_model(self): + 
noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = ( + self.init_qvel + + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv) + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..b30c2861b1df43ddda8154af283d54e529701ee9 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/half_cheetah_v4.py @@ -0,0 +1,245 @@ +__credits__ = ["Rushiv Arora"] + +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "distance": 4.0, +} + + +class HalfCheetahEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is based on the work by P. Wawrzyński in + ["A Cat-Like Robot Real-Time Learning to Run"](http://staff.elka.pw.edu.pl/~pwawrzyn/pub-s/0812_LSCLRR.pdf). + The HalfCheetah is a 2-dimensional robot consisting of 9 links and 8 + joints connecting them (including two paws). The goal is to apply a torque + on the joints to make the cheetah run forward (right) as fast as possible, + with a positive reward allocated based on the distance moved forward and a + negative reward allocated for moving backward. The torso and head of the + cheetah are fixed, and the torque can only be applied on the other 6 joints + over the front and back thighs (connecting to the torso), shins + (connecting to the thighs) and feet (connecting to the shins). + + ### Action Space + The action space is a `Box(-1, 1, (6,), float32)`. An action represents the torques applied between *links*. + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + | --- | --------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ | + | 0 | Torque applied on the back thigh rotor | -1 | 1 | bthigh | hinge | torque (N m) | + | 1 | Torque applied on the back shin rotor | -1 | 1 | bshin | hinge | torque (N m) | + | 2 | Torque applied on the back foot rotor | -1 | 1 | bfoot | hinge | torque (N m) | + | 3 | Torque applied on the front thigh rotor | -1 | 1 | fthigh | hinge | torque (N m) | + | 4 | Torque applied on the front shin rotor | -1 | 1 | fshin | hinge | torque (N m) | + | 5 | Torque applied on the front foot rotor | -1 | 1 | ffoot | hinge | torque (N m) | + + + ### Observation Space + + Observations consist of positional values of different body parts of the + cheetah, followed by the velocities of those individual parts (their derivatives) with all the positions ordered before all the velocities. + + By default, observations do not include the x-coordinate of the cheetah's center of mass. It may + be included by passing `exclude_current_positions_from_observation=False` during construction. + In that case, the observation space will have 18 dimensions where the first dimension + represents the x-coordinate of the cheetah's center of mass. 
+ Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x-coordinate + will be returned in `info` with key `"x_position"`. + + However, by default, the observation is a `ndarray` with shape `(17,)` where the elements correspond to the following: + + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | z-coordinate of the front tip | -Inf | Inf | rootz | slide | position (m) | + | 1 | angle of the front tip | -Inf | Inf | rooty | hinge | angle (rad) | + | 2 | angle of the second rotor | -Inf | Inf | bthigh | hinge | angle (rad) | + | 3 | angle of the second rotor | -Inf | Inf | bshin | hinge | angle (rad) | + | 4 | velocity of the tip along the x-axis | -Inf | Inf | bfoot | hinge | angle (rad) | + | 5 | velocity of the tip along the y-axis | -Inf | Inf | fthigh | hinge | angle (rad) | + | 6 | angular velocity of front tip | -Inf | Inf | fshin | hinge | angle (rad) | + | 7 | angular velocity of second rotor | -Inf | Inf | ffoot | hinge | angle (rad) | + | 8 | x-coordinate of the front tip | -Inf | Inf | rootx | slide | velocity (m/s) | + | 9 | y-coordinate of the front tip | -Inf | Inf | rootz | slide | velocity (m/s) | + | 10 | angle of the front tip | -Inf | Inf | rooty | hinge | angular velocity (rad/s) | + | 11 | angle of the second rotor | -Inf | Inf | bthigh | hinge | angular velocity (rad/s) | + | 12 | angle of the second rotor | -Inf | Inf | bshin | hinge | angular velocity (rad/s) | + | 13 | velocity of the tip along the x-axis | -Inf | Inf | bfoot | hinge | angular velocity (rad/s) | + | 14 | velocity of the tip along the y-axis | -Inf | Inf | fthigh | hinge | angular velocity (rad/s) | + | 15 | angular velocity of front tip | -Inf | Inf | fshin | hinge | angular velocity (rad/s) | + | 16 | angular velocity of second rotor | -Inf | Inf | ffoot | hinge | angular velocity (rad/s) | + + ### Rewards + The reward consists of two parts: + - *forward_reward*: A reward of moving forward which is measured + as *`forward_reward_weight` * (x-coordinate before action - x-coordinate after action)/dt*. *dt* is + the time between actions and is dependent on the frame_skip parameter + (fixed to 5), where the frametime is 0.01 - making the + default *dt = 5 * 0.01 = 0.05*. This reward would be positive if the cheetah + runs forward (right). + - *ctrl_cost*: A cost for penalising the cheetah if it takes + actions that are too large. It is measured as *`ctrl_cost_weight` * + sum(action2)* where *`ctrl_cost_weight`* is a parameter set for the + control and has a default value of 0.1 + + The total reward returned is ***reward*** *=* *forward_reward - ctrl_cost* and `info` will also contain the individual reward terms + + ### Starting State + All observations start in state (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,) with a noise added to the + initial state for stochasticity. As seen before, the first 8 values in the + state are positional and the last 9 values are velocity. A uniform noise in + the range of [-`reset_noise_scale`, `reset_noise_scale`] is added to the positional values while a standard + normal noise with a mean of 0 and standard deviation of `reset_noise_scale` is added to the + initial velocity values of all zeros. + + ### Episode End + The episode truncates when the episode length is greater than 1000. 
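# --- Editor's illustrative sketch (not part of the vendored diff) ---
# The two-term reward described in the Rewards section above, reproduced with
# plain NumPy and following the sign convention of the step() implementation
# below (velocity = (x_after - x_before) / dt). The helper name and inputs are
# assumptions made for illustration only.
import numpy as np

FORWARD_REWARD_WEIGHT = 1.0
CTRL_COST_WEIGHT = 0.1
DT = 5 * 0.01  # frame_skip * frametime, i.e. the default dt = 0.05


def half_cheetah_reward(x_before, x_after, action):
    x_velocity = (x_after - x_before) / DT
    forward_reward = FORWARD_REWARD_WEIGHT * x_velocity
    ctrl_cost = CTRL_COST_WEIGHT * float(np.sum(np.square(action)))
    return forward_reward - ctrl_cost


# Moving 0.05 m forward in one step with zero torque yields a reward of 1.0.
print(half_cheetah_reward(0.0, 0.05, np.zeros(6)))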
+ + ### Arguments + + No additional arguments are currently supported in v2 and lower. + + ``` + env = gym.make('HalfCheetah-v2') + ``` + + v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('HalfCheetah-v4', ctrl_cost_weight=0.1, ....) + ``` + + | Parameter | Type | Default | Description | + | -------------------------------------------- | --------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `xml_file` | **str** | `"half_cheetah.xml"` | Path to a MuJoCo model | + | `forward_reward_weight` | **float** | `1.0` | Weight for _forward_reward_ term (see section on reward) | + | `ctrl_cost_weight` | **float** | `0.1` | Weight for _ctrl_cost_ weight (see section on reward) | + | `reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__( + self, + forward_reward_weight=1.0, + ctrl_cost_weight=0.1, + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + forward_reward_weight, + ctrl_cost_weight, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + + self._ctrl_cost_weight = ctrl_cost_weight + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64 + ) + + MujocoEnv.__init__( + self, "half_cheetah.xml", 5, observation_space=observation_space, **kwargs + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + def step(self, action): + x_position_before = self.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + + observation = self._get_obs() + reward = forward_reward - ctrl_cost + terminated = False + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + "reward_run": forward_reward, + "reward_ctrl": -ctrl_cost, + } + + if self.render_mode == "human": + self.render() + return 
observation, reward, terminated, False, info + + def _get_obs(self): + position = self.data.qpos.flat.copy() + velocity = self.data.qvel.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = ( + self.init_qvel + + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv) + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/hopper.py b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper.py new file mode 100644 index 0000000000000000000000000000000000000000..315f605c448333ee9ada85fa4b634c8557fe50a9 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper.py @@ -0,0 +1,67 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class HopperEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 125, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "hopper.xml", 4, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, a): + posbefore = self.sim.data.qpos[0] + self.do_simulation(a, self.frame_skip) + posafter, height, ang = self.sim.data.qpos[0:3] + + alive_bonus = 1.0 + reward = (posafter - posbefore) / self.dt + reward += alive_bonus + reward -= 1e-3 * np.square(a).sum() + s = self.state_vector() + terminated = not ( + np.isfinite(s).all() + and (np.abs(s[2:]) < 100).all() + and (height > 0.7) + and (abs(ang) < 0.2) + ) + ob = self._get_obs() + + if self.render_mode == "human": + self.render() + return ob, reward, terminated, False, {} + + def _get_obs(self): + return np.concatenate( + [self.sim.data.qpos.flat[1:], np.clip(self.sim.data.qvel.flat, -10, 10)] + ) + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nv + ) + self.set_state(qpos, qvel) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 2 + self.viewer.cam.distance = self.model.stat.extent * 0.75 + self.viewer.cam.lookat[2] = 1.15 + self.viewer.cam.elevation = -20 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..e31fc06a7864ffcf1c5563dd90307d7788507aab --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v3.py @@ -0,0 +1,177 @@ +__credits__ = ["Rushiv Arora"] + +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 2, + "distance": 3.0, + "lookat": np.array((0.0, 0.0, 1.15)), 
+ "elevation": -20.0, +} + + +class HopperEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 125, + } + + def __init__( + self, + xml_file="hopper.xml", + forward_reward_weight=1.0, + ctrl_cost_weight=1e-3, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_state_range=(-100.0, 100.0), + healthy_z_range=(0.7, float("inf")), + healthy_angle_range=(-0.2, 0.2), + reset_noise_scale=5e-3, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + forward_reward_weight, + ctrl_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_state_range, + healthy_z_range, + healthy_angle_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + + self._ctrl_cost_weight = ctrl_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + + self._healthy_state_range = healthy_state_range + self._healthy_z_range = healthy_z_range + self._healthy_angle_range = healthy_angle_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(12,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 4, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def is_healthy(self): + z, angle = self.sim.data.qpos[1:3] + state = self.state_vector()[2:] + + min_state, max_state = self._healthy_state_range + min_z, max_z = self._healthy_z_range + min_angle, max_angle = self._healthy_angle_range + + healthy_state = np.all(np.logical_and(min_state < state, state < max_state)) + healthy_z = min_z < z < max_z + healthy_angle = min_angle < angle < max_angle + + is_healthy = all((healthy_state, healthy_z, healthy_angle)) + + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10) + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def step(self, action): + x_position_before = self.sim.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.sim.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + + observation = self._get_obs() + reward = rewards - costs + terminated = self.terminated + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + } + + if self.render_mode == "human": + self.render() + return observation, 
reward, terminated, False, info + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..b3490b7aa13fa75dcd77f204a94628ef76f4bb60 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/hopper_v4.py @@ -0,0 +1,298 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 2, + "distance": 3.0, + "lookat": np.array((0.0, 0.0, 1.15)), + "elevation": -20.0, +} + + +class HopperEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is based on the work done by Erez, Tassa, and Todorov in + ["Infinite Horizon Model Predictive Control for Nonlinear Periodic Tasks"](http://www.roboticsproceedings.org/rss07/p10.pdf). The environment aims to + increase the number of independent state and control variables as compared to + the classic control environments. The hopper is a two-dimensional + one-legged figure that consist of four main body parts - the torso at the + top, the thigh in the middle, the leg in the bottom, and a single foot on + which the entire body rests. The goal is to make hops that move in the + forward (right) direction by applying torques on the three hinges + connecting the four body parts. + + ### Action Space + The action space is a `Box(-1, 1, (3,), float32)`. An action represents the torques applied between *links* + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|------------------------------------|-------------|-------------|----------------------------------|-------|--------------| + | 0 | Torque applied on the thigh rotor | -1 | 1 | thigh_joint | hinge | torque (N m) | + | 1 | Torque applied on the leg rotor | -1 | 1 | leg_joint | hinge | torque (N m) | + | 3 | Torque applied on the foot rotor | -1 | 1 | foot_joint | hinge | torque (N m) | + + ### Observation Space + + Observations consist of positional values of different body parts of the + hopper, followed by the velocities of those individual parts + (their derivatives) with all the positions ordered before all the velocities. + + By default, observations do not include the x-coordinate of the hopper. It may + be included by passing `exclude_current_positions_from_observation=False` during construction. + In that case, the observation space will have 12 dimensions where the first dimension + represents the x-coordinate of the hopper. + Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x-coordinate + will be returned in `info` with key `"x_position"`. 
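# --- Editor's illustrative sketch (not part of the vendored diff) ---
# Even with the default observation (x-coordinate excluded), the hopper's
# x-position and x-velocity remain available through `info`, as described
# above. Assumes gym>=0.26 with the MuJoCo bindings installed.
import gym

env = gym.make("Hopper-v4")
obs, _ = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
print(obs.shape, info["x_position"], info["x_velocity"])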
+ + However, by default, the observation is a `ndarray` with shape `(11,)` where the elements + correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ------------------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | z-coordinate of the top (height of hopper) | -Inf | Inf | rootz | slide | position (m) | + | 1 | angle of the top | -Inf | Inf | rooty | hinge | angle (rad) | + | 2 | angle of the thigh joint | -Inf | Inf | thigh_joint | hinge | angle (rad) | + | 3 | angle of the leg joint | -Inf | Inf | leg_joint | hinge | angle (rad) | + | 4 | angle of the foot joint | -Inf | Inf | foot_joint | hinge | angle (rad) | + | 5 | velocity of the x-coordinate of the top | -Inf | Inf | rootx | slide | velocity (m/s) | + | 6 | velocity of the z-coordinate (height) of the top | -Inf | Inf | rootz | slide | velocity (m/s) | + | 7 | angular velocity of the angle of the top | -Inf | Inf | rooty | hinge | angular velocity (rad/s) | + | 8 | angular velocity of the thigh hinge | -Inf | Inf | thigh_joint | hinge | angular velocity (rad/s) | + | 9 | angular velocity of the leg hinge | -Inf | Inf | leg_joint | hinge | angular velocity (rad/s) | + | 10 | angular velocity of the foot hinge | -Inf | Inf | foot_joint | hinge | angular velocity (rad/s) | + + + ### Rewards + The reward consists of three parts: + - *healthy_reward*: Every timestep that the hopper is healthy (see definition in section "Episode Termination"), it gets a reward of fixed value `healthy_reward`. + - *forward_reward*: A reward of hopping forward which is measured + as *`forward_reward_weight` * (x-coordinate before action - x-coordinate after action)/dt*. *dt* is + the time between actions and is dependent on the frame_skip parameter + (fixed to 4), where the frametime is 0.002 - making the + default *dt = 4 * 0.002 = 0.008*. This reward would be positive if the hopper + hops forward (positive x direction). + - *ctrl_cost*: A cost for penalising the hopper if it takes + actions that are too large. It is measured as *`ctrl_cost_weight` * + sum(action2)* where *`ctrl_cost_weight`* is a parameter set for the + control and has a default value of 0.001 + + The total reward returned is ***reward*** *=* *healthy_reward + forward_reward - ctrl_cost* and `info` will also contain the individual reward terms + + ### Starting State + All observations start in state + (0.0, 1.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) with a uniform noise + in the range of [-`reset_noise_scale`, `reset_noise_scale`] added to the values for stochasticity. + + ### Episode End + The hopper is said to be unhealthy if any of the following happens: + + 1. An element of `observation[1:]` (if `exclude_current_positions_from_observation=True`, else `observation[2:]`) is no longer contained in the closed interval specified by the argument `healthy_state_range` + 2. The height of the hopper (`observation[0]` if `exclude_current_positions_from_observation=True`, else `observation[1]`) is no longer contained in the closed interval specified by the argument `healthy_z_range` (usually meaning that it has fallen) + 3. 
The angle (`observation[1]` if `exclude_current_positions_from_observation=True`, else `observation[2]`) is no longer contained in the closed interval specified by the argument `healthy_angle_range` + + If `terminate_when_unhealthy=True` is passed during construction (which is the default), + the episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 1000 timesteps + 2. Termination: The hopper is unhealthy + + If `terminate_when_unhealthy=False` is passed, the episode is ended only when 1000 timesteps are exceeded. + + ### Arguments + + No additional arguments are currently supported in v2 and lower. + + ``` + env = gym.make('Hopper-v2') + ``` + + v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('Hopper-v4', ctrl_cost_weight=0.1, ....) + ``` + + | Parameter | Type | Default | Description | + | -------------------------------------------- | --------- | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `xml_file` | **str** | `"hopper.xml"` | Path to a MuJoCo model | + | `forward_reward_weight` | **float** | `1.0` | Weight for _forward_reward_ term (see section on reward) | + | `ctrl_cost_weight` | **float** | `0.001` | Weight for _ctrl_cost_ reward (see section on reward) | + | `healthy_reward` | **float** | `1` | Constant reward given if the ant is "healthy" after timestep | + | `terminate_when_unhealthy` | **bool** | `True` | If true, issue a done signal if the hopper is no longer healthy | + | `healthy_state_range` | **tuple** | `(-100, 100)` | The elements of `observation[1:]` (if `exclude_current_positions_from_observation=True`, else `observation[2:]`) must be in this range for the hopper to be considered healthy | + | `healthy_z_range` | **tuple** | `(0.7, float("inf"))` | The z-coordinate must be in this range for the hopper to be considered healthy | + | `healthy_angle_range` | **tuple** | `(-0.2, 0.2)` | The angle given by `observation[1]` (if `exclude_current_positions_from_observation=True`, else `observation[2]`) must be in this range for the hopper to be considered healthy | + | `reset_noise_scale` | **float** | `5e-3` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. 
+ * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 125, + } + + def __init__( + self, + forward_reward_weight=1.0, + ctrl_cost_weight=1e-3, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_state_range=(-100.0, 100.0), + healthy_z_range=(0.7, float("inf")), + healthy_angle_range=(-0.2, 0.2), + reset_noise_scale=5e-3, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + forward_reward_weight, + ctrl_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_state_range, + healthy_z_range, + healthy_angle_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + + self._ctrl_cost_weight = ctrl_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + + self._healthy_state_range = healthy_state_range + self._healthy_z_range = healthy_z_range + self._healthy_angle_range = healthy_angle_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(12,), dtype=np.float64 + ) + + MujocoEnv.__init__( + self, "hopper.xml", 4, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def is_healthy(self): + z, angle = self.data.qpos[1:3] + state = self.state_vector()[2:] + + min_state, max_state = self._healthy_state_range + min_z, max_z = self._healthy_z_range + min_angle, max_angle = self._healthy_angle_range + + healthy_state = np.all(np.logical_and(min_state < state, state < max_state)) + healthy_z = min_z < z < max_z + healthy_angle = min_angle < angle < max_angle + + is_healthy = all((healthy_state, healthy_z, healthy_angle)) + + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.data.qpos.flat.copy() + velocity = np.clip(self.data.qvel.flat.copy(), -10, 10) + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def step(self, action): + x_position_before = self.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + + observation = self._get_obs() + reward = rewards - costs + terminated = self.terminated + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + } + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def reset_model(self): + noise_low = 
-self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid.py b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid.py new file mode 100644 index 0000000000000000000000000000000000000000..5dde40a0a267c948724f157044e3f0cd896bf572 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid.py @@ -0,0 +1,94 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +def mass_center(model, sim): + mass = np.expand_dims(model.body_mass, 1) + xpos = sim.data.xipos + return (np.sum(mass * xpos, 0) / np.sum(mass))[0] + + +class HumanoidEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 67, + } + + def __init__(self, **kwargs): + observation_space = Box( + low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64 + ) + MuJocoPyEnv.__init__( + self, "humanoid.xml", 5, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def _get_obs(self): + data = self.sim.data + return np.concatenate( + [ + data.qpos.flat[2:], + data.qvel.flat, + data.cinert.flat, + data.cvel.flat, + data.qfrc_actuator.flat, + data.cfrc_ext.flat, + ] + ) + + def step(self, a): + pos_before = mass_center(self.model, self.sim) + self.do_simulation(a, self.frame_skip) + pos_after = mass_center(self.model, self.sim) + + alive_bonus = 5.0 + data = self.sim.data + lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt + quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() + quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum() + quad_impact_cost = min(quad_impact_cost, 10) + reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus + qpos = self.sim.data.qpos + terminated = bool((qpos[2] < 1.0) or (qpos[2] > 2.0)) + + if self.render_mode == "human": + self.render() + return ( + self._get_obs(), + reward, + terminated, + False, + dict( + reward_linvel=lin_vel_cost, + reward_quadctrl=-quad_ctrl_cost, + reward_alive=alive_bonus, + reward_impact=-quad_impact_cost, + ), + ) + + def reset_model(self): + c = 0.01 + self.set_state( + self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), + self.init_qvel + + self.np_random.uniform( + low=-c, + high=c, + size=self.model.nv, + ), + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 1 + self.viewer.cam.distance = self.model.stat.extent * 1.0 + self.viewer.cam.lookat[2] = 2.0 + self.viewer.cam.elevation = -20 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..36e53bf180214fa1f1eea34862f2313227c3a7ed --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v3.py @@ -0,0 +1,199 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import 
MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 1, + "distance": 4.0, + "lookat": np.array((0.0, 0.0, 2.0)), + "elevation": -20.0, +} + + +def mass_center(model, sim): + mass = np.expand_dims(model.body_mass, axis=1) + xpos = sim.data.xipos + return (np.sum(mass * xpos, axis=0) / np.sum(mass))[0:2].copy() + + +class HumanoidEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 67, + } + + def __init__( + self, + xml_file="humanoid.xml", + forward_reward_weight=1.25, + ctrl_cost_weight=0.1, + contact_cost_weight=5e-7, + contact_cost_range=(-np.inf, 10.0), + healthy_reward=5.0, + terminate_when_unhealthy=True, + healthy_z_range=(1.0, 2.0), + reset_noise_scale=1e-2, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + forward_reward_weight, + ctrl_cost_weight, + contact_cost_weight, + contact_cost_range, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + self._contact_cost_weight = contact_cost_weight + self._contact_cost_range = contact_cost_range + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + self._healthy_z_range = healthy_z_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(378,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 5, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(self.sim.data.ctrl)) + return control_cost + + @property + def contact_cost(self): + contact_forces = self.sim.data.cfrc_ext + contact_cost = self._contact_cost_weight * np.sum(np.square(contact_forces)) + min_cost, max_cost = self._contact_cost_range + contact_cost = np.clip(contact_cost, min_cost, max_cost) + return contact_cost + + @property + def is_healthy(self): + min_z, max_z = self._healthy_z_range + is_healthy = min_z < self.sim.data.qpos[2] < max_z + + return is_healthy + + @property + def terminated(self): + terminated = (not self.is_healthy) if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = self.sim.data.qvel.flat.copy() + + com_inertia = self.sim.data.cinert.flat.copy() + com_velocity = self.sim.data.cvel.flat.copy() + + actuator_forces = self.sim.data.qfrc_actuator.flat.copy() + external_contact_forces = self.sim.data.cfrc_ext.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + return np.concatenate( + ( + position, + velocity, + com_inertia, + com_velocity, + actuator_forces, + external_contact_forces, + ) + ) + + def step(self, action): + xy_position_before = mass_center(self.model, self.sim) + self.do_simulation(action, self.frame_skip) + xy_position_after = 
mass_center(self.model, self.sim) + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + ctrl_cost = self.control_cost(action) + contact_cost = self.contact_cost + + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + contact_cost + + observation = self._get_obs() + reward = rewards - costs + terminated = self.terminated + info = { + "reward_linvel": forward_reward, + "reward_quadctrl": -ctrl_cost, + "reward_alive": healthy_reward, + "reward_impact": -contact_cost, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..add57a02fe00af47c0a59661f6779feef0e6fa44 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoid_v4.py @@ -0,0 +1,374 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 1, + "distance": 4.0, + "lookat": np.array((0.0, 0.0, 2.0)), + "elevation": -20.0, +} + + +def mass_center(model, data): + mass = np.expand_dims(model.body_mass, axis=1) + xpos = data.xipos + return (np.sum(mass * xpos, axis=0) / np.sum(mass))[0:2].copy() + + +class HumanoidEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is based on the environment introduced by Tassa, Erez and Todorov + in ["Synthesis and stabilization of complex behaviors through online trajectory optimization"](https://ieeexplore.ieee.org/document/6386025). + The 3D bipedal robot is designed to simulate a human. It has a torso (abdomen) with a pair of + legs and arms. The legs each consist of two links, and so the arms (representing the knees and + elbows respectively). The goal of the environment is to walk forward as fast as possible without falling over. + + ### Action Space + The action space is a `Box(-1, 1, (17,), float32)`. An action represents the torques applied at the hinge joints. 
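# --- Editor's illustrative sketch (not part of the vendored diff) ---
# Sampling one torque command per actuated hinge from the 17-dimensional
# Humanoid action space described above (the individual actuators are listed
# in the table below). Assumes gym>=0.26 with the MuJoCo bindings installed.
import gym

env = gym.make("Humanoid-v4")
action = env.action_space.sample()  # shape (17,), one torque per hinge actuator
print(env.action_space.shape, action[:3])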
+ + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|----------------------|---------------|----------------|---------------------------------------|-------|------| + | 0 | Torque applied on the hinge in the y-coordinate of the abdomen | -0.4 | 0.4 | hip_1 (front_left_leg) | hinge | torque (N m) | + | 1 | Torque applied on the hinge in the z-coordinate of the abdomen | -0.4 | 0.4 | angle_1 (front_left_leg) | hinge | torque (N m) | + | 2 | Torque applied on the hinge in the x-coordinate of the abdomen | -0.4 | 0.4 | hip_2 (front_right_leg) | hinge | torque (N m) | + | 3 | Torque applied on the rotor between torso/abdomen and the right hip (x-coordinate) | -0.4 | 0.4 | right_hip_x (right_thigh) | hinge | torque (N m) | + | 4 | Torque applied on the rotor between torso/abdomen and the right hip (z-coordinate) | -0.4 | 0.4 | right_hip_z (right_thigh) | hinge | torque (N m) | + | 5 | Torque applied on the rotor between torso/abdomen and the right hip (y-coordinate) | -0.4 | 0.4 | right_hip_y (right_thigh) | hinge | torque (N m) | + | 6 | Torque applied on the rotor between the right hip/thigh and the right shin | -0.4 | 0.4 | right_knee | hinge | torque (N m) | + | 7 | Torque applied on the rotor between torso/abdomen and the left hip (x-coordinate) | -0.4 | 0.4 | left_hip_x (left_thigh) | hinge | torque (N m) | + | 8 | Torque applied on the rotor between torso/abdomen and the left hip (z-coordinate) | -0.4 | 0.4 | left_hip_z (left_thigh) | hinge | torque (N m) | + | 9 | Torque applied on the rotor between torso/abdomen and the left hip (y-coordinate) | -0.4 | 0.4 | left_hip_y (left_thigh) | hinge | torque (N m) | + | 10 | Torque applied on the rotor between the left hip/thigh and the left shin | -0.4 | 0.4 | left_knee | hinge | torque (N m) | + | 11 | Torque applied on the rotor between the torso and right upper arm (coordinate -1) | -0.4 | 0.4 | right_shoulder1 | hinge | torque (N m) | + | 12 | Torque applied on the rotor between the torso and right upper arm (coordinate -2) | -0.4 | 0.4 | right_shoulder2 | hinge | torque (N m) | + | 13 | Torque applied on the rotor between the right upper arm and right lower arm | -0.4 | 0.4 | right_elbow | hinge | torque (N m) | + | 14 | Torque applied on the rotor between the torso and left upper arm (coordinate -1) | -0.4 | 0.4 | left_shoulder1 | hinge | torque (N m) | + | 15 | Torque applied on the rotor between the torso and left upper arm (coordinate -2) | -0.4 | 0.4 | left_shoulder2 | hinge | torque (N m) | + | 16 | Torque applied on the rotor between the left upper arm and left lower arm | -0.4 | 0.4 | left_elbow | hinge | torque (N m) | + + ### Observation Space + + Observations consist of positional values of different body parts of the Humanoid, + followed by the velocities of those individual parts (their derivatives) with all the + positions ordered before all the velocities. + + By default, observations do not include the x- and y-coordinates of the torso. These may + be included by passing `exclude_current_positions_from_observation=False` during construction. + In that case, the observation space will have 378 dimensions where the first two dimensions + represent the x- and y-coordinates of the torso. + Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x- and y-coordinates + will be returned in `info` with keys `"x_position"` and `"y_position"`, respectively. 
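# --- Editor's illustrative sketch (not part of the vendored diff) ---
# Keeping the torso x/y position in the observation via the constructor flag
# described above (378 dims instead of the default 376); the coordinates are
# also always reported in `info`. Assumes gym>=0.26 with the MuJoCo bindings
# installed.
import gym

env = gym.make("Humanoid-v4", exclude_current_positions_from_observation=False)
obs, _ = env.reset(seed=0)
print(obs.shape)  # (378,); the default flag value gives (376,)

_, _, _, _, info = env.step(env.action_space.sample())
print(info["x_position"], info["y_position"])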
+ + However, by default, the observation is a `ndarray` with shape `(376,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | --------------------------------------------------------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | -------------------------- | + | 0 | z-coordinate of the torso (centre) | -Inf | Inf | root | free | position (m) | + | 1 | x-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 2 | y-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 3 | z-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 4 | w-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 5 | z-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_z | hinge | angle (rad) | + | 6 | y-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_y | hinge | angle (rad) | + | 7 | x-angle of the abdomen (in pelvis) | -Inf | Inf | abdomen_x | hinge | angle (rad) | + | 8 | x-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_x | hinge | angle (rad) | + | 9 | z-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_z | hinge | angle (rad) | + | 19 | y-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_y | hinge | angle (rad) | + | 11 | angle between right hip and the right shin (in right_knee) | -Inf | Inf | right_knee | hinge | angle (rad) | + | 12 | x-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_x | hinge | angle (rad) | + | 13 | z-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_z | hinge | angle (rad) | + | 14 | y-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_y | hinge | angle (rad) | + | 15 | angle between left hip and the left shin (in left_knee) | -Inf | Inf | left_knee | hinge | angle (rad) | + | 16 | coordinate-1 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder1 | hinge | angle (rad) | + | 17 | coordinate-2 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder2 | hinge | angle (rad) | + | 18 | angle between right upper arm and right_lower_arm | -Inf | Inf | right_elbow | hinge | angle (rad) | + | 19 | coordinate-1 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder1 | hinge | angle (rad) | + | 20 | coordinate-2 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder2 | hinge | angle (rad) | + | 21 | angle between left upper arm and left_lower_arm | -Inf | Inf | left_elbow | hinge | angle (rad) | + | 22 | x-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 23 | y-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 24 | z-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 25 | x-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | anglular velocity (rad/s) | + | 26 | y-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | anglular velocity (rad/s) | + | 27 | z-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | anglular 
velocity (rad/s) | + | 28 | z-coordinate of angular velocity of the abdomen (in lower_waist) | -Inf | Inf | abdomen_z | hinge | anglular velocity (rad/s) | + | 29 | y-coordinate of angular velocity of the abdomen (in lower_waist) | -Inf | Inf | abdomen_y | hinge | anglular velocity (rad/s) | + | 30 | x-coordinate of angular velocity of the abdomen (in pelvis) | -Inf | Inf | abdomen_x | hinge | aanglular velocity (rad/s) | + | 31 | x-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_x | hinge | anglular velocity (rad/s) | + | 32 | z-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_z | hinge | anglular velocity (rad/s) | + | 33 | y-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_y | hinge | anglular velocity (rad/s) | + | 34 | angular velocity of the angle between right hip and the right shin (in right_knee) | -Inf | Inf | right_knee | hinge | anglular velocity (rad/s) | + | 35 | x-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_x | hinge | anglular velocity (rad/s) | + | 36 | z-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_z | hinge | anglular velocity (rad/s) | + | 37 | y-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_y | hinge | anglular velocity (rad/s) | + | 38 | angular velocity of the angle between left hip and the left shin (in left_knee) | -Inf | Inf | left_knee | hinge | anglular velocity (rad/s) | + | 39 | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder1 | hinge | anglular velocity (rad/s) | + | 40 | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder2 | hinge | anglular velocity (rad/s) | + | 41 | angular velocity of the angle between right upper arm and right_lower_arm | -Inf | Inf | right_elbow | hinge | anglular velocity (rad/s) | + | 42 | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder1 | hinge | anglular velocity (rad/s) | + | 43 | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder2 | hinge | anglular velocity (rad/s) | + | 44 | angular velocitty of the angle between left upper arm and left_lower_arm | -Inf | Inf | left_elbow | hinge | anglular velocity (rad/s) | + + Additionally, after all the positional and velocity based values in the table, + the observation contains (in order): + - *cinert:* Mass and inertia of a single rigid body relative to the center of mass + (this is an intermediate result of transition). It has shape 14*10 (*nbody * 10*) + and hence adds to another 140 elements in the state space. + - *cvel:* Center of mass based velocity. It has shape 14 * 6 (*nbody * 6*) and hence + adds another 84 elements in the state space + - *qfrc_actuator:* Constraint force generated as the actuator force. This has shape + `(23,)` *(nv * 1)* and hence adds another 23 elements to the state space. + - *cfrc_ext:* This is the center of mass based external force on the body. 
It has shape + 14 * 6 (*nbody * 6*) and hence adds to another 84 elements in the state space. + where *nbody* stands for the number of bodies in the robot and *nv* stands for the + number of degrees of freedom (*= dim(qvel)*) + + The (x,y,z) coordinates are translational DOFs while the orientations are rotational + DOFs expressed as quaternions. One can read more about free joints on the + [Mujoco Documentation](https://mujoco.readthedocs.io/en/latest/XMLreference.html). + + **Note:** Humanoid-v4 environment no longer has the following contact forces issue. + If using previous Humanoid versions from v4, there have been reported issues that using a Mujoco-Py version > 2.0 + results in the contact forces always being 0. As such we recommend to use a Mujoco-Py + version < 2.0 when using the Humanoid environment if you would like to report results + with contact forces (if contact forces are not used in your experiments, you can use + version > 2.0). + + ### Rewards + The reward consists of three parts: + - *healthy_reward*: Every timestep that the humanoid is alive (see section Episode Termination for definition), it gets a reward of fixed value `healthy_reward` + - *forward_reward*: A reward of walking forward which is measured as *`forward_reward_weight` * + (average center of mass before action - average center of mass after action)/dt*. + *dt* is the time between actions and is dependent on the frame_skip parameter + (default is 5), where the frametime is 0.003 - making the default *dt = 5 * 0.003 = 0.015*. + This reward would be positive if the humanoid walks forward (in positive x-direction). The calculation + for the center of mass is defined in the `.py` file for the Humanoid. + - *ctrl_cost*: A negative reward for penalising the humanoid if it has too + large of a control force. If there are *nu* actuators/controls, then the control has + shape `nu x 1`. It is measured as *`ctrl_cost_weight` * sum(control2)*. + - *contact_cost*: A negative reward for penalising the humanoid if the external + contact force is too large. It is calculated by clipping + *`contact_cost_weight` * sum(external contact force2)* to the interval specified by `contact_cost_range`. + + The total reward returned is ***reward*** *=* *healthy_reward + forward_reward - ctrl_cost - contact_cost* and `info` will also contain the individual reward terms + + ### Starting State + All observations start in state + (0.0, 0.0, 1.4, 1.0, 0.0 ... 0.0) with a uniform noise in the range + of [-`reset_noise_scale`, `reset_noise_scale`] added to the positional and velocity values (values in the table) + for stochasticity. Note that the initial z coordinate is intentionally + selected to be high, thereby indicating a standing up humanoid. The initial + orientation is designed to make it face forward as well. + + ### Episode End + The humanoid is said to be unhealthy if the z-position of the torso is no longer contained in the + closed interval specified by the argument `healthy_z_range`. + + If `terminate_when_unhealthy=True` is passed during construction (which is the default), + the episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 1000 timesteps + 3. Termination: The humanoid is unhealthy + + If `terminate_when_unhealthy=False` is passed, the episode is ended only when 1000 timesteps are exceeded. + + ### Arguments + + No additional arguments are currently supported in v2 and lower. 
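# --- Editor's illustrative sketch (not part of the vendored diff) ---
# The individual reward terms from the Rewards section above are reported per
# step through `info` (see the step() implementation further below). Assumes
# gym>=0.26 with the MuJoCo bindings installed.
import gym

env = gym.make("Humanoid-v4")
env.reset(seed=0)
_, reward, terminated, truncated, info = env.step(env.action_space.sample())
print(reward, info["reward_alive"], info["reward_linvel"], info["reward_quadctrl"])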
+ + ``` + env = gym.make('Humanoid-v4') + ``` + + v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('Humanoid-v4', ctrl_cost_weight=0.1, ....) + ``` + + | Parameter | Type | Default | Description | + | -------------------------------------------- | --------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `xml_file` | **str** | `"humanoid.xml"` | Path to a MuJoCo model | + | `forward_reward_weight` | **float** | `1.25` | Weight for _forward_reward_ term (see section on reward) | + | `ctrl_cost_weight` | **float** | `0.1` | Weight for _ctrl_cost_ term (see section on reward) | + | `contact_cost_weight` | **float** | `5e-7` | Weight for _contact_cost_ term (see section on reward) | + | `healthy_reward` | **float** | `5.0` | Constant reward given if the humanoid is "healthy" after timestep | + | `terminate_when_unhealthy` | **bool** | `True` | If true, issue a done signal if the z-coordinate of the torso is no longer in the `healthy_z_range` | + | `healthy_z_range` | **tuple** | `(1.0, 2.0)` | The humanoid is considered healthy if the z-coordinate of the torso is in this range | + | `reset_noise_scale` | **float** | `1e-2` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x- and y-coordinates from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. 
+ * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 67, + } + + def __init__( + self, + forward_reward_weight=1.25, + ctrl_cost_weight=0.1, + healthy_reward=5.0, + terminate_when_unhealthy=True, + healthy_z_range=(1.0, 2.0), + reset_noise_scale=1e-2, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + forward_reward_weight, + ctrl_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + self._healthy_z_range = healthy_z_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(378,), dtype=np.float64 + ) + + MujocoEnv.__init__( + self, "humanoid.xml", 5, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(self.data.ctrl)) + return control_cost + + @property + def is_healthy(self): + min_z, max_z = self._healthy_z_range + is_healthy = min_z < self.data.qpos[2] < max_z + + return is_healthy + + @property + def terminated(self): + terminated = (not self.is_healthy) if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.data.qpos.flat.copy() + velocity = self.data.qvel.flat.copy() + + com_inertia = self.data.cinert.flat.copy() + com_velocity = self.data.cvel.flat.copy() + + actuator_forces = self.data.qfrc_actuator.flat.copy() + external_contact_forces = self.data.cfrc_ext.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + return np.concatenate( + ( + position, + velocity, + com_inertia, + com_velocity, + actuator_forces, + external_contact_forces, + ) + ) + + def step(self, action): + xy_position_before = mass_center(self.model, self.data) + self.do_simulation(action, self.frame_skip) + xy_position_after = mass_center(self.model, self.data) + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + + observation = self._get_obs() + reward = rewards - ctrl_cost + terminated = self.terminated + info = { + "reward_linvel": forward_reward, + "reward_quadctrl": -ctrl_cost, + "reward_alive": healthy_reward, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + + if self.render_mode == "human": + self.render() + return observation, reward, terminated, False, info + + def reset_model(self): + noise_low 
= -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup.py b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup.py new file mode 100644 index 0000000000000000000000000000000000000000..db25e12b36fac93ac70e0c94c72aa1c0742599f1 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup.py @@ -0,0 +1,87 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class HumanoidStandupEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 67, + } + + def __init__(self, **kwargs): + observation_space = Box( + low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64 + ) + MuJocoPyEnv.__init__( + self, + "humanoidstandup.xml", + 5, + observation_space=observation_space, + **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def _get_obs(self): + data = self.sim.data + return np.concatenate( + [ + data.qpos.flat[2:], + data.qvel.flat, + data.cinert.flat, + data.cvel.flat, + data.qfrc_actuator.flat, + data.cfrc_ext.flat, + ] + ) + + def step(self, a): + self.do_simulation(a, self.frame_skip) + pos_after = self.sim.data.qpos[2] + data = self.sim.data + uph_cost = (pos_after - 0) / self.model.opt.timestep + + quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() + quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum() + quad_impact_cost = min(quad_impact_cost, 10) + reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1 + + if self.render_mode == "human": + self.render() + return ( + self._get_obs(), + reward, + False, + False, + dict( + reward_linup=uph_cost, + reward_quadctrl=-quad_ctrl_cost, + reward_impact=-quad_impact_cost, + ), + ) + + def reset_model(self): + c = 0.01 + self.set_state( + self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), + self.init_qvel + + self.np_random.uniform( + low=-c, + high=c, + size=self.model.nv, + ), + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 1 + self.viewer.cam.distance = self.model.stat.extent * 1.0 + self.viewer.cam.lookat[2] = 0.8925 + self.viewer.cam.elevation = -20 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..b04351229d900b114420428d194d3d1a88697a86 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/humanoidstandup_v4.py @@ -0,0 +1,259 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + + +class HumanoidStandupEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is based on the environment introduced by Tassa, Erez and Todorov + in ["Synthesis and stabilization of complex behaviors through online trajectory 
optimization"](https://ieeexplore.ieee.org/document/6386025). + The 3D bipedal robot is designed to simulate a human. It has a torso (abdomen) with a + pair of legs and arms. The legs each consist of two links, and so the arms (representing the + knees and elbows respectively). The environment starts with the humanoid laying on the ground, + and then the goal of the environment is to make the humanoid standup and then keep it standing + by applying torques on the various hinges. + + ### Action Space + The agent take a 17-element vector for actions. + + The action space is a continuous `(action, ...)` all in `[-1, 1]`, where `action` + represents the numerical torques applied at the hinge joints. + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ---------------------------------------------------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ | + | 0 | Torque applied on the hinge in the y-coordinate of the abdomen | -0.4 | 0.4 | hip_1 (front_left_leg) | hinge | torque (N m) | + | 1 | Torque applied on the hinge in the z-coordinate of the abdomen | -0.4 | 0.4 | angle_1 (front_left_leg) | hinge | torque (N m) | + | 2 | Torque applied on the hinge in the x-coordinate of the abdomen | -0.4 | 0.4 | hip_2 (front_right_leg) | hinge | torque (N m) | + | 3 | Torque applied on the rotor between torso/abdomen and the right hip (x-coordinate) | -0.4 | 0.4 | right_hip_x (right_thigh) | hinge | torque (N m) | + | 4 | Torque applied on the rotor between torso/abdomen and the right hip (z-coordinate) | -0.4 | 0.4 | right_hip_z (right_thigh) | hinge | torque (N m) | + | 5 | Torque applied on the rotor between torso/abdomen and the right hip (y-coordinate) | -0.4 | 0.4 | right_hip_y (right_thigh) | hinge | torque (N m) | + | 6 | Torque applied on the rotor between the right hip/thigh and the right shin | -0.4 | 0.4 | right_knee | hinge | torque (N m) | + | 7 | Torque applied on the rotor between torso/abdomen and the left hip (x-coordinate) | -0.4 | 0.4 | left_hip_x (left_thigh) | hinge | torque (N m) | + | 8 | Torque applied on the rotor between torso/abdomen and the left hip (z-coordinate) | -0.4 | 0.4 | left_hip_z (left_thigh) | hinge | torque (N m) | + | 9 | Torque applied on the rotor between torso/abdomen and the left hip (y-coordinate) | -0.4 | 0.4 | left_hip_y (left_thigh) | hinge | torque (N m) | + | 10 | Torque applied on the rotor between the left hip/thigh and the left shin | -0.4 | 0.4 | left_knee | hinge | torque (N m) | + | 11 | Torque applied on the rotor between the torso and right upper arm (coordinate -1) | -0.4 | 0.4 | right_shoulder1 | hinge | torque (N m) | + | 12 | Torque applied on the rotor between the torso and right upper arm (coordinate -2) | -0.4 | 0.4 | right_shoulder2 | hinge | torque (N m) | + | 13 | Torque applied on the rotor between the right upper arm and right lower arm | -0.4 | 0.4 | right_elbow | hinge | torque (N m) | + | 14 | Torque applied on the rotor between the torso and left upper arm (coordinate -1) | -0.4 | 0.4 | left_shoulder1 | hinge | torque (N m) | + | 15 | Torque applied on the rotor between the torso and left upper arm (coordinate -2) | -0.4 | 0.4 | left_shoulder2 | hinge | torque (N m) | + | 16 | Torque applied on the rotor between the left upper arm and left lower arm | -0.4 | 0.4 | left_elbow | hinge | torque (N m) | + + ### Observation Space + + The state space consists of positional values of different body parts of the 
Humanoid, + followed by the velocities of those individual parts (their derivatives) with all the positions ordered before all the velocities. + + **Note:** The x- and y-coordinates of the torso are being omitted to produce position-agnostic behavior in policies + + The observation is a `ndarray` with shape `(376,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | --------------------------------------------------------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | -------------------------- | + | 0 | z-coordinate of the torso (centre) | -Inf | Inf | root | free | position (m) | + | 1 | x-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 2 | y-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 3 | z-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 4 | w-orientation of the torso (centre) | -Inf | Inf | root | free | angle (rad) | + | 5 | z-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_z | hinge | angle (rad) | + | 6 | y-angle of the abdomen (in lower_waist) | -Inf | Inf | abdomen_y | hinge | angle (rad) | + | 7 | x-angle of the abdomen (in pelvis) | -Inf | Inf | abdomen_x | hinge | angle (rad) | + | 8 | x-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_x | hinge | angle (rad) | + | 9 | z-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_z | hinge | angle (rad) | + | 10 | y-coordinate of angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_y | hinge | angle (rad) | + | 11 | angle between right hip and the right shin (in right_knee) | -Inf | Inf | right_knee | hinge | angle (rad) | + | 12 | x-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_x | hinge | angle (rad) | + | 13 | z-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_z | hinge | angle (rad) | + | 14 | y-coordinate of angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_y | hinge | angle (rad) | + | 15 | angle between left hip and the left shin (in left_knee) | -Inf | Inf | left_knee | hinge | angle (rad) | + | 16 | coordinate-1 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder1 | hinge | angle (rad) | + | 17 | coordinate-2 (multi-axis) angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder2 | hinge | angle (rad) | + | 18 | angle between right upper arm and right_lower_arm | -Inf | Inf | right_elbow | hinge | angle (rad) | + | 19 | coordinate-1 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder1 | hinge | angle (rad) | + | 20 | coordinate-2 (multi-axis) angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder2 | hinge | angle (rad) | + | 21 | angle between left upper arm and left_lower_arm | -Inf | Inf | left_elbow | hinge | angle (rad) | + | 22 | x-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 23 | y-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 24 | z-coordinate velocity of the torso (centre) | -Inf | Inf | root | free | velocity (m/s) | + | 25 | x-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | 
anglular velocity (rad/s) | + | 26 | y-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | anglular velocity (rad/s) | + | 27 | z-coordinate angular velocity of the torso (centre) | -Inf | Inf | root | free | anglular velocity (rad/s) | + | 28 | z-coordinate of angular velocity of the abdomen (in lower_waist) | -Inf | Inf | abdomen_z | hinge | anglular velocity (rad/s) | + | 29 | y-coordinate of angular velocity of the abdomen (in lower_waist) | -Inf | Inf | abdomen_y | hinge | anglular velocity (rad/s) | + | 30 | x-coordinate of angular velocity of the abdomen (in pelvis) | -Inf | Inf | abdomen_x | hinge | aanglular velocity (rad/s) | + | 31 | x-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_x | hinge | anglular velocity (rad/s) | + | 32 | z-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_z | hinge | anglular velocity (rad/s) | + | 33 | y-coordinate of the angular velocity of the angle between pelvis and right hip (in right_thigh) | -Inf | Inf | right_hip_y | hinge | anglular velocity (rad/s) | + | 35 | angular velocity of the angle between right hip and the right shin (in right_knee) | -Inf | Inf | right_knee | hinge | anglular velocity (rad/s) | + | 36 | x-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_x | hinge | anglular velocity (rad/s) | + | 37 | z-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_z | hinge | anglular velocity (rad/s) | + | 38 | y-coordinate of the angular velocity of the angle between pelvis and left hip (in left_thigh) | -Inf | Inf | left_hip_y | hinge | anglular velocity (rad/s) | + | 39 | angular velocity of the angle between left hip and the left shin (in left_knee) | -Inf | Inf | left_knee | hinge | anglular velocity (rad/s) | + | 40 | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder1 | hinge | anglular velocity (rad/s) | + | 41 | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and right arm (in right_upper_arm) | -Inf | Inf | right_shoulder2 | hinge | anglular velocity (rad/s) | + | 42 | angular velocity of the angle between right upper arm and right_lower_arm | -Inf | Inf | right_elbow | hinge | anglular velocity (rad/s) | + | 43 | coordinate-1 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder1 | hinge | anglular velocity (rad/s) | + | 44 | coordinate-2 (multi-axis) of the angular velocity of the angle between torso and left arm (in left_upper_arm) | -Inf | Inf | left_shoulder2 | hinge | anglular velocity (rad/s) | + | 45 | angular velocitty of the angle between left upper arm and left_lower_arm | -Inf | Inf | left_elbow | hinge | anglular velocity (rad/s) | + + + Additionally, after all the positional and velocity based values in the table, + the state_space consists of (in order): + - *cinert:* Mass and inertia of a single rigid body relative to the center of mass + (this is an intermediate result of transition). It has shape 14*10 (*nbody * 10*) + and hence adds to another 140 elements in the state space. + - *cvel:* Center of mass based velocity. 
It has shape 14 * 6 (*nbody * 6*) and hence + adds another 84 elements in the state space + - *qfrc_actuator:* Constraint force generated as the actuator force. This has shape + `(23,)` *(nv * 1)* and hence adds another 23 elements to the state space. + - *cfrc_ext:* This is the center of mass based external force on the body. It has shape + 14 * 6 (*nbody * 6*) and hence adds to another 84 elements in the state space. + where *nbody* stands for the number of bodies in the robot and *nv* stands for the number + of degrees of freedom (*= dim(qvel)*) + + The (x,y,z) coordinates are translational DOFs while the orientations are rotational + DOFs expressed as quaternions. One can read more about free joints on the + [Mujoco Documentation](https://mujoco.readthedocs.io/en/latest/XMLreference.html). + + **Note:** HumanoidStandup-v4 environment no longer has the following contact forces issue. + If using previous HumanoidStandup versions from v4, there have been reported issues that using a Mujoco-Py version > 2.0 results + in the contact forces always being 0. As such we recommend to use a Mujoco-Py version < 2.0 + when using the Humanoid environment if you would like to report results with contact forces + (if contact forces are not used in your experiments, you can use version > 2.0). + + ### Rewards + The reward consists of three parts: + - *uph_cost*: A reward for moving upward (in an attempt to stand up). This is not a relative + reward which measures how much upward it has moved from the last timestep, but it is an + absolute reward which measures how much upward the Humanoid has moved overall. It is + measured as *(z coordinate after action - 0)/(atomic timestep)*, where *z coordinate after + action* is index 0 in the state/index 2 in the table, and *atomic timestep* is the time for + one frame of movement even though the simulation has a framerate of 5 (done in order to inflate + rewards a little for faster learning). + - *quad_ctrl_cost*: A negative reward for penalising the humanoid if it has too large of + a control force. If there are *nu* actuators/controls, then the control has shape `nu x 1`. + It is measured as *0.1 **x** sum(control2)*. + - *quad_impact_cost*: A negative reward for penalising the humanoid if the external + contact force is too large. It is calculated as *min(0.5 * 0.000001 * sum(external + contact force2), 10)*. + + The total reward returned is ***reward*** *=* *uph_cost + 1 - quad_ctrl_cost - quad_impact_cost* + + ### Starting State + All observations start in state + (0.0, 0.0, 0.105, 1.0, 0.0 ... 0.0) with a uniform noise in the range of + [-0.01, 0.01] added to the positional and velocity values (values in the table) + for stochasticity. Note that the initial z coordinate is intentionally selected + to be low, thereby indicating a laying down humanoid. The initial orientation is + designed to make it face forward as well. + + ### Episode End + The episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 1000 timesteps + 2. Termination: Any of the state space values is no longer finite + + ### Arguments + + No additional arguments are currently supported. + + ``` + env = gym.make('HumanoidStandup-v4') + ``` + + There is no v3 for HumanoidStandup, unlike the robot environments where a v3 and + beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. 
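+
+    As a usage illustration (a sketch, not part of the upstream docstring; it assumes a working
+    MuJoCo installation and the five-item step API used throughout this package), a short
+    random-torque rollout that inspects the reward terms reported in `info` might look like:
+
+    ```
+    import gym
+
+    env = gym.make("HumanoidStandup-v4")
+    obs, info = env.reset(seed=0)
+    assert obs.shape == (376,)
+
+    for _ in range(100):
+        action = env.action_space.sample()  # random torques for the 17 actuators
+        obs, reward, terminated, truncated, info = env.step(action)
+        # reward == info["reward_linup"] + info["reward_quadctrl"] + info["reward_impact"] + 1
+        if terminated or truncated:
+            obs, info = env.reset()
+    env.close()
+    ```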
+ + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 67, + } + + def __init__(self, **kwargs): + observation_space = Box( + low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64 + ) + MujocoEnv.__init__( + self, + "humanoidstandup.xml", + 5, + observation_space=observation_space, + **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def _get_obs(self): + data = self.data + return np.concatenate( + [ + data.qpos.flat[2:], + data.qvel.flat, + data.cinert.flat, + data.cvel.flat, + data.qfrc_actuator.flat, + data.cfrc_ext.flat, + ] + ) + + def step(self, a): + self.do_simulation(a, self.frame_skip) + pos_after = self.data.qpos[2] + data = self.data + uph_cost = (pos_after - 0) / self.model.opt.timestep + + quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() + quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum() + quad_impact_cost = min(quad_impact_cost, 10) + reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1 + + if self.render_mode == "human": + self.render() + return ( + self._get_obs(), + reward, + False, + False, + dict( + reward_linup=uph_cost, + reward_quadctrl=-quad_ctrl_cost, + reward_impact=-quad_impact_cost, + ), + ) + + def reset_model(self): + c = 0.01 + self.set_state( + self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), + self.init_qvel + + self.np_random.uniform( + low=-c, + high=c, + size=self.model.nv, + ), + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 1 + self.viewer.cam.distance = self.model.stat.extent * 1.0 + self.viewer.cam.lookat[2] = 0.8925 + self.viewer.cam.elevation = -20 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum.py b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum.py new file mode 100644 index 0000000000000000000000000000000000000000..639b965579d46d2a92882910038a53c9ac8c62fe --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum.py @@ -0,0 +1,69 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class InvertedDoublePendulumEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, + "inverted_double_pendulum.xml", + 5, + observation_space=observation_space, + **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, action): + self.do_simulation(action, self.frame_skip) + + ob = self._get_obs() + x, _, y = self.sim.data.site_xpos[0] + dist_penalty = 0.01 * x**2 + (y - 2) ** 2 + v1, v2 = self.sim.data.qvel[1:3] + vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2 + alive_bonus = 10 + r = alive_bonus - dist_penalty - vel_penalty + terminated = bool(y <= 1) + + if self.render_mode == "human": + self.render() + return ob, r, 
terminated, False, {} + + def _get_obs(self): + return np.concatenate( + [ + self.sim.data.qpos[:1], # cart x pos + np.sin(self.sim.data.qpos[1:]), # link angles + np.cos(self.sim.data.qpos[1:]), + np.clip(self.sim.data.qvel, -10, 10), + np.clip(self.sim.data.qfrc_constraint, -10, 10), + ] + ).ravel() + + def reset_model(self): + self.set_state( + self.init_qpos + + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq), + self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1, + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + v = self.viewer + v.cam.trackbodyid = 0 + v.cam.distance = self.model.stat.extent * 0.5 + v.cam.lookat[2] = 0.12250000000000005 # v.model.stat.center[2] diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d992564a7fcc54890bf25d57b81f992b5a6212 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_double_pendulum_v4.py @@ -0,0 +1,173 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + + +class InvertedDoublePendulumEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment originates from control theory and builds on the cartpole + environment based on the work done by Barto, Sutton, and Anderson in + ["Neuronlike adaptive elements that can solve difficult learning control problems"](https://ieeexplore.ieee.org/document/6313077), + powered by the Mujoco physics simulator - allowing for more complex experiments + (such as varying the effects of gravity or constraints). This environment involves a cart that can + moved linearly, with a pole fixed on it and a second pole fixed on the other end of the first one + (leaving the second pole as the only one with one free end). The cart can be pushed left or right, + and the goal is to balance the second pole on top of the first pole, which is in turn on top of the + cart, by applying continuous forces on the cart. + + ### Action Space + The agent take a 1-element vector for actions. + The action space is a continuous `(action)` in `[-1, 1]`, where `action` represents the + numerical force applied to the cart (with magnitude representing the amount of force and + sign representing the direction) + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|---------------------------|-------------|-------------|----------------------------------|-------|-----------| + | 0 | Force applied on the cart | -1 | 1 | slider | slide | Force (N) | + + ### Observation Space + + The state space consists of positional values of different body parts of the pendulum system, + followed by the velocities of those individual parts (their derivatives) with all the + positions ordered before all the velocities. 
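+
+    As an illustration of this layout (a sketch mirroring the `_get_obs` method defined later in
+    this file, shown here only for clarity), the 11-element observation is assembled roughly as:
+
+    ```
+    import numpy as np
+
+    def build_observation(data):
+        # data.qpos = [cart_x, theta1, theta2]; data.qvel = [cart_velocity, omega1, omega2]
+        return np.concatenate(
+            [
+                data.qpos[:1],                            # cart position (1)
+                np.sin(data.qpos[1:]),                    # sine of both pole angles (2)
+                np.cos(data.qpos[1:]),                    # cosine of both pole angles (2)
+                np.clip(data.qvel, -10, 10),              # clipped joint velocities (3)
+                np.clip(data.qfrc_constraint, -10, 10),   # clipped constraint forces (3)
+            ]
+        ).ravel()
+    ```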
+ + The observation is a `ndarray` with shape `(11,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ----------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | position of the cart along the linear surface | -Inf | Inf | slider | slide | position (m) | + | 1 | sine of the angle between the cart and the first pole | -Inf | Inf | sin(hinge) | hinge | unitless | + | 2 | sine of the angle between the two poles | -Inf | Inf | sin(hinge2) | hinge | unitless | + | 3 | cosine of the angle between the cart and the first pole | -Inf | Inf | cos(hinge) | hinge | unitless | + | 4 | cosine of the angle between the two poles | -Inf | Inf | cos(hinge2) | hinge | unitless | + | 5 | velocity of the cart | -Inf | Inf | slider | slide | velocity (m/s) | + | 6 | angular velocity of the angle between the cart and the first pole | -Inf | Inf | hinge | hinge | angular velocity (rad/s) | + | 7 | angular velocity of the angle between the two poles | -Inf | Inf | hinge2 | hinge | angular velocity (rad/s) | + | 8 | constraint force - 1 | -Inf | Inf | | | Force (N) | + | 9 | constraint force - 2 | -Inf | Inf | | | Force (N) | + | 10 | constraint force - 3 | -Inf | Inf | | | Force (N) | + + + There is physical contact between the robots and their environment - and Mujoco + attempts at getting realisitic physics simulations for the possible physical contact + dynamics by aiming for physical accuracy and computational efficiency. + + There is one constraint force for contacts for each degree of freedom (3). + The approach and handling of constraints by Mujoco is unique to the simulator + and is based on their research. Once can find more information in their + [*documentation*](https://mujoco.readthedocs.io/en/latest/computation.html) + or in their paper + ["Analytically-invertible dynamics with contacts and constraints: Theory and implementation in MuJoCo"](https://homes.cs.washington.edu/~todorov/papers/TodorovICRA14.pdf). + + + ### Rewards + + The reward consists of two parts: + - *alive_bonus*: The goal is to make the second inverted pendulum stand upright + (within a certain angle limit) as long as possible - as such a reward of +10 is awarded + for each timestep that the second pole is upright. + - *distance_penalty*: This reward is a measure of how far the *tip* of the second pendulum + (the only free end) moves, and it is calculated as + *0.01 * x2 + (y - 2)2*, where *x* is the x-coordinate of the tip + and *y* is the y-coordinate of the tip of the second pole. + - *velocity_penalty*: A negative reward for penalising the agent if it moves too + fast *0.001 * v12 + 0.005 * v2 2* + + The total reward returned is ***reward*** *=* *alive_bonus - distance_penalty - velocity_penalty* + + ### Starting State + All observations start in state + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) with a uniform noise in the range + of [-0.1, 0.1] added to the positional values (cart position and pole angles) and standard + normal force with a standard deviation of 0.1 added to the velocity values for stochasticity. + + ### Episode End + The episode ends when any of the following happens: + + 1.Truncation: The episode duration reaches 1000 timesteps. + 2.Termination: Any of the state space values is no longer finite. + 3.Termination: The y_coordinate of the tip of the second pole *is less than or equal* to 1. 
The maximum standing height of the system is 1.196 m when all the parts are perpendicularly vertical on top of each other). + + ### Arguments + + No additional arguments are currently supported. + + ``` + env = gym.make('InvertedDoublePendulum-v4') + ``` + There is no v3 for InvertedPendulum, unlike the robot environments where a v3 and + beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks (including inverted pendulum) + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64) + MujocoEnv.__init__( + self, + "inverted_double_pendulum.xml", + 5, + observation_space=observation_space, + **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, action): + self.do_simulation(action, self.frame_skip) + ob = self._get_obs() + x, _, y = self.data.site_xpos[0] + dist_penalty = 0.01 * x**2 + (y - 2) ** 2 + v1, v2 = self.data.qvel[1:3] + vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2 + alive_bonus = 10 + r = alive_bonus - dist_penalty - vel_penalty + terminated = bool(y <= 1) + if self.render_mode == "human": + self.render() + return ob, r, terminated, False, {} + + def _get_obs(self): + return np.concatenate( + [ + self.data.qpos[:1], # cart x pos + np.sin(self.data.qpos[1:]), # link angles + np.cos(self.data.qpos[1:]), + np.clip(self.data.qvel, -10, 10), + np.clip(self.data.qfrc_constraint, -10, 10), + ] + ).ravel() + + def reset_model(self): + self.set_state( + self.init_qpos + + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq), + self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1, + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + v = self.viewer + v.cam.trackbodyid = 0 + v.cam.distance = self.model.stat.extent * 0.5 + v.cam.lookat[2] = 0.12250000000000005 # v.model.stat.center[2] diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum.py b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum.py new file mode 100644 index 0000000000000000000000000000000000000000..eebdbbb0038c498108849660bbd6ce021c2a9119 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum.py @@ -0,0 +1,56 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class InvertedPendulumEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 25, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, + "inverted_pendulum.xml", + 2, + observation_space=observation_space, + **kwargs + ) + + def step(self, a): + reward = 1.0 + self.do_simulation(a, self.frame_skip) + + ob = self._get_obs() + terminated = bool(not np.isfinite(ob).all() or (np.abs(ob[1]) > 0.2)) + + if self.render_mode == 
"human": + self.render() + return ob, reward, terminated, False, {} + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.model.nq, low=-0.01, high=0.01 + ) + qvel = self.init_qvel + self.np_random.uniform( + size=self.model.nv, low=-0.01, high=0.01 + ) + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 0 + self.viewer.cam.distance = self.model.stat.extent diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..25824275699663ba427e194534c77844ffd8bf92 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/inverted_pendulum_v4.py @@ -0,0 +1,132 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + + +class InvertedPendulumEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment is the cartpole environment based on the work done by + Barto, Sutton, and Anderson in ["Neuronlike adaptive elements that can + solve difficult learning control problems"](https://ieeexplore.ieee.org/document/6313077), + just like in the classic environments but now powered by the Mujoco physics simulator - + allowing for more complex experiments (such as varying the effects of gravity). + This environment involves a cart that can moved linearly, with a pole fixed on it + at one end and having another end free. The cart can be pushed left or right, and the + goal is to balance the pole on the top of the cart by applying forces on the cart. + + ### Action Space + The agent take a 1-element vector for actions. + + The action space is a continuous `(action)` in `[-3, 3]`, where `action` represents + the numerical force applied to the cart (with magnitude representing the amount of + force and sign representing the direction) + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|---------------------------|-------------|-------------|----------------------------------|-------|-----------| + | 0 | Force applied on the cart | -3 | 3 | slider | slide | Force (N) | + + ### Observation Space + + The state space consists of positional values of different body parts of + the pendulum system, followed by the velocities of those individual parts (their derivatives) + with all the positions ordered before all the velocities. 
+ + The observation is a `ndarray` with shape `(4,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | --------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------- | + | 0 | position of the cart along the linear surface | -Inf | Inf | slider | slide | position (m) | + | 1 | vertical angle of the pole on the cart | -Inf | Inf | hinge | hinge | angle (rad) | + | 2 | linear velocity of the cart | -Inf | Inf | slider | slide | velocity (m/s) | + | 3 | angular velocity of the pole on the cart | -Inf | Inf | hinge | hinge | anglular velocity (rad/s) | + + + ### Rewards + + The goal is to make the inverted pendulum stand upright (within a certain angle limit) + as long as possible - as such a reward of +1 is awarded for each timestep that + the pole is upright. + + ### Starting State + All observations start in state + (0.0, 0.0, 0.0, 0.0) with a uniform noise in the range + of [-0.01, 0.01] added to the values for stochasticity. + + ### Episode End + The episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches 1000 timesteps. + 2. Termination: Any of the state space values is no longer finite. + 3. Termination: The absolutely value of the vertical angle between the pole and the cart is greater than 0.2 radian. + + ### Arguments + + No additional arguments are currently supported. + + ``` + env = gym.make('InvertedPendulum-v4') + ``` + There is no v3 for InvertedPendulum, unlike the robot environments where a + v3 and beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. 
rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks (including inverted pendulum) + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 25, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float64) + MujocoEnv.__init__( + self, + "inverted_pendulum.xml", + 2, + observation_space=observation_space, + **kwargs + ) + + def step(self, a): + reward = 1.0 + self.do_simulation(a, self.frame_skip) + ob = self._get_obs() + terminated = bool(not np.isfinite(ob).all() or (np.abs(ob[1]) > 0.2)) + if self.render_mode == "human": + self.render() + return ob, reward, terminated, False, {} + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.model.nq, low=-0.01, high=0.01 + ) + qvel = self.init_qvel + self.np_random.uniform( + size=self.model.nv, low=-0.01, high=0.01 + ) + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + return np.concatenate([self.data.qpos, self.data.qvel]).ravel() + + def viewer_setup(self): + assert self.viewer is not None + v = self.viewer + v.cam.trackbodyid = 0 + v.cam.distance = self.model.stat.extent diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_env.py b/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_env.py new file mode 100644 index 0000000000000000000000000000000000000000..62e01c3a37b27264d81ddf95fbbe7470c6bb9f53 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_env.py @@ -0,0 +1,437 @@ +from os import path +from typing import Optional, Union + +import numpy as np + +import gym +from gym import error, logger, spaces +from gym.spaces import Space + +try: + import mujoco_py +except ImportError as e: + MUJOCO_PY_IMPORT_ERROR = e +else: + MUJOCO_PY_IMPORT_ERROR = None + +try: + import mujoco +except ImportError as e: + MUJOCO_IMPORT_ERROR = e +else: + MUJOCO_IMPORT_ERROR = None + + +DEFAULT_SIZE = 480 + + +class BaseMujocoEnv(gym.Env): + """Superclass for all MuJoCo environments.""" + + def __init__( + self, + model_path, + frame_skip, + observation_space: Space, + render_mode: Optional[str] = None, + width: int = DEFAULT_SIZE, + height: int = DEFAULT_SIZE, + camera_id: Optional[int] = None, + camera_name: Optional[str] = None, + ): + if model_path.startswith("/"): + self.fullpath = model_path + else: + self.fullpath = path.join(path.dirname(__file__), "assets", model_path) + if not path.exists(self.fullpath): + raise OSError(f"File {self.fullpath} does not exist") + + self.width = width + self.height = height + self._initialize_simulation() # may use width and height + + self.init_qpos = self.data.qpos.ravel().copy() + self.init_qvel = self.data.qvel.ravel().copy() + self._viewers = {} + + self.frame_skip = frame_skip + + self.viewer = None + + assert self.metadata["render_modes"] == [ + "human", + "rgb_array", + "depth_array", + ], self.metadata["render_modes"] + assert ( + int(np.round(1.0 / self.dt)) == self.metadata["render_fps"] + ), f'Expected value: {int(np.round(1.0 / self.dt))}, Actual value: {self.metadata["render_fps"]}' + + self.observation_space = observation_space + self._set_action_space() + + self.render_mode = render_mode + self.camera_name = camera_name + self.camera_id = camera_id + + def _set_action_space(self): + 
bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) + low, high = bounds.T + self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) + return self.action_space + + # methods to override: + # ---------------------------- + + def reset_model(self): + """ + Reset the robot degrees of freedom (qpos and qvel). + Implement this in each subclass. + """ + raise NotImplementedError + + def viewer_setup(self): + """ + This method is called when the viewer is initialized. + Optionally implement this method, if you need to tinker with camera position and so forth. + """ + + def _initialize_simulation(self): + """ + Initialize MuJoCo simulation data structures mjModel and mjData. + """ + raise NotImplementedError + + def _reset_simulation(self): + """ + Reset MuJoCo simulation data structures, mjModel and mjData. + """ + raise NotImplementedError + + def _step_mujoco_simulation(self, ctrl, n_frames): + """ + Step over the MuJoCo simulation. + """ + raise NotImplementedError + + def render(self): + """ + Render a frame from the MuJoCo simulation as specified by the render_mode. + """ + raise NotImplementedError + + # ----------------------------- + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + + self._reset_simulation() + + ob = self.reset_model() + if self.render_mode == "human": + self.render() + return ob, {} + + def set_state(self, qpos, qvel): + """ + Set the joints position qpos and velocity qvel of the model. Override this method depending on the MuJoCo bindings used. + """ + assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,) + + @property + def dt(self): + return self.model.opt.timestep * self.frame_skip + + def do_simulation(self, ctrl, n_frames): + """ + Step the simulation n number of frames and applying a control action. + """ + # Check control input is contained in the action space + if np.array(ctrl).shape != self.action_space.shape: + raise ValueError("Action dimension mismatch") + self._step_mujoco_simulation(ctrl, n_frames) + + def close(self): + if self.viewer is not None: + self.viewer = None + self._viewers = {} + + def get_body_com(self, body_name): + """Return the cartesian position of a body frame""" + raise NotImplementedError + + def state_vector(self): + """Return the position and velocity joint states of the model""" + return np.concatenate([self.data.qpos.flat, self.data.qvel.flat]) + + +class MuJocoPyEnv(BaseMujocoEnv): + def __init__( + self, + model_path: str, + frame_skip: int, + observation_space: Space, + render_mode: Optional[str] = None, + width: int = DEFAULT_SIZE, + height: int = DEFAULT_SIZE, + camera_id: Optional[int] = None, + camera_name: Optional[str] = None, + ): + if MUJOCO_PY_IMPORT_ERROR is not None: + raise error.DependencyNotInstalled( + f"{MUJOCO_PY_IMPORT_ERROR}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)" + ) + + logger.warn( + "This version of the mujoco environments depends " + "on the mujoco-py bindings, which are no longer maintained " + "and may stop working. Please upgrade to the v4 versions of " + "the environments (which depend on the mujoco python bindings instead), unless " + "you are trying to precisely replicate previous works)." 
+ ) + + super().__init__( + model_path, + frame_skip, + observation_space, + render_mode, + width, + height, + camera_id, + camera_name, + ) + + def _initialize_simulation(self): + self.model = mujoco_py.load_model_from_path(self.fullpath) + self.sim = mujoco_py.MjSim(self.model) + self.data = self.sim.data + + def _reset_simulation(self): + self.sim.reset() + + def set_state(self, qpos, qvel): + super().set_state(qpos, qvel) + state = self.sim.get_state() + state = mujoco_py.MjSimState(state.time, qpos, qvel, state.act, state.udd_state) + self.sim.set_state(state) + self.sim.forward() + + def _step_mujoco_simulation(self, ctrl, n_frames): + self.sim.data.ctrl[:] = ctrl + + for _ in range(n_frames): + self.sim.step() + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + width, height = self.width, self.height + camera_name, camera_id = self.camera_name, self.camera_id + if self.render_mode in {"rgb_array", "depth_array"}: + if camera_id is not None and camera_name is not None: + raise ValueError( + "Both `camera_id` and `camera_name` cannot be" + " specified at the same time." + ) + + no_camera_specified = camera_name is None and camera_id is None + if no_camera_specified: + camera_name = "track" + + if camera_id is None and camera_name in self.model._camera_name2id: + if camera_name in self.model._camera_name2id: + camera_id = self.model.camera_name2id(camera_name) + + self._get_viewer(self.render_mode).render( + width, height, camera_id=camera_id + ) + + if self.render_mode == "rgb_array": + data = self._get_viewer(self.render_mode).read_pixels( + width, height, depth=False + ) + # original image is upside-down, so flip it + return data[::-1, :, :] + elif self.render_mode == "depth_array": + self._get_viewer(self.render_mode).render(width, height) + # Extract depth part of the read_pixels() tuple + data = self._get_viewer(self.render_mode).read_pixels( + width, height, depth=True + )[1] + # original image is upside-down, so flip it + return data[::-1, :] + elif self.render_mode == "human": + self._get_viewer(self.render_mode).render() + + def _get_viewer( + self, mode + ) -> Union["mujoco_py.MjViewer", "mujoco_py.MjRenderContextOffscreen"]: + self.viewer = self._viewers.get(mode) + if self.viewer is None: + if mode == "human": + self.viewer = mujoco_py.MjViewer(self.sim) + + elif mode in {"rgb_array", "depth_array"}: + self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) + else: + raise AttributeError( + f"Unknown mode: {mode}, expected modes: {self.metadata['render_modes']}" + ) + + self.viewer_setup() + self._viewers[mode] = self.viewer + + return self.viewer + + def get_body_com(self, body_name): + return self.data.get_body_xpos(body_name) + + +class MujocoEnv(BaseMujocoEnv): + """Superclass for MuJoCo environments.""" + + def __init__( + self, + model_path, + frame_skip, + observation_space: Space, + render_mode: Optional[str] = None, + width: int = DEFAULT_SIZE, + height: int = DEFAULT_SIZE, + camera_id: Optional[int] = None, + camera_name: Optional[str] = None, + ): + if MUJOCO_IMPORT_ERROR is not None: + raise error.DependencyNotInstalled( + f"{MUJOCO_IMPORT_ERROR}. 
(HINT: you need to install mujoco)" + ) + super().__init__( + model_path, + frame_skip, + observation_space, + render_mode, + width, + height, + camera_id, + camera_name, + ) + + def _initialize_simulation(self): + self.model = mujoco.MjModel.from_xml_path(self.fullpath) + # MjrContext will copy model.vis.global_.off* to con.off* + self.model.vis.global_.offwidth = self.width + self.model.vis.global_.offheight = self.height + self.data = mujoco.MjData(self.model) + + def _reset_simulation(self): + mujoco.mj_resetData(self.model, self.data) + + def set_state(self, qpos, qvel): + super().set_state(qpos, qvel) + self.data.qpos[:] = np.copy(qpos) + self.data.qvel[:] = np.copy(qvel) + if self.model.na == 0: + self.data.act[:] = None + mujoco.mj_forward(self.model, self.data) + + def _step_mujoco_simulation(self, ctrl, n_frames): + self.data.ctrl[:] = ctrl + + mujoco.mj_step(self.model, self.data, nstep=self.frame_skip) + + # As of MuJoCo 2.0, force-related quantities like cacc are not computed + # unless there's a force sensor in the model. + # See https://github.com/openai/gym/issues/1541 + mujoco.mj_rnePostConstraint(self.model, self.data) + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + if self.render_mode in { + "rgb_array", + "depth_array", + }: + camera_id = self.camera_id + camera_name = self.camera_name + + if camera_id is not None and camera_name is not None: + raise ValueError( + "Both `camera_id` and `camera_name` cannot be" + " specified at the same time." + ) + + no_camera_specified = camera_name is None and camera_id is None + if no_camera_specified: + camera_name = "track" + + if camera_id is None: + camera_id = mujoco.mj_name2id( + self.model, + mujoco.mjtObj.mjOBJ_CAMERA, + camera_name, + ) + + self._get_viewer(self.render_mode).render(camera_id=camera_id) + + if self.render_mode == "rgb_array": + data = self._get_viewer(self.render_mode).read_pixels(depth=False) + # original image is upside-down, so flip it + return data[::-1, :, :] + elif self.render_mode == "depth_array": + self._get_viewer(self.render_mode).render() + # Extract depth part of the read_pixels() tuple + data = self._get_viewer(self.render_mode).read_pixels(depth=True)[1] + # original image is upside-down, so flip it + return data[::-1, :] + elif self.render_mode == "human": + self._get_viewer(self.render_mode).render() + + def close(self): + if self.viewer is not None: + self.viewer.close() + super().close() + + def _get_viewer( + self, mode + ) -> Union[ + "gym.envs.mujoco.mujoco_rendering.Viewer", + "gym.envs.mujoco.mujoco_rendering.RenderContextOffscreen", + ]: + self.viewer = self._viewers.get(mode) + if self.viewer is None: + if mode == "human": + from gym.envs.mujoco.mujoco_rendering import Viewer + + self.viewer = Viewer(self.model, self.data) + elif mode in {"rgb_array", "depth_array"}: + from gym.envs.mujoco.mujoco_rendering import RenderContextOffscreen + + self.viewer = RenderContextOffscreen(self.model, self.data) + else: + raise AttributeError( + f"Unexpected mode: {mode}, expected modes: {self.metadata['render_modes']}" + ) + + self.viewer_setup() + self._viewers[mode] = self.viewer + return self.viewer + + def get_body_com(self, body_name): + return self.data.body(body_name).xpos diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_rendering.py 
b/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_rendering.py new file mode 100644 index 0000000000000000000000000000000000000000..9102ab122fee613ead5b79f7e2b7b152c4556f0e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/mujoco_rendering.py @@ -0,0 +1,552 @@ +import collections +import os +import time +from threading import Lock + +import glfw +import imageio +import mujoco +import numpy as np + + +def _import_egl(width, height): + from mujoco.egl import GLContext + + return GLContext(width, height) + + +def _import_glfw(width, height): + from mujoco.glfw import GLContext + + return GLContext(width, height) + + +def _import_osmesa(width, height): + from mujoco.osmesa import GLContext + + return GLContext(width, height) + + +_ALL_RENDERERS = collections.OrderedDict( + [ + ("glfw", _import_glfw), + ("egl", _import_egl), + ("osmesa", _import_osmesa), + ] +) + + +class RenderContext: + """Render context superclass for offscreen and window rendering.""" + + def __init__(self, model, data, offscreen=True): + + self.model = model + self.data = data + self.offscreen = offscreen + self.offwidth = model.vis.global_.offwidth + self.offheight = model.vis.global_.offheight + max_geom = 1000 + + mujoco.mj_forward(self.model, self.data) + + self.scn = mujoco.MjvScene(self.model, max_geom) + self.cam = mujoco.MjvCamera() + self.vopt = mujoco.MjvOption() + self.pert = mujoco.MjvPerturb() + self.con = mujoco.MjrContext(self.model, mujoco.mjtFontScale.mjFONTSCALE_150) + + self._markers = [] + self._overlays = {} + + self._init_camera() + self._set_mujoco_buffers() + + def _set_mujoco_buffers(self): + if self.offscreen: + mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, self.con) + if self.con.currentBuffer != mujoco.mjtFramebuffer.mjFB_OFFSCREEN: + raise RuntimeError("Offscreen rendering not supported") + else: + mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_WINDOW, self.con) + if self.con.currentBuffer != mujoco.mjtFramebuffer.mjFB_WINDOW: + raise RuntimeError("Window rendering not supported") + + def render(self, camera_id=None, segmentation=False): + width, height = self.offwidth, self.offheight + rect = mujoco.MjrRect(left=0, bottom=0, width=width, height=height) + + if camera_id is not None: + if camera_id == -1: + self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE + else: + self.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED + self.cam.fixedcamid = camera_id + + mujoco.mjv_updateScene( + self.model, + self.data, + self.vopt, + self.pert, + self.cam, + mujoco.mjtCatBit.mjCAT_ALL, + self.scn, + ) + + if segmentation: + self.scn.flags[mujoco.mjtRndFlag.mjRND_SEGMENT] = 1 + self.scn.flags[mujoco.mjtRndFlag.mjRND_IDCOLOR] = 1 + + for marker_params in self._markers: + self._add_marker_to_scene(marker_params) + + mujoco.mjr_render(rect, self.scn, self.con) + + for gridpos, (text1, text2) in self._overlays.items(): + mujoco.mjr_overlay( + mujoco.mjtFontScale.mjFONTSCALE_150, + gridpos, + rect, + text1.encode(), + text2.encode(), + self.con, + ) + + if segmentation: + self.scn.flags[mujoco.mjtRndFlag.mjRND_SEGMENT] = 0 + self.scn.flags[mujoco.mjtRndFlag.mjRND_IDCOLOR] = 0 + + def read_pixels(self, depth=True, segmentation=False): + width, height = self.offwidth, self.offheight + rect = mujoco.MjrRect(left=0, bottom=0, width=width, height=height) + + rgb_arr = np.zeros(3 * rect.width * rect.height, dtype=np.uint8) + depth_arr = np.zeros(rect.width * rect.height, dtype=np.float32) + + mujoco.mjr_readPixels(rgb_arr, depth_arr, rect, self.con) + rgb_img = rgb_arr.reshape(rect.height, rect.width, 3) + + 
ret_img = rgb_img + if segmentation: + seg_img = ( + rgb_img[:, :, 0] + + rgb_img[:, :, 1] * (2**8) + + rgb_img[:, :, 2] * (2**16) + ) + seg_img[seg_img >= (self.scn.ngeom + 1)] = 0 + seg_ids = np.full((self.scn.ngeom + 1, 2), fill_value=-1, dtype=np.int32) + + for i in range(self.scn.ngeom): + geom = self.scn.geoms[i] + if geom.segid != -1: + seg_ids[geom.segid + 1, 0] = geom.objtype + seg_ids[geom.segid + 1, 1] = geom.objid + ret_img = seg_ids[seg_img] + + if depth: + depth_img = depth_arr.reshape(rect.height, rect.width) + return (ret_img, depth_img) + else: + return ret_img + + def _init_camera(self): + self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE + self.cam.fixedcamid = -1 + for i in range(3): + self.cam.lookat[i] = np.median(self.data.geom_xpos[:, i]) + self.cam.distance = self.model.stat.extent + + def add_overlay(self, gridpos: int, text1: str, text2: str): + """Overlays text on the scene.""" + if gridpos not in self._overlays: + self._overlays[gridpos] = ["", ""] + self._overlays[gridpos][0] += text1 + "\n" + self._overlays[gridpos][1] += text2 + "\n" + + def add_marker(self, **marker_params): + self._markers.append(marker_params) + + def _add_marker_to_scene(self, marker): + if self.scn.ngeom >= self.scn.maxgeom: + raise RuntimeError("Ran out of geoms. maxgeom: %d" % self.scn.maxgeom) + + g = self.scn.geoms[self.scn.ngeom] + # default values. + g.dataid = -1 + g.objtype = mujoco.mjtObj.mjOBJ_UNKNOWN + g.objid = -1 + g.category = mujoco.mjtCatBit.mjCAT_DECOR + g.texid = -1 + g.texuniform = 0 + g.texrepeat[0] = 1 + g.texrepeat[1] = 1 + g.emission = 0 + g.specular = 0.5 + g.shininess = 0.5 + g.reflectance = 0 + g.type = mujoco.mjtGeom.mjGEOM_BOX + g.size[:] = np.ones(3) * 0.1 + g.mat[:] = np.eye(3) + g.rgba[:] = np.ones(4) + + for key, value in marker.items(): + if isinstance(value, (int, float, mujoco._enums.mjtGeom)): + setattr(g, key, value) + elif isinstance(value, (tuple, list, np.ndarray)): + attr = getattr(g, key) + attr[:] = np.asarray(value).reshape(attr.shape) + elif isinstance(value, str): + assert key == "label", "Only label is a string in mjtGeom." + if value is None: + g.label[0] = 0 + else: + g.label = value + elif hasattr(g, key): + raise ValueError( + "mjtGeom has attr {} but type {} is invalid".format( + key, type(value) + ) + ) + else: + raise ValueError("mjtGeom doesn't have field %s" % key) + + self.scn.ngeom += 1 + + def close(self): + """Override close in your rendering subclass to perform any necessary cleanup + after env.close() is called. + """ + pass + + +class RenderContextOffscreen(RenderContext): + """Offscreen rendering class with opengl context.""" + + def __init__(self, model, data): + # We must make GLContext before MjrContext + width = model.vis.global_.offwidth + height = model.vis.global_.offheight + self._get_opengl_backend(width, height) + self.opengl_context.make_current() + + super().__init__(model, data, offscreen=True) + + def _get_opengl_backend(self, width, height): + + backend = os.environ.get("MUJOCO_GL") + if backend is not None: + try: + self.opengl_context = _ALL_RENDERERS[backend](width, height) + except KeyError: + raise RuntimeError( + "Environment variable {} must be one of {!r}: got {!r}.".format( + "MUJOCO_GL", _ALL_RENDERERS.keys(), backend + ) + ) + + else: + for name, _ in _ALL_RENDERERS.items(): + try: + self.opengl_context = _ALL_RENDERERS[name](width, height) + backend = name + break + except: # noqa:E722 + pass + if backend is None: + raise RuntimeError( + "No OpenGL backend could be imported. 
Attempting to create a " + "rendering context will result in a RuntimeError." + ) + + +class Viewer(RenderContext): + """Class for window rendering in all MuJoCo environments.""" + + def __init__(self, model, data): + self._gui_lock = Lock() + self._button_left_pressed = False + self._button_right_pressed = False + self._last_mouse_x = 0 + self._last_mouse_y = 0 + self._paused = False + self._transparent = False + self._contacts = False + self._render_every_frame = True + self._image_idx = 0 + self._image_path = "/tmp/frame_%07d.png" + self._time_per_render = 1 / 60.0 + self._run_speed = 1.0 + self._loop_count = 0 + self._advance_by_one_step = False + self._hide_menu = False + + # glfw init + glfw.init() + width, height = glfw.get_video_mode(glfw.get_primary_monitor()).size + self.window = glfw.create_window(width // 2, height // 2, "mujoco", None, None) + glfw.make_context_current(self.window) + glfw.swap_interval(1) + + framebuffer_width, framebuffer_height = glfw.get_framebuffer_size(self.window) + window_width, _ = glfw.get_window_size(self.window) + self._scale = framebuffer_width * 1.0 / window_width + + # set callbacks + glfw.set_cursor_pos_callback(self.window, self._cursor_pos_callback) + glfw.set_mouse_button_callback(self.window, self._mouse_button_callback) + glfw.set_scroll_callback(self.window, self._scroll_callback) + glfw.set_key_callback(self.window, self._key_callback) + + # get viewport + self.viewport = mujoco.MjrRect(0, 0, framebuffer_width, framebuffer_height) + + super().__init__(model, data, offscreen=False) + + def _key_callback(self, window, key, scancode, action, mods): + if action != glfw.RELEASE: + return + # Switch cameras + elif key == glfw.KEY_TAB: + self.cam.fixedcamid += 1 + self.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED + if self.cam.fixedcamid >= self.model.ncam: + self.cam.fixedcamid = -1 + self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE + # Pause simulation + elif key == glfw.KEY_SPACE and self._paused is not None: + self._paused = not self._paused + # Advances simulation by one step. + elif key == glfw.KEY_RIGHT and self._paused is not None: + self._advance_by_one_step = True + self._paused = True + # Slows down simulation + elif key == glfw.KEY_S: + self._run_speed /= 2.0 + # Speeds up simulation + elif key == glfw.KEY_F: + self._run_speed *= 2.0 + # Turn off / turn on rendering every frame. 
+ elif key == glfw.KEY_D: + self._render_every_frame = not self._render_every_frame + # Capture screenshot + elif key == glfw.KEY_T: + img = np.zeros( + ( + glfw.get_framebuffer_size(self.window)[1], + glfw.get_framebuffer_size(self.window)[0], + 3, + ), + dtype=np.uint8, + ) + mujoco.mjr_readPixels(img, None, self.viewport, self.con) + imageio.imwrite(self._image_path % self._image_idx, np.flipud(img)) + self._image_idx += 1 + # Display contact forces + elif key == glfw.KEY_C: + self._contacts = not self._contacts + self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = self._contacts + self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTFORCE] = self._contacts + # Display coordinate frames + elif key == glfw.KEY_E: + self.vopt.frame = 1 - self.vopt.frame + # Hide overlay menu + elif key == glfw.KEY_H: + self._hide_menu = not self._hide_menu + # Make transparent + elif key == glfw.KEY_R: + self._transparent = not self._transparent + if self._transparent: + self.model.geom_rgba[:, 3] /= 5.0 + else: + self.model.geom_rgba[:, 3] *= 5.0 + # Geom group visibility + elif key in (glfw.KEY_0, glfw.KEY_1, glfw.KEY_2, glfw.KEY_3, glfw.KEY_4): + self.vopt.geomgroup[key - glfw.KEY_0] ^= 1 + # Quit + if key == glfw.KEY_ESCAPE: + print("Pressed ESC") + print("Quitting.") + glfw.destroy_window(self.window) + glfw.terminate() + + def _cursor_pos_callback(self, window, xpos, ypos): + if not (self._button_left_pressed or self._button_right_pressed): + return + + mod_shift = ( + glfw.get_key(window, glfw.KEY_LEFT_SHIFT) == glfw.PRESS + or glfw.get_key(window, glfw.KEY_RIGHT_SHIFT) == glfw.PRESS + ) + if self._button_right_pressed: + action = ( + mujoco.mjtMouse.mjMOUSE_MOVE_H + if mod_shift + else mujoco.mjtMouse.mjMOUSE_MOVE_V + ) + elif self._button_left_pressed: + action = ( + mujoco.mjtMouse.mjMOUSE_ROTATE_H + if mod_shift + else mujoco.mjtMouse.mjMOUSE_ROTATE_V + ) + else: + action = mujoco.mjtMouse.mjMOUSE_ZOOM + + dx = int(self._scale * xpos) - self._last_mouse_x + dy = int(self._scale * ypos) - self._last_mouse_y + width, height = glfw.get_framebuffer_size(window) + + with self._gui_lock: + mujoco.mjv_moveCamera( + self.model, action, dx / height, dy / height, self.scn, self.cam + ) + + self._last_mouse_x = int(self._scale * xpos) + self._last_mouse_y = int(self._scale * ypos) + + def _mouse_button_callback(self, window, button, act, mods): + self._button_left_pressed = ( + glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_LEFT) == glfw.PRESS + ) + self._button_right_pressed = ( + glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_RIGHT) == glfw.PRESS + ) + + x, y = glfw.get_cursor_pos(window) + self._last_mouse_x = int(self._scale * x) + self._last_mouse_y = int(self._scale * y) + + def _scroll_callback(self, window, x_offset, y_offset): + with self._gui_lock: + mujoco.mjv_moveCamera( + self.model, + mujoco.mjtMouse.mjMOUSE_ZOOM, + 0, + -0.05 * y_offset, + self.scn, + self.cam, + ) + + def _create_overlay(self): + topleft = mujoco.mjtGridPos.mjGRID_TOPLEFT + bottomleft = mujoco.mjtGridPos.mjGRID_BOTTOMLEFT + + if self._render_every_frame: + self.add_overlay(topleft, "", "") + else: + self.add_overlay( + topleft, + "Run speed = %.3f x real time" % self._run_speed, + "[S]lower, [F]aster", + ) + self.add_overlay( + topleft, "Ren[d]er every frame", "On" if self._render_every_frame else "Off" + ) + self.add_overlay( + topleft, + "Switch camera (#cams = %d)" % (self.model.ncam + 1), + "[Tab] (camera ID = %d)" % self.cam.fixedcamid, + ) + self.add_overlay(topleft, "[C]ontact forces", "On" if self._contacts else 
"Off") + self.add_overlay(topleft, "T[r]ansparent", "On" if self._transparent else "Off") + if self._paused is not None: + if not self._paused: + self.add_overlay(topleft, "Stop", "[Space]") + else: + self.add_overlay(topleft, "Start", "[Space]") + self.add_overlay( + topleft, "Advance simulation by one step", "[right arrow]" + ) + self.add_overlay( + topleft, "Referenc[e] frames", "On" if self.vopt.frame == 1 else "Off" + ) + self.add_overlay(topleft, "[H]ide Menu", "") + if self._image_idx > 0: + fname = self._image_path % (self._image_idx - 1) + self.add_overlay(topleft, "Cap[t]ure frame", "Saved as %s" % fname) + else: + self.add_overlay(topleft, "Cap[t]ure frame", "") + self.add_overlay(topleft, "Toggle geomgroup visibility", "0-4") + + self.add_overlay(bottomleft, "FPS", "%d%s" % (1 / self._time_per_render, "")) + self.add_overlay( + bottomleft, "Solver iterations", str(self.data.solver_iter + 1) + ) + self.add_overlay( + bottomleft, "Step", str(round(self.data.time / self.model.opt.timestep)) + ) + self.add_overlay(bottomleft, "timestep", "%.5f" % self.model.opt.timestep) + + def render(self): + # mjv_updateScene, mjr_render, mjr_overlay + def update(): + # fill overlay items + self._create_overlay() + + render_start = time.time() + if self.window is None: + return + elif glfw.window_should_close(self.window): + glfw.destroy_window(self.window) + glfw.terminate() + self.viewport.width, self.viewport.height = glfw.get_framebuffer_size( + self.window + ) + with self._gui_lock: + # update scene + mujoco.mjv_updateScene( + self.model, + self.data, + self.vopt, + mujoco.MjvPerturb(), + self.cam, + mujoco.mjtCatBit.mjCAT_ALL.value, + self.scn, + ) + # marker items + for marker in self._markers: + self._add_marker_to_scene(marker) + # render + mujoco.mjr_render(self.viewport, self.scn, self.con) + # overlay items + if not self._hide_menu: + for gridpos, [t1, t2] in self._overlays.items(): + mujoco.mjr_overlay( + mujoco.mjtFontScale.mjFONTSCALE_150, + gridpos, + self.viewport, + t1, + t2, + self.con, + ) + glfw.swap_buffers(self.window) + glfw.poll_events() + self._time_per_render = 0.9 * self._time_per_render + 0.1 * ( + time.time() - render_start + ) + + # clear overlay + self._overlays.clear() + + if self._paused: + while self._paused: + update() + if self._advance_by_one_step: + self._advance_by_one_step = False + break + else: + self._loop_count += self.model.opt.timestep / ( + self._time_per_render * self._run_speed + ) + if self._render_every_frame: + self._loop_count = 1 + while self._loop_count > 0: + update() + self._loop_count -= 1 + + # clear markers + self._markers[:] = [] + + def close(self): + glfw.destroy_window(self.window) + glfw.terminate() diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/pusher.py b/MLPY/Lib/site-packages/gym/envs/mujoco/pusher.py new file mode 100644 index 0000000000000000000000000000000000000000..63f58f2ed598b017a80654a3adc5551775b8bea4 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/pusher.py @@ -0,0 +1,84 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class PusherEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(23,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "pusher.xml", 5, observation_space=observation_space, **kwargs + ) + + def 
step(self, a): + vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm") + vec_2 = self.get_body_com("object") - self.get_body_com("goal") + + reward_near = -np.linalg.norm(vec_1) + reward_dist = -np.linalg.norm(vec_2) + reward_ctrl = -np.square(a).sum() + reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near + + self.do_simulation(a, self.frame_skip) + if self.render_mode == "human": + self.render() + + ob = self._get_obs() + return ( + ob, + reward, + False, + False, + dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl), + ) + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = -1 + self.viewer.cam.distance = 4.0 + + def reset_model(self): + qpos = self.init_qpos + + self.goal_pos = np.asarray([0, 0]) + while True: + self.cylinder_pos = np.concatenate( + [ + self.np_random.uniform(low=-0.3, high=0, size=1), + self.np_random.uniform(low=-0.2, high=0.2, size=1), + ] + ) + if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17: + break + + qpos[-4:-2] = self.cylinder_pos + qpos[-2:] = self.goal_pos + qvel = self.init_qvel + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nv + ) + qvel[-4:] = 0 + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + return np.concatenate( + [ + self.sim.data.qpos.flat[:7], + self.sim.data.qvel.flat[:7], + self.get_body_com("tips_arm"), + self.get_body_com("object"), + self.get_body_com("goal"), + ] + ) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/pusher_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/pusher_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..07ecd165240c5f932de56fe572f21877e37815ac --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/pusher_v4.py @@ -0,0 +1,206 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + + +class PusherEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + "Pusher" is a multi-jointed robot arm which is very similar to that of a human. + The goal is to move a target cylinder (called *object*) to a goal position using the robot's end effector (called *fingertip*). + The robot consists of shoulder, elbow, forearm, and wrist joints. + + ### Action Space + The action space is a `Box(-2, 2, (7,), float32)`. An action `(a, b)` represents the torques applied at the hinge joints. 
+ + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|--------------------------------------------------------------------|-------------|-------------|----------------------------------|-------|--------------| + | 0 | Rotation of the panning the shoulder | -2 | 2 | r_shoulder_pan_joint | hinge | torque (N m) | + | 1 | Rotation of the shoulder lifting joint | -2 | 2 | r_shoulder_lift_joint | hinge | torque (N m) | + | 2 | Rotation of the shoulder rolling joint | -2 | 2 | r_upper_arm_roll_joint | hinge | torque (N m) | + | 3 | Rotation of hinge joint that flexed the elbow | -2 | 2 | r_elbow_flex_joint | hinge | torque (N m) | + | 4 | Rotation of hinge that rolls the forearm | -2 | 2 | r_forearm_roll_joint | hinge | torque (N m) | + | 5 | Rotation of flexing the wrist | -2 | 2 | r_wrist_flex_joint | hinge | torque (N m) | + | 6 | Rotation of rolling the wrist | -2 | 2 | r_wrist_roll_joint | hinge | torque (N m) | + + ### Observation Space + + Observations consist of + + - Angle of rotational joints on the pusher + - Angular velocities of rotational joints on the pusher + - The coordinates of the fingertip of the pusher + - The coordinates of the object to be moved + - The coordinates of the goal position + + The observation is a `ndarray` with shape `(23,)` where the elements correspond to the table below. + An analogy can be drawn to a human arm in order to help understand the state space, with the words flex and roll meaning the + same as human joints. + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | -------------------------------------------------------- | ---- | --- | -------------------------------- | -------- | ------------------------ | + | 0 | Rotation of the panning the shoulder | -Inf | Inf | r_shoulder_pan_joint | hinge | angle (rad) | + | 1 | Rotation of the shoulder lifting joint | -Inf | Inf | r_shoulder_lift_joint | hinge | angle (rad) | + | 2 | Rotation of the shoulder rolling joint | -Inf | Inf | r_upper_arm_roll_joint | hinge | angle (rad) | + | 3 | Rotation of hinge joint that flexed the elbow | -Inf | Inf | r_elbow_flex_joint | hinge | angle (rad) | + | 4 | Rotation of hinge that rolls the forearm | -Inf | Inf | r_forearm_roll_joint | hinge | angle (rad) | + | 5 | Rotation of flexing the wrist | -Inf | Inf | r_wrist_flex_joint | hinge | angle (rad) | + | 6 | Rotation of rolling the wrist | -Inf | Inf | r_wrist_roll_joint | hinge | angle (rad) | + | 7 | Rotational velocity of the panning the shoulder | -Inf | Inf | r_shoulder_pan_joint | hinge | angular velocity (rad/s) | + | 8 | Rotational velocity of the shoulder lifting joint | -Inf | Inf | r_shoulder_lift_joint | hinge | angular velocity (rad/s) | + | 9 | Rotational velocity of the shoulder rolling joint | -Inf | Inf | r_upper_arm_roll_joint | hinge | angular velocity (rad/s) | + | 10 | Rotational velocity of hinge joint that flexed the elbow | -Inf | Inf | r_elbow_flex_joint | hinge | angular velocity (rad/s) | + | 11 | Rotational velocity of hinge that rolls the forearm | -Inf | Inf | r_forearm_roll_joint | hinge | angular velocity (rad/s) | + | 12 | Rotational velocity of flexing the wrist | -Inf | Inf | r_wrist_flex_joint | hinge | angular velocity (rad/s) | + | 13 | Rotational velocity of rolling the wrist | -Inf | Inf | r_wrist_roll_joint | hinge | angular velocity (rad/s) | + | 14 | x-coordinate of the fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) | + | 15 | y-coordinate of the 
fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) | + | 16 | z-coordinate of the fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) | + | 17 | x-coordinate of the object to be moved | -Inf | Inf | object (obj_slidex) | slide | position (m) | + | 18 | y-coordinate of the object to be moved | -Inf | Inf | object (obj_slidey) | slide | position (m) | + | 19 | z-coordinate of the object to be moved | -Inf | Inf | object | cylinder | position (m) | + | 20 | x-coordinate of the goal position of the object | -Inf | Inf | goal (goal_slidex) | slide | position (m) | + | 21 | y-coordinate of the goal position of the object | -Inf | Inf | goal (goal_slidey) | slide | position (m) | + | 22 | z-coordinate of the goal position of the object | -Inf | Inf | goal | sphere | position (m) | + + + ### Rewards + The reward consists of two parts: + - *reward_near *: This reward is a measure of how far the *fingertip* + of the pusher (the unattached end) is from the object, with a more negative + value assigned for when the pusher's *fingertip* is further away from the + target. It is calculated as the negative vector norm of (position of + the fingertip - position of target), or *-norm("fingertip" - "target")*. + - *reward_dist *: This reward is a measure of how far the object is from + the target goal position, with a more negative value assigned for object is + further away from the target. It is calculated as the negative vector norm of + (position of the object - position of goal), or *-norm("object" - "target")*. + - *reward_control*: A negative reward for penalising the pusher if + it takes actions that are too large. It is measured as the negative squared + Euclidean norm of the action, i.e. as *- sum(action2)*. + + The total reward returned is ***reward*** *=* *reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near* + + Unlike other environments, Pusher does not allow you to specify weights for the individual reward terms. + However, `info` does contain the keys *reward_dist* and *reward_ctrl*. Thus, if you'd like to weight the terms, + you should create a wrapper that computes the weighted reward from `info`. + + + ### Starting State + All pusher (not including object and goal) states start in + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0). A uniform noise in the range + [-0.005, 0.005] is added to the velocity attributes only. The velocities of + the object and goal are permanently set to 0. The object's x-position is selected uniformly + between [-0.3, 0] while the y-position is selected uniformly between [-0.2, 0.2], and this + process is repeated until the vector norm between the object's (x,y) position and origin is not greater + than 0.17. The goal always have the same position of (0.45, -0.05, -0.323). + + The default framerate is 5 with each frame lasting for 0.01, giving rise to a *dt = 5 * 0.01 = 0.05* + + ### Episode End + + The episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 100 timesteps. + 2. Termination: Any of the state space values is no longer finite. + + ### Arguments + + No additional arguments are currently supported (in v2 and lower), + but modifications can be made to the XML file in the assets folder + (or by changing the path to a modified XML file in another folder).. 
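+    As noted under Rewards above, the individual reward terms can be re-weighted by wrapping the
+    environment and recomputing the reward from `info`. A minimal sketch follows; the wrapper name
+    and weight values are illustrative (not part of gym), and only the two terms exposed in `info`
+    can be re-weighted, since *reward_near* is not reported:
+
+    ```
+    import gym
+
+    class ReweightedPusher(gym.Wrapper):
+        """Recompute the reward from the `reward_dist`/`reward_ctrl` entries in `info`."""
+
+        def __init__(self, env, dist_weight=1.0, ctrl_weight=0.1):
+            super().__init__(env)
+            self.dist_weight = dist_weight
+            self.ctrl_weight = ctrl_weight
+
+        def step(self, action):
+            # discard the environment's own reward and rebuild it with custom weights
+            obs, _, terminated, truncated, info = self.env.step(action)
+            reward = (self.dist_weight * info["reward_dist"]
+                      + self.ctrl_weight * info["reward_ctrl"])
+            return obs, reward, terminated, truncated, info
+
+    env = ReweightedPusher(gym.make("Pusher-v4"), dist_weight=2.0)
+    ```
+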
+ + ``` + env = gym.make('Pusher-v4') + ``` + + There is no v3 for Pusher, unlike the robot environments where a v3 and + beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks (not including reacher, which has a max_time_steps of 50). Added reward_threshold to environments. + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 20, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(23,), dtype=np.float64) + MujocoEnv.__init__( + self, "pusher.xml", 5, observation_space=observation_space, **kwargs + ) + + def step(self, a): + vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm") + vec_2 = self.get_body_com("object") - self.get_body_com("goal") + + reward_near = -np.linalg.norm(vec_1) + reward_dist = -np.linalg.norm(vec_2) + reward_ctrl = -np.square(a).sum() + reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near + + self.do_simulation(a, self.frame_skip) + if self.render_mode == "human": + self.render() + + ob = self._get_obs() + return ( + ob, + reward, + False, + False, + dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl), + ) + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = -1 + self.viewer.cam.distance = 4.0 + + def reset_model(self): + qpos = self.init_qpos + + self.goal_pos = np.asarray([0, 0]) + while True: + self.cylinder_pos = np.concatenate( + [ + self.np_random.uniform(low=-0.3, high=0, size=1), + self.np_random.uniform(low=-0.2, high=0.2, size=1), + ] + ) + if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17: + break + + qpos[-4:-2] = self.cylinder_pos + qpos[-2:] = self.goal_pos + qvel = self.init_qvel + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nv + ) + qvel[-4:] = 0 + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + return np.concatenate( + [ + self.data.qpos.flat[:7], + self.data.qvel.flat[:7], + self.get_body_com("tips_arm"), + self.get_body_com("object"), + self.get_body_com("goal"), + ] + ) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/reacher.py b/MLPY/Lib/site-packages/gym/envs/mujoco/reacher.py new file mode 100644 index 0000000000000000000000000000000000000000..931dc50f8c682e96122c8fd2cbb7d8b8609a88bf --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/reacher.py @@ -0,0 +1,75 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class ReacherEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 50, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "reacher.xml", 2, observation_space=observation_space, **kwargs + ) + + def step(self, a): + vec = self.get_body_com("fingertip") - self.get_body_com("target") + reward_dist = -np.linalg.norm(vec) + reward_ctrl = -np.square(a).sum() + reward = reward_dist + reward_ctrl + + self.do_simulation(a, self.frame_skip) + if self.render_mode == "human": + 
self.render() + + ob = self._get_obs() + return ( + ob, + reward, + False, + False, + dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl), + ) + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 0 + + def reset_model(self): + qpos = ( + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + + self.init_qpos + ) + while True: + self.goal = self.np_random.uniform(low=-0.2, high=0.2, size=2) + if np.linalg.norm(self.goal) < 0.2: + break + qpos[-2:] = self.goal + qvel = self.init_qvel + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nv + ) + qvel[-2:] = 0 + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + theta = self.sim.data.qpos.flat[:2] + return np.concatenate( + [ + np.cos(theta), + np.sin(theta), + self.sim.data.qpos.flat[2:], + self.sim.data.qvel.flat[:2], + self.get_body_com("fingertip") - self.get_body_com("target"), + ] + ) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/reacher_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/reacher_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..4740652be592b2388e58c1acd75a9e5ff5324448 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/reacher_v4.py @@ -0,0 +1,187 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + + +class ReacherEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + "Reacher" is a two-jointed robot arm. The goal is to move the robot's end effector (called *fingertip*) close to a + target that is spawned at a random position. + + ### Action Space + The action space is a `Box(-1, 1, (2,), float32)`. An action `(a, b)` represents the torques applied at the hinge joints. + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|---------------------------------------------------------------------------------|-------------|-------------|--------------------------|-------|------| + | 0 | Torque applied at the first hinge (connecting the link to the point of fixture) | -1 | 1 | joint0 | hinge | torque (N m) | + | 1 | Torque applied at the second hinge (connecting the two links) | -1 | 1 | joint1 | hinge | torque (N m) | + + ### Observation Space + + Observations consist of + + - The cosine of the angles of the two arms + - The sine of the angles of the two arms + - The coordinates of the target + - The angular velocities of the arms + - The vector between the target and the reacher's fingertip (3 dimensional with the last element being 0) + + The observation is a `ndarray` with shape `(11,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ---------------------------------------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | cosine of the angle of the first arm | -Inf | Inf | cos(joint0) | hinge | unitless | + | 1 | cosine of the angle of the second arm | -Inf | Inf | cos(joint1) | hinge | unitless | + | 2 | sine of the angle of the first arm | -Inf | Inf | cos(joint0) | hinge | unitless | + | 3 | sine of the angle of the second arm | -Inf | Inf | cos(joint1) | hinge | unitless | + | 4 | x-coordinate of the target | -Inf | Inf | target_x | slide | position (m) | + | 5 | y-coordinate of the target | -Inf | Inf | target_y | slide | position (m) | + | 6 | angular velocity of the first arm | 
-Inf | Inf | joint0 | hinge | angular velocity (rad/s) | + | 7 | angular velocity of the second arm | -Inf | Inf | joint1 | hinge | angular velocity (rad/s) | + | 8 | x-value of position_fingertip - position_target | -Inf | Inf | NA | slide | position (m) | + | 9 | y-value of position_fingertip - position_target | -Inf | Inf | NA | slide | position (m) | + | 10 | z-value of position_fingertip - position_target (0 since reacher is 2d and z is same for both) | -Inf | Inf | NA | slide | position (m) | + + + Most Gym environments just return the positions and velocity of the + joints in the `.xml` file as the state of the environment. However, in + reacher the state is created by combining only certain elements of the + position and velocity, and performing some function transformations on them. + If one is to read the `.xml` for reacher then they will find 4 joints: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + |-----|-----------------------------|----------|----------|----------------------------------|-------|--------------------| + | 0 | angle of the first arm | -Inf | Inf | joint0 | hinge | angle (rad) | + | 1 | angle of the second arm | -Inf | Inf | joint1 | hinge | angle (rad) | + | 2 | x-coordinate of the target | -Inf | Inf | target_x | slide | position (m) | + | 3 | y-coordinate of the target | -Inf | Inf | target_y | slide | position (m) | + + + ### Rewards + The reward consists of two parts: + - *reward_distance*: This reward is a measure of how far the *fingertip* + of the reacher (the unattached end) is from the target, with a more negative + value assigned for when the reacher's *fingertip* is further away from the + target. It is calculated as the negative vector norm of (position of + the fingertip - position of target), or *-norm("fingertip" - "target")*. + - *reward_control*: A negative reward for penalising the walker if + it takes actions that are too large. It is measured as the negative squared + Euclidean norm of the action, i.e. as *- sum(action2)*. + + The total reward returned is ***reward*** *=* *reward_distance + reward_control* + + Unlike other environments, Reacher does not allow you to specify weights for the individual reward terms. + However, `info` does contain the keys *reward_dist* and *reward_ctrl*. Thus, if you'd like to weight the terms, + you should create a wrapper that computes the weighted reward from `info`. + + + ### Starting State + All observations start in state + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + with a noise added for stochasticity. A uniform noise in the range + [-0.1, 0.1] is added to the positional attributes, while the target position + is selected uniformly at random in a disk of radius 0.2 around the origin. + Independent, uniform noise in the + range of [-0.005, 0.005] is added to the velocities, and the last + element ("fingertip" - "target") is calculated at the end once everything + is set. The default setting has a framerate of 2 and a *dt = 2 * 0.01 = 0.02* + + ### Episode End + + The episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 50 timesteps (with a new random target popping up if the reacher's fingertip reaches it before 50 timesteps) + 2. Termination: Any of the state space values is no longer finite. 
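+    For reference, the observation layout documented above can be decoded during a short rollout as
+    in the sketch below; `decode_reacher_obs` is an illustrative helper, not part of gym:
+
+    ```
+    import numpy as np
+    import gym
+
+    def decode_reacher_obs(obs):
+        # entries 0-1 are cos(joint0), cos(joint1); entries 2-3 are the corresponding sines
+        theta = np.arctan2(obs[2:4], obs[0:2])
+        # entries 8-10 are (fingertip - target), so its norm is the distance to the target
+        dist_to_target = np.linalg.norm(obs[8:11])
+        return theta, dist_to_target
+
+    env = gym.make("Reacher-v4")
+    obs, info = env.reset(seed=0)
+    for _ in range(50):
+        obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
+        theta, dist = decode_reacher_obs(obs)
+        if terminated or truncated:
+            break
+    ```
+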
+ + ### Arguments + + No additional arguments are currently supported (in v2 and lower), + but modifications can be made to the XML file in the assets folder + (or by changing the path to a modified XML file in another folder).. + + ``` + env = gym.make('Reacher-v4') + ``` + + There is no v3 for Reacher, unlike the robot environments where a v3 and + beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks (not including reacher, which has a max_time_steps of 50). Added reward_threshold to environments. + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 50, + } + + def __init__(self, **kwargs): + utils.EzPickle.__init__(self, **kwargs) + observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64) + MujocoEnv.__init__( + self, "reacher.xml", 2, observation_space=observation_space, **kwargs + ) + + def step(self, a): + vec = self.get_body_com("fingertip") - self.get_body_com("target") + reward_dist = -np.linalg.norm(vec) + reward_ctrl = -np.square(a).sum() + reward = reward_dist + reward_ctrl + + self.do_simulation(a, self.frame_skip) + if self.render_mode == "human": + self.render() + + ob = self._get_obs() + return ( + ob, + reward, + False, + False, + dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl), + ) + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 0 + + def reset_model(self): + qpos = ( + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + + self.init_qpos + ) + while True: + self.goal = self.np_random.uniform(low=-0.2, high=0.2, size=2) + if np.linalg.norm(self.goal) < 0.2: + break + qpos[-2:] = self.goal + qvel = self.init_qvel + self.np_random.uniform( + low=-0.005, high=0.005, size=self.model.nv + ) + qvel[-2:] = 0 + self.set_state(qpos, qvel) + return self._get_obs() + + def _get_obs(self): + theta = self.data.qpos.flat[:2] + return np.concatenate( + [ + np.cos(theta), + np.sin(theta), + self.data.qpos.flat[2:], + self.data.qvel.flat[:2], + self.get_body_com("fingertip") - self.get_body_com("target"), + ] + ) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer.py b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer.py new file mode 100644 index 0000000000000000000000000000000000000000..612a392f40dedf82e7d04a7af5c86003df333b12 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer.py @@ -0,0 +1,59 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class SwimmerEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 25, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "swimmer.xml", 4, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, a): + ctrl_cost_coeff = 0.0001 + xposbefore = self.sim.data.qpos[0] + self.do_simulation(a, self.frame_skip) + xposafter = self.sim.data.qpos[0] + + reward_fwd = (xposafter - xposbefore) / self.dt + reward_ctrl = -ctrl_cost_coeff * np.square(a).sum() + reward = reward_fwd + reward_ctrl + ob 
= self._get_obs() + + if self.render_mode == "human": + self.render() + + return ( + ob, + reward, + False, + False, + dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl), + ) + + def _get_obs(self): + qpos = self.sim.data.qpos + qvel = self.sim.data.qvel + return np.concatenate([qpos.flat[2:], qvel.flat]) + + def reset_model(self): + self.set_state( + self.init_qpos + + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq), + self.init_qvel + + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nv), + ) + return self._get_obs() diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..68a0d4fc4005bdd7862c70a49596b77e8e849255 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v3.py @@ -0,0 +1,128 @@ +__credits__ = ["Rushiv Arora"] + +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = {} + + +class SwimmerEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 25, + } + + def __init__( + self, + xml_file="swimmer.xml", + forward_reward_weight=1.0, + ctrl_cost_weight=1e-4, + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + forward_reward_weight, + ctrl_cost_weight, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(10,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 4, observation_space=observation_space, **kwargs + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + def step(self, action): + xy_position_before = self.sim.data.qpos[0:2].copy() + self.do_simulation(action, self.frame_skip) + xy_position_after = self.sim.data.qpos[0:2].copy() + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + forward_reward = self._forward_reward_weight * x_velocity + ctrl_cost = self.control_cost(action) + + observation = self._get_obs() + reward = forward_reward - ctrl_cost + info = { + "reward_fwd": forward_reward, + "reward_ctrl": -ctrl_cost, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + + if self.render_mode == "human": + self.render() + + return observation, reward, False, False, info + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = self.sim.data.qvel.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + observation = np.concatenate([position, velocity]).ravel() + return observation + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = 
self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..ef611af921e2d50e81591b76c62eb255c5c6b8d4 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/swimmer_v4.py @@ -0,0 +1,239 @@ +__credits__ = ["Rushiv Arora"] + +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = {} + + +class SwimmerEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment corresponds to the Swimmer environment described in Rémi Coulom's PhD thesis + ["Reinforcement Learning Using Neural Networks, with Applications to Motor Control"](https://tel.archives-ouvertes.fr/tel-00003985/document). + The environment aims to increase the number of independent state and control + variables as compared to the classic control environments. The swimmers + consist of three or more segments ('***links***') and one less articulation + joints ('***rotors***') - one rotor joint connecting exactly two links to + form a linear chain. The swimmer is suspended in a two dimensional pool and + always starts in the same position (subject to some deviation drawn from an + uniform distribution), and the goal is to move as fast as possible towards + the right by applying torque on the rotors and using the fluids friction. + + ### Notes + + The problem parameters are: + Problem parameters: + * *n*: number of body parts + * *mi*: mass of part *i* (*i* ∈ {1...n}) + * *li*: length of part *i* (*i* ∈ {1...n}) + * *k*: viscous-friction coefficient + + While the default environment has *n* = 3, *li* = 0.1, + and *k* = 0.1. It is possible to pass a custom MuJoCo XML file during construction to increase the + number of links, or to tweak any of the parameters. + + ### Action Space + The action space is a `Box(-1, 1, (2,), float32)`. An action represents the torques applied between *links* + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|------------------------------------|-------------|-------------|----------------------------------|-------|--------------| + | 0 | Torque applied on the first rotor | -1 | 1 | motor1_rot | hinge | torque (N m) | + | 1 | Torque applied on the second rotor | -1 | 1 | motor2_rot | hinge | torque (N m) | + + ### Observation Space + + By default, observations consists of: + * θi: angle of part *i* with respect to the *x* axis + * θi': its derivative with respect to time (angular velocity) + + In the default case, observations do not include the x- and y-coordinates of the front tip. These may + be included by passing `exclude_current_positions_from_observation=False` during construction. + Then, the observation space will have 10 dimensions where the first two dimensions + represent the x- and y-coordinates of the front tip. 
+ Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x- and y-coordinates + will be returned in `info` with keys `"x_position"` and `"y_position"`, respectively. + + By default, the observation is a `ndarray` with shape `(8,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | angle of the front tip | -Inf | Inf | free_body_rot | hinge | angle (rad) | + | 1 | angle of the first rotor | -Inf | Inf | motor1_rot | hinge | angle (rad) | + | 2 | angle of the second rotor | -Inf | Inf | motor2_rot | hinge | angle (rad) | + | 3 | velocity of the tip along the x-axis | -Inf | Inf | slider1 | slide | velocity (m/s) | + | 4 | velocity of the tip along the y-axis | -Inf | Inf | slider2 | slide | velocity (m/s) | + | 5 | angular velocity of front tip | -Inf | Inf | free_body_rot | hinge | angular velocity (rad/s) | + | 6 | angular velocity of first rotor | -Inf | Inf | motor1_rot | hinge | angular velocity (rad/s) | + | 7 | angular velocity of second rotor | -Inf | Inf | motor2_rot | hinge | angular velocity (rad/s) | + + ### Rewards + The reward consists of two parts: + - *forward_reward*: A reward of moving forward which is measured + as *`forward_reward_weight` * (x-coordinate before action - x-coordinate after action)/dt*. *dt* is + the time between actions and is dependent on the frame_skip parameter + (default is 4), where the frametime is 0.01 - making the + default *dt = 4 * 0.01 = 0.04*. This reward would be positive if the swimmer + swims right as desired. + - *ctrl_cost*: A cost for penalising the swimmer if it takes + actions that are too large. It is measured as *`ctrl_cost_weight` * + sum(action2)* where *`ctrl_cost_weight`* is a parameter set for the + control and has a default value of 1e-4 + + The total reward returned is ***reward*** *=* *forward_reward - ctrl_cost* and `info` will also contain the individual reward terms + + ### Starting State + All observations start in state (0,0,0,0,0,0,0,0) with a Uniform noise in the range of [-`reset_noise_scale`, `reset_noise_scale`] is added to the initial state for stochasticity. + + ### Episode End + The episode truncates when the episode length is greater than 1000. + + ### Arguments + + No additional arguments are currently supported in v2 and lower. + + ``` + gym.make('Swimmer-v4') + ``` + + v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('Swimmer-v4', ctrl_cost_weight=0.1, ....) 
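+    # The reward and noise arguments documented in the table below can be combined
+    # in a single call; the values shown here are illustrative, not the defaults:
+    env = gym.make('Swimmer-v4',
+                   forward_reward_weight=1.0,
+                   ctrl_cost_weight=1e-4,
+                   reset_noise_scale=0.05,
+                   exclude_current_positions_from_observation=False)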
+ ``` + + | Parameter | Type | Default | Description | + | -------------------------------------------- | --------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `xml_file` | **str** | `"swimmer.xml"` | Path to a MuJoCo model | + | `forward_reward_weight` | **float** | `1.0` | Weight for _forward_reward_ term (see section on reward) | + | `ctrl_cost_weight` | **float** | `1e-4` | Weight for _ctrl_cost_ term (see section on reward) | + | `reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x- and y-coordinates from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 25, + } + + def __init__( + self, + forward_reward_weight=1.0, + ctrl_cost_weight=1e-4, + reset_noise_scale=0.1, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + forward_reward_weight, + ctrl_cost_weight, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(10,), dtype=np.float64 + ) + MujocoEnv.__init__( + self, "swimmer.xml", 4, observation_space=observation_space, **kwargs + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + def step(self, action): + xy_position_before = self.data.qpos[0:2].copy() + self.do_simulation(action, self.frame_skip) + xy_position_after = self.data.qpos[0:2].copy() + + xy_velocity = (xy_position_after - xy_position_before) / self.dt + x_velocity, y_velocity = xy_velocity + + forward_reward = self._forward_reward_weight * x_velocity + + ctrl_cost = self.control_cost(action) + + observation = self._get_obs() + reward = forward_reward - ctrl_cost + info = { + "reward_fwd": forward_reward, + "reward_ctrl": -ctrl_cost, + "x_position": xy_position_after[0], + "y_position": xy_position_after[1], + "distance_from_origin": np.linalg.norm(xy_position_after, ord=2), + "x_velocity": x_velocity, + "y_velocity": y_velocity, + "forward_reward": forward_reward, + } + + if self.render_mode == "human": + self.render() + + return observation, reward, False, False, info + + def _get_obs(self): + position = 
self.data.qpos.flat.copy() + velocity = self.data.qvel.flat.copy() + + if self._exclude_current_positions_from_observation: + position = position[2:] + + observation = np.concatenate([position, velocity]).ravel() + return observation + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d.py b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d.py new file mode 100644 index 0000000000000000000000000000000000000000..3c5506c497f3f5e6fc246db12e28ddd3a86f054a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d.py @@ -0,0 +1,61 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + + +class Walker2dEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 125, + } + + def __init__(self, **kwargs): + observation_space = Box(low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64) + MuJocoPyEnv.__init__( + self, "walker2d.xml", 4, observation_space=observation_space, **kwargs + ) + utils.EzPickle.__init__(self, **kwargs) + + def step(self, a): + posbefore = self.sim.data.qpos[0] + self.do_simulation(a, self.frame_skip) + posafter, height, ang = self.sim.data.qpos[0:3] + + alive_bonus = 1.0 + reward = (posafter - posbefore) / self.dt + reward += alive_bonus + reward -= 1e-3 * np.square(a).sum() + terminated = not (height > 0.8 and height < 2.0 and ang > -1.0 and ang < 1.0) + ob = self._get_obs() + + if self.render_mode == "human": + self.render() + + return ob, reward, terminated, False, {} + + def _get_obs(self): + qpos = self.sim.data.qpos + qvel = self.sim.data.qvel + return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel() + + def reset_model(self): + self.set_state( + self.init_qpos + + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nq), + self.init_qvel + + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nv), + ) + return self._get_obs() + + def viewer_setup(self): + assert self.viewer is not None + self.viewer.cam.trackbodyid = 2 + self.viewer.cam.distance = self.model.stat.extent * 0.5 + self.viewer.cam.lookat[2] = 1.15 + self.viewer.cam.elevation = -20 diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v3.py b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..cfbd9933af5c7a6a2d84c6d11041c078d4a1a060 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v3.py @@ -0,0 +1,167 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MuJocoPyEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 2, + "distance": 4.0, + "lookat": np.array((0.0, 0.0, 1.15)), + "elevation": -20.0, +} + + +class Walker2dEnv(MuJocoPyEnv, utils.EzPickle): + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 
125, + } + + def __init__( + self, + xml_file="walker2d.xml", + forward_reward_weight=1.0, + ctrl_cost_weight=1e-3, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_z_range=(0.8, 2.0), + healthy_angle_range=(-1.0, 1.0), + reset_noise_scale=5e-3, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + xml_file, + forward_reward_weight, + ctrl_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + healthy_angle_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + + self._healthy_z_range = healthy_z_range + self._healthy_angle_range = healthy_angle_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64 + ) + + MuJocoPyEnv.__init__( + self, xml_file, 4, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def is_healthy(self): + z, angle = self.sim.data.qpos[1:3] + + min_z, max_z = self._healthy_z_range + min_angle, max_angle = self._healthy_angle_range + + healthy_z = min_z < z < max_z + healthy_angle = min_angle < angle < max_angle + is_healthy = healthy_z and healthy_angle + + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.sim.data.qpos.flat.copy() + velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10) + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def step(self, action): + x_position_before = self.sim.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.sim.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + + observation = self._get_obs() + reward = rewards - costs + terminated = self.terminated + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + } + + if self.render_mode == "human": + self.render() + + return observation, reward, terminated, False, info + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + observation = self._get_obs() + return observation + + def viewer_setup(self): + assert 
self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v4.py b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..47e562817660621ebda79d9429a9b51b93806ba5 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/mujoco/walker2d_v4.py @@ -0,0 +1,296 @@ +import numpy as np + +from gym import utils +from gym.envs.mujoco import MujocoEnv +from gym.spaces import Box + +DEFAULT_CAMERA_CONFIG = { + "trackbodyid": 2, + "distance": 4.0, + "lookat": np.array((0.0, 0.0, 1.15)), + "elevation": -20.0, +} + + +class Walker2dEnv(MujocoEnv, utils.EzPickle): + """ + ### Description + + This environment builds on the hopper environment based on the work done by Erez, Tassa, and Todorov + in ["Infinite Horizon Model Predictive Control for Nonlinear Periodic Tasks"](http://www.roboticsproceedings.org/rss07/p10.pdf) + by adding another set of legs making it possible for the robot to walker forward instead of + hop. Like other Mujoco environments, this environment aims to increase the number of independent state + and control variables as compared to the classic control environments. The walker is a + two-dimensional two-legged figure that consist of four main body parts - a single torso at the top + (with the two legs splitting after the torso), two thighs in the middle below the torso, two legs + in the bottom below the thighs, and two feet attached to the legs on which the entire body rests. + The goal is to make coordinate both sets of feet, legs, and thighs to move in the forward (right) + direction by applying torques on the six hinges connecting the six body parts. + + ### Action Space + The action space is a `Box(-1, 1, (6,), float32)`. An action represents the torques applied at the hinge joints. + + | Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit | + |-----|----------------------------------------|-------------|-------------|----------------------------------|-------|--------------| + | 0 | Torque applied on the thigh rotor | -1 | 1 | thigh_joint | hinge | torque (N m) | + | 1 | Torque applied on the leg rotor | -1 | 1 | leg_joint | hinge | torque (N m) | + | 2 | Torque applied on the foot rotor | -1 | 1 | foot_joint | hinge | torque (N m) | + | 3 | Torque applied on the left thigh rotor | -1 | 1 | thigh_left_joint | hinge | torque (N m) | + | 4 | Torque applied on the left leg rotor | -1 | 1 | leg_left_joint | hinge | torque (N m) | + | 5 | Torque applied on the left foot rotor | -1 | 1 | foot_left_joint | hinge | torque (N m) | + + ### Observation Space + + Observations consist of positional values of different body parts of the walker, + followed by the velocities of those individual parts (their derivatives) with all the positions ordered before all the velocities. + + By default, observations do not include the x-coordinate of the top. It may + be included by passing `exclude_current_positions_from_observation=False` during construction. + In that case, the observation space will have 18 dimensions where the first dimension + represent the x-coordinates of the top of the walker. + Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x-coordinate + of the top will be returned in `info` with key `"x_position"`. 
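+    For example (a sketch; the printed shape follows from the constructor arguments described
+    further below, and the `info` keys are those documented for `step`):
+
+    ```
+    import gym
+
+    env = gym.make("Walker2d-v4", exclude_current_positions_from_observation=False)
+    obs, info = env.reset(seed=0)
+    print(obs.shape)  # (18,): the first entry is the x-coordinate of the top
+    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
+    print(info["x_position"], info["x_velocity"])  # reported regardless of the flag
+    ```
+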
+ + By default, observation is a `ndarray` with shape `(17,)` where the elements correspond to the following: + + | Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit | + | --- | ------------------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ | + | 0 | z-coordinate of the top (height of hopper) | -Inf | Inf | rootz (torso) | slide | position (m) | + | 1 | angle of the top | -Inf | Inf | rooty (torso) | hinge | angle (rad) | + | 2 | angle of the thigh joint | -Inf | Inf | thigh_joint | hinge | angle (rad) | + | 3 | angle of the leg joint | -Inf | Inf | leg_joint | hinge | angle (rad) | + | 4 | angle of the foot joint | -Inf | Inf | foot_joint | hinge | angle (rad) | + | 5 | angle of the left thigh joint | -Inf | Inf | thigh_left_joint | hinge | angle (rad) | + | 6 | angle of the left leg joint | -Inf | Inf | leg_left_joint | hinge | angle (rad) | + | 7 | angle of the left foot joint | -Inf | Inf | foot_left_joint | hinge | angle (rad) | + | 8 | velocity of the x-coordinate of the top | -Inf | Inf | rootx | slide | velocity (m/s) | + | 9 | velocity of the z-coordinate (height) of the top | -Inf | Inf | rootz | slide | velocity (m/s) | + | 10 | angular velocity of the angle of the top | -Inf | Inf | rooty | hinge | angular velocity (rad/s) | + | 11 | angular velocity of the thigh hinge | -Inf | Inf | thigh_joint | hinge | angular velocity (rad/s) | + | 12 | angular velocity of the leg hinge | -Inf | Inf | leg_joint | hinge | angular velocity (rad/s) | + | 13 | angular velocity of the foot hinge | -Inf | Inf | foot_joint | hinge | angular velocity (rad/s) | + | 14 | angular velocity of the thigh hinge | -Inf | Inf | thigh_left_joint | hinge | angular velocity (rad/s) | + | 15 | angular velocity of the leg hinge | -Inf | Inf | leg_left_joint | hinge | angular velocity (rad/s) | + | 16 | angular velocity of the foot hinge | -Inf | Inf | foot_left_joint | hinge | angular velocity (rad/s) | + ### Rewards + The reward consists of three parts: + - *healthy_reward*: Every timestep that the walker is alive, it receives a fixed reward of value `healthy_reward`, + - *forward_reward*: A reward of walking forward which is measured as + *`forward_reward_weight` * (x-coordinate before action - x-coordinate after action)/dt*. + *dt* is the time between actions and is dependeent on the frame_skip parameter + (default is 4), where the frametime is 0.002 - making the default + *dt = 4 * 0.002 = 0.008*. This reward would be positive if the walker walks forward (right) desired. + - *ctrl_cost*: A cost for penalising the walker if it + takes actions that are too large. It is measured as + *`ctrl_cost_weight` * sum(action2)* where *`ctrl_cost_weight`* is + a parameter set for the control and has a default value of 0.001 + + The total reward returned is ***reward*** *=* *healthy_reward bonus + forward_reward - ctrl_cost* and `info` will also contain the individual reward terms + + ### Starting State + All observations start in state + (0.0, 1.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + with a uniform noise in the range of [-`reset_noise_scale`, `reset_noise_scale`] added to the values for stochasticity. + + ### Episode End + The walker is said to be unhealthy if any of the following happens: + + 1. Any of the state space values is no longer finite + 2. The height of the walker is ***not*** in the closed interval specified by `healthy_z_range` + 3. 
The absolute value of the angle (`observation[1]` if `exclude_current_positions_from_observation=False`, else `observation[2]`) is ***not*** in the closed interval specified by `healthy_angle_range` + + If `terminate_when_unhealthy=True` is passed during construction (which is the default), + the episode ends when any of the following happens: + + 1. Truncation: The episode duration reaches a 1000 timesteps + 2. Termination: The walker is unhealthy + + If `terminate_when_unhealthy=False` is passed, the episode is ended only when 1000 timesteps are exceeded. + + ### Arguments + + No additional arguments are currently supported in v2 and lower. + + ``` + env = gym.make('Walker2d-v4') + ``` + + v3 and beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. + + ``` + env = gym.make('Walker2d-v4', ctrl_cost_weight=0.1, ....) + ``` + + | Parameter | Type | Default | Description | + | -------------------------------------------- | --------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `xml_file` | **str** | `"walker2d.xml"` | Path to a MuJoCo model | + | `forward_reward_weight` | **float** | `1.0` | Weight for _forward_reward_ term (see section on reward) | + | `ctrl_cost_weight` | **float** | `1e-3` | Weight for _ctr_cost_ term (see section on reward) | + | `healthy_reward` | **float** | `1.0` | Constant reward given if the ant is "healthy" after timestep | + | `terminate_when_unhealthy` | **bool** | `True` | If true, issue a done signal if the z-coordinate of the walker is no longer healthy | + | `healthy_z_range` | **tuple** | `(0.8, 2)` | The z-coordinate of the top of the walker must be in this range to be considered healthy | + | `healthy_angle_range` | **tuple** | `(-1, 1)` | The angle must be in this range to be considered healthy | + | `reset_noise_scale` | **float** | `5e-3` | Scale of random perturbations of initial position and velocity (see section on Starting State) | + | `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies | + + + ### Version History + + * v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3 + * v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen) + * v2: All continuous control environments now use mujoco_py >= 1.50 + * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments. 
+ * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": [ + "human", + "rgb_array", + "depth_array", + ], + "render_fps": 125, + } + + def __init__( + self, + forward_reward_weight=1.0, + ctrl_cost_weight=1e-3, + healthy_reward=1.0, + terminate_when_unhealthy=True, + healthy_z_range=(0.8, 2.0), + healthy_angle_range=(-1.0, 1.0), + reset_noise_scale=5e-3, + exclude_current_positions_from_observation=True, + **kwargs + ): + utils.EzPickle.__init__( + self, + forward_reward_weight, + ctrl_cost_weight, + healthy_reward, + terminate_when_unhealthy, + healthy_z_range, + healthy_angle_range, + reset_noise_scale, + exclude_current_positions_from_observation, + **kwargs + ) + + self._forward_reward_weight = forward_reward_weight + self._ctrl_cost_weight = ctrl_cost_weight + + self._healthy_reward = healthy_reward + self._terminate_when_unhealthy = terminate_when_unhealthy + + self._healthy_z_range = healthy_z_range + self._healthy_angle_range = healthy_angle_range + + self._reset_noise_scale = reset_noise_scale + + self._exclude_current_positions_from_observation = ( + exclude_current_positions_from_observation + ) + + if exclude_current_positions_from_observation: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64 + ) + else: + observation_space = Box( + low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64 + ) + + MujocoEnv.__init__( + self, "walker2d.xml", 4, observation_space=observation_space, **kwargs + ) + + @property + def healthy_reward(self): + return ( + float(self.is_healthy or self._terminate_when_unhealthy) + * self._healthy_reward + ) + + def control_cost(self, action): + control_cost = self._ctrl_cost_weight * np.sum(np.square(action)) + return control_cost + + @property + def is_healthy(self): + z, angle = self.data.qpos[1:3] + + min_z, max_z = self._healthy_z_range + min_angle, max_angle = self._healthy_angle_range + + healthy_z = min_z < z < max_z + healthy_angle = min_angle < angle < max_angle + is_healthy = healthy_z and healthy_angle + + return is_healthy + + @property + def terminated(self): + terminated = not self.is_healthy if self._terminate_when_unhealthy else False + return terminated + + def _get_obs(self): + position = self.data.qpos.flat.copy() + velocity = np.clip(self.data.qvel.flat.copy(), -10, 10) + + if self._exclude_current_positions_from_observation: + position = position[1:] + + observation = np.concatenate((position, velocity)).ravel() + return observation + + def step(self, action): + x_position_before = self.data.qpos[0] + self.do_simulation(action, self.frame_skip) + x_position_after = self.data.qpos[0] + x_velocity = (x_position_after - x_position_before) / self.dt + + ctrl_cost = self.control_cost(action) + + forward_reward = self._forward_reward_weight * x_velocity + healthy_reward = self.healthy_reward + + rewards = forward_reward + healthy_reward + costs = ctrl_cost + + observation = self._get_obs() + reward = rewards - costs + terminated = self.terminated + info = { + "x_position": x_position_after, + "x_velocity": x_velocity, + } + + if self.render_mode == "human": + self.render() + + return observation, reward, terminated, False, info + + def reset_model(self): + noise_low = -self._reset_noise_scale + noise_high = self._reset_noise_scale + + qpos = self.init_qpos + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nq + ) + qvel = self.init_qvel + self.np_random.uniform( + low=noise_low, high=noise_high, size=self.model.nv + ) + + self.set_state(qpos, qvel) + + 
observation = self._get_obs() + return observation + + def viewer_setup(self): + assert self.viewer is not None + for key, value in DEFAULT_CAMERA_CONFIG.items(): + if isinstance(value, np.ndarray): + getattr(self.viewer.cam, key)[:] = value + else: + setattr(self.viewer.cam, key, value) diff --git a/MLPY/Lib/site-packages/gym/envs/registration.py b/MLPY/Lib/site-packages/gym/envs/registration.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0237eac9137b0775c784b04e75841c7f8966f1 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/registration.py @@ -0,0 +1,703 @@ +import contextlib +import copy +import difflib +import importlib +import importlib.util +import re +import sys +import warnings +from dataclasses import dataclass, field +from typing import ( + Callable, + Dict, + List, + Optional, + Sequence, + SupportsFloat, + Tuple, + Union, + overload, +) + +import numpy as np + +from gym.wrappers import ( + AutoResetWrapper, + HumanRendering, + OrderEnforcing, + RenderCollection, + TimeLimit, +) +from gym.wrappers.compatibility import EnvCompatibility +from gym.wrappers.env_checker import PassiveEnvChecker + +if sys.version_info < (3, 10): + import importlib_metadata as metadata # type: ignore +else: + import importlib.metadata as metadata + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +from gym import Env, error, logger + +ENV_ID_RE = re.compile( + r"^(?:(?P[\w:-]+)\/)?(?:(?P[\w:.-]+?))(?:-v(?P\d+))?$" +) + + +def load(name: str) -> callable: + """Loads an environment with name and returns an environment creation function + + Args: + name: The environment name + + Returns: + Calls the environment constructor + """ + mod_name, attr_name = name.split(":") + mod = importlib.import_module(mod_name) + fn = getattr(mod, attr_name) + return fn + + +def parse_env_id(id: str) -> Tuple[Optional[str], str, Optional[int]]: + """Parse environment ID string format. + + This format is true today, but it's *not* an official spec. + [namespace/](env-name)-v(version) env-name is group 1, version is group 2 + + 2016-10-31: We're experimentally expanding the environment ID format + to include an optional namespace. + + Args: + id: The environment id to parse + + Returns: + A tuple of environment namespace, environment name and version number + + Raises: + Error: If the environment id does not a valid environment regex + """ + match = ENV_ID_RE.fullmatch(id) + if not match: + raise error.Error( + f"Malformed environment ID: {id}." + f"(Currently all IDs must be of the form [namespace/](env-name)-v(version). (namespace is optional))" + ) + namespace, name, version = match.group("namespace", "name", "version") + if version is not None: + version = int(version) + + return namespace, name, version + + +def get_env_id(ns: Optional[str], name: str, version: Optional[int]) -> str: + """Get the full env ID given a name and (optional) version and namespace. Inverse of :meth:`parse_env_id`. + + Args: + ns: The environment namespace + name: The environment name + version: The environment version + + Returns: + The environment id + """ + + full_name = name + if version is not None: + full_name += f"-v{version}" + if ns is not None: + full_name = ns + "/" + full_name + return full_name + + +@dataclass +class EnvSpec: + """A specification for creating environments with `gym.make`. 
+ + * id: The string used to create the environment with `gym.make` + * entry_point: The location of the environment to create from + * reward_threshold: The reward threshold for completing the environment. + * nondeterministic: If the observation of an environment cannot be repeated with the same initial state, random number generator state and actions. + * max_episode_steps: The max number of steps that the environment can take before truncation + * order_enforce: If to enforce the order of `reset` before `step` and `render` functions + * autoreset: If to automatically reset the environment on episode end + * disable_env_checker: If to disable the environment checker wrapper in `gym.make`, by default False (runs the environment checker) + * kwargs: Additional keyword arguments passed to the environments through `gym.make` + """ + + id: str + entry_point: Union[Callable, str] + + # Environment attributes + reward_threshold: Optional[float] = field(default=None) + nondeterministic: bool = field(default=False) + + # Wrappers + max_episode_steps: Optional[int] = field(default=None) + order_enforce: bool = field(default=True) + autoreset: bool = field(default=False) + disable_env_checker: bool = field(default=False) + apply_api_compatibility: bool = field(default=False) + + # Environment arguments + kwargs: dict = field(default_factory=dict) + + # post-init attributes + namespace: Optional[str] = field(init=False) + name: str = field(init=False) + version: Optional[int] = field(init=False) + + def __post_init__(self): + # Initialize namespace, name, version + self.namespace, self.name, self.version = parse_env_id(self.id) + + def make(self, **kwargs) -> Env: + # For compatibility purposes + return make(self, **kwargs) + + +def _check_namespace_exists(ns: Optional[str]): + """Check if a namespace exists. If it doesn't, print a helpful error message.""" + if ns is None: + return + namespaces = { + spec_.namespace for spec_ in registry.values() if spec_.namespace is not None + } + if ns in namespaces: + return + + suggestion = ( + difflib.get_close_matches(ns, namespaces, n=1) if len(namespaces) > 0 else None + ) + suggestion_msg = ( + f"Did you mean: `{suggestion[0]}`?" + if suggestion + else f"Have you installed the proper package for {ns}?" + ) + + raise error.NamespaceNotFound(f"Namespace {ns} not found. {suggestion_msg}") + + +def _check_name_exists(ns: Optional[str], name: str): + """Check if an env exists in a namespace. If it doesn't, print a helpful error message.""" + _check_namespace_exists(ns) + names = {spec_.name for spec_ in registry.values() if spec_.namespace == ns} + + if name in names: + return + + suggestion = difflib.get_close_matches(name, names, n=1) + namespace_msg = f" in namespace {ns}" if ns else "" + suggestion_msg = f"Did you mean: `{suggestion[0]}`?" if suggestion else "" + + raise error.NameNotFound( + f"Environment {name} doesn't exist{namespace_msg}. {suggestion_msg}" + ) + + +def _check_version_exists(ns: Optional[str], name: str, version: Optional[int]): + """Check if an env version exists in a namespace. If it doesn't, print a helpful error message. + This is a complete test whether an environment identifier is valid, and will provide the best available hints. 
+ + Args: + ns: The environment namespace + name: The environment space + version: The environment version + + Raises: + DeprecatedEnv: The environment doesn't exist but a default version does + VersionNotFound: The ``version`` used doesn't exist + DeprecatedEnv: Environment version is deprecated + """ + if get_env_id(ns, name, version) in registry: + return + + _check_name_exists(ns, name) + if version is None: + return + + message = f"Environment version `v{version}` for environment `{get_env_id(ns, name, None)}` doesn't exist." + + env_specs = [ + spec_ + for spec_ in registry.values() + if spec_.namespace == ns and spec_.name == name + ] + env_specs = sorted(env_specs, key=lambda spec_: int(spec_.version or -1)) + + default_spec = [spec_ for spec_ in env_specs if spec_.version is None] + + if default_spec: + message += f" It provides the default version {default_spec[0].id}`." + if len(env_specs) == 1: + raise error.DeprecatedEnv(message) + + # Process possible versioned environments + + versioned_specs = [spec_ for spec_ in env_specs if spec_.version is not None] + + latest_spec = max(versioned_specs, key=lambda spec: spec.version, default=None) # type: ignore + if latest_spec is not None and version > latest_spec.version: + version_list_msg = ", ".join(f"`v{spec_.version}`" for spec_ in env_specs) + message += f" It provides versioned environments: [ {version_list_msg} ]." + + raise error.VersionNotFound(message) + + if latest_spec is not None and version < latest_spec.version: + raise error.DeprecatedEnv( + f"Environment version v{version} for `{get_env_id(ns, name, None)}` is deprecated. " + f"Please use `{latest_spec.id}` instead." + ) + + +def find_highest_version(ns: Optional[str], name: str) -> Optional[int]: + version: List[int] = [ + spec_.version + for spec_ in registry.values() + if spec_.namespace == ns and spec_.name == name and spec_.version is not None + ] + return max(version, default=None) + + +def load_env_plugins(entry_point: str = "gym.envs") -> None: + # Load third-party environments + for plugin in metadata.entry_points(group=entry_point): + # Python 3.8 doesn't support plugin.module, plugin.attr + # So we'll have to try and parse this ourselves + module, attr = None, None + try: + module, attr = plugin.module, plugin.attr # type: ignore ## error: Cannot access member "attr" for type "EntryPoint" + except AttributeError: + if ":" in plugin.value: + module, attr = plugin.value.split(":", maxsplit=1) + else: + module, attr = plugin.value, None + except Exception as e: + warnings.warn( + f"While trying to load plugin `{plugin}` from {entry_point}, an exception occurred: {e}" + ) + module, attr = None, None + finally: + if attr is None: + raise error.Error( + f"Gym environment plugin `{module}` must specify a function to execute, not a root module" + ) + + context = namespace(plugin.name) + if plugin.name.startswith("__") and plugin.name.endswith("__"): + # `__internal__` is an artifact of the plugin system when + # the root namespace had an allow-list. The allow-list is now + # removed and plugins can register environments in the root + # namespace with the `__root__` magic key. + if plugin.name == "__root__" or plugin.name == "__internal__": + context = contextlib.nullcontext() + else: + logger.warn( + f"The environment namespace magic key `{plugin.name}` is unsupported. " + "To register an environment at the root namespace you should specify the `__root__` namespace." 
+ ) + + with context: + fn = plugin.load() + try: + fn() + except Exception as e: + logger.warn(str(e)) + + +# fmt: off +@overload +def make(id: str, **kwargs) -> Env: ... +@overload +def make(id: EnvSpec, **kwargs) -> Env: ... + + +# Classic control +# ---------------------------------------- +@overload +def make(id: Literal["CartPole-v0", "CartPole-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["MountainCar-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["MountainCarContinuous-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ... +@overload +def make(id: Literal["Pendulum-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ... +@overload +def make(id: Literal["Acrobot-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... + + +# Box2d +# ---------------------------------------- +@overload +def make(id: Literal["LunarLander-v2", "LunarLanderContinuous-v2"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["BipedalWalker-v3", "BipedalWalkerHardcore-v3"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ... +@overload +def make(id: Literal["CarRacing-v2"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ... + + +# Toy Text +# ---------------------------------------- +@overload +def make(id: Literal["Blackjack-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["FrozenLake-v1", "FrozenLake8x8-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["CliffWalking-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... +@overload +def make(id: Literal["Taxi-v3"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ... + + +# Mujoco +# ---------------------------------------- +@overload +def make(id: Literal[ + "Reacher-v2", "Reacher-v4", + "Pusher-v2", "Pusher-v4", + "InvertedPendulum-v2", "InvertedPendulum-v4", + "InvertedDoublePendulum-v2", "InvertedDoublePendulum-v4", + "HalfCheetah-v2", "HalfCheetah-v3", "HalfCheetah-v4", + "Hopper-v2", "Hopper-v3", "Hopper-v4", + "Swimmer-v2", "Swimmer-v3", "Swimmer-v4", + "Walker2d-v2", "Walker2d-v3", "Walker2d-v4", + "Ant-v2", "Ant-v3", "Ant-v4", + "HumanoidStandup-v2", "HumanoidStandup-v4", + "Humanoid-v2", "Humanoid-v3", "Humanoid-v4", +], **kwargs) -> Env[np.ndarray, np.ndarray]: ... +# fmt: on + + +# Global registry of environments. Meant to be accessed through `register` and `make` +registry: Dict[str, EnvSpec] = {} +current_namespace: Optional[str] = None + + +def _check_spec_register(spec: EnvSpec): + """Checks whether the spec is valid to be registered. Helper function for `register`.""" + global registry + latest_versioned_spec = max( + ( + spec_ + for spec_ in registry.values() + if spec_.namespace == spec.namespace + and spec_.name == spec.name + and spec_.version is not None + ), + key=lambda spec_: int(spec_.version), # type: ignore + default=None, + ) + + unversioned_spec = next( + ( + spec_ + for spec_ in registry.values() + if spec_.namespace == spec.namespace + and spec_.name == spec.name + and spec_.version is None + ), + None, + ) + + if unversioned_spec is not None and spec.version is not None: + raise error.RegistrationError( + "Can't register the versioned environment " + f"`{spec.id}` when the unversioned environment " + f"`{unversioned_spec.id}` of the same name already exists." 
+ ) + elif latest_versioned_spec is not None and spec.version is None: + raise error.RegistrationError( + "Can't register the unversioned environment " + f"`{spec.id}` when the versioned environment " + f"`{latest_versioned_spec.id}` of the same name " + f"already exists. Note: the default behavior is " + f"that `gym.make` with the unversioned environment " + f"will return the latest versioned environment" + ) + + +# Public API + + +@contextlib.contextmanager +def namespace(ns: str): + global current_namespace + old_namespace = current_namespace + current_namespace = ns + yield + current_namespace = old_namespace + + +def register( + id: str, + entry_point: Union[Callable, str], + reward_threshold: Optional[float] = None, + nondeterministic: bool = False, + max_episode_steps: Optional[int] = None, + order_enforce: bool = True, + autoreset: bool = False, + disable_env_checker: bool = False, + apply_api_compatibility: bool = False, + **kwargs, +): + """Register an environment with gym. + + The `id` parameter corresponds to the name of the environment, with the syntax as follows: + `(namespace)/(env_name)-v(version)` where `namespace` is optional. + + It takes arbitrary keyword arguments, which are passed to the `EnvSpec` constructor. + + Args: + id: The environment id + entry_point: The entry point for creating the environment + reward_threshold: The reward threshold considered to have learnt an environment + nondeterministic: If the environment is nondeterministic (even with knowledge of the initial seed and all actions) + max_episode_steps: The maximum number of episodes steps before truncation. Used by the Time Limit wrapper. + order_enforce: If to enable the order enforcer wrapper to ensure users run functions in the correct order + autoreset: If to add the autoreset wrapper such that reset does not need to be called. + disable_env_checker: If to disable the environment checker for the environment. Recommended to False. + apply_api_compatibility: If to apply the `StepAPICompatibility` wrapper. + **kwargs: arbitrary keyword arguments which are passed to the environment constructor + """ + global registry, current_namespace + ns, name, version = parse_env_id(id) + + if current_namespace is not None: + if ( + kwargs.get("namespace") is not None + and kwargs.get("namespace") != current_namespace + ): + logger.warn( + f"Custom namespace `{kwargs.get('namespace')}` is being overridden by namespace `{current_namespace}`. " + f"If you are developing a plugin you shouldn't specify a namespace in `register` calls. " + "The namespace is specified through the entry point package metadata." + ) + ns_id = current_namespace + else: + ns_id = ns + + full_id = get_env_id(ns_id, name, version) + + new_spec = EnvSpec( + id=full_id, + entry_point=entry_point, + reward_threshold=reward_threshold, + nondeterministic=nondeterministic, + max_episode_steps=max_episode_steps, + order_enforce=order_enforce, + autoreset=autoreset, + disable_env_checker=disable_env_checker, + apply_api_compatibility=apply_api_compatibility, + **kwargs, + ) + _check_spec_register(new_spec) + if new_spec.id in registry: + logger.warn(f"Overriding environment {new_spec.id} already in registry.") + registry[new_spec.id] = new_spec + + +def make( + id: Union[str, EnvSpec], + max_episode_steps: Optional[int] = None, + autoreset: bool = False, + apply_api_compatibility: Optional[bool] = None, + disable_env_checker: Optional[bool] = None, + **kwargs, +) -> Env: + """Create an environment according to the given ID. 
+ + To find all available environments use `gym.envs.registry.keys()` for all valid ids. + + Args: + id: Name of the environment. Optionally, a module to import can be included, eg. 'module:Env-v0' + max_episode_steps: Maximum length of an episode (TimeLimit wrapper). + autoreset: Whether to automatically reset the environment after each episode (AutoResetWrapper). + apply_api_compatibility: Whether to wrap the environment with the `StepAPICompatibility` wrapper that + converts the environment step from a done bool to return termination and truncation bools. + By default, the argument is None to which the environment specification `apply_api_compatibility` is used + which defaults to False. Otherwise, the value of `apply_api_compatibility` is used. + If `True`, the wrapper is applied otherwise, the wrapper is not applied. + disable_env_checker: If to run the env checker, None will default to the environment specification `disable_env_checker` + (which is by default False, running the environment checker), + otherwise will run according to this parameter (`True` = not run, `False` = run) + kwargs: Additional arguments to pass to the environment constructor. + + Returns: + An instance of the environment. + + Raises: + Error: If the ``id`` doesn't exist then an error is raised + """ + if isinstance(id, EnvSpec): + spec_ = id + else: + module, id = (None, id) if ":" not in id else id.split(":") + if module is not None: + try: + importlib.import_module(module) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"{e}. Environment registration via importing a module failed. " + f"Check whether '{module}' contains env registration and can be imported." + ) + spec_ = registry.get(id) + + ns, name, version = parse_env_id(id) + latest_version = find_highest_version(ns, name) + if ( + version is not None + and latest_version is not None + and latest_version > version + ): + logger.warn( + f"The environment {id} is out of date. You should consider " + f"upgrading to version `v{latest_version}`." + ) + if version is None and latest_version is not None: + version = latest_version + new_env_id = get_env_id(ns, name, version) + spec_ = registry.get(new_env_id) + logger.warn( + f"Using the latest versioned environment `{new_env_id}` " + f"instead of the unversioned environment `{id}`." 
+ ) + + if spec_ is None: + _check_version_exists(ns, name, version) + raise error.Error(f"No registered env with id: {id}") + + _kwargs = spec_.kwargs.copy() + _kwargs.update(kwargs) + + if spec_.entry_point is None: + raise error.Error(f"{spec_.id} registered but entry_point is not specified") + elif callable(spec_.entry_point): + env_creator = spec_.entry_point + else: + # Assume it's a string + env_creator = load(spec_.entry_point) + + mode = _kwargs.get("render_mode") + apply_human_rendering = False + apply_render_collection = False + + # If we have access to metadata we check that "render_mode" is valid and see if the HumanRendering wrapper needs to be applied + if mode is not None and hasattr(env_creator, "metadata"): + assert isinstance( + env_creator.metadata, dict + ), f"Expect the environment creator ({env_creator}) metadata to be dict, actual type: {type(env_creator.metadata)}" + + if "render_modes" in env_creator.metadata: + render_modes = env_creator.metadata["render_modes"] + if not isinstance(render_modes, Sequence): + logger.warn( + f"Expects the environment metadata render_modes to be a Sequence (tuple or list), actual type: {type(render_modes)}" + ) + + # Apply the `HumanRendering` wrapper, if the mode=="human" but "human" not in render_modes + if ( + mode == "human" + and "human" not in render_modes + and ("rgb_array" in render_modes or "rgb_array_list" in render_modes) + ): + logger.warn( + "You are trying to use 'human' rendering for an environment that doesn't natively support it. " + "The HumanRendering wrapper is being applied to your environment." + ) + apply_human_rendering = True + if "rgb_array" in render_modes: + _kwargs["render_mode"] = "rgb_array" + else: + _kwargs["render_mode"] = "rgb_array_list" + elif ( + mode not in render_modes + and mode.endswith("_list") + and mode[: -len("_list")] in render_modes + ): + _kwargs["render_mode"] = mode[: -len("_list")] + apply_render_collection = True + elif mode not in render_modes: + logger.warn( + f"The environment is being initialised with mode ({mode}) that is not in the possible render_modes ({render_modes})." + ) + else: + logger.warn( + f"The environment creator metadata doesn't include `render_modes`, contains: {list(env_creator.metadata.keys())}" + ) + + if apply_api_compatibility is True or ( + apply_api_compatibility is None and spec_.apply_api_compatibility is True + ): + # If we use the compatibility layer, we treat the render mode explicitly and don't pass it to the env creator + render_mode = _kwargs.pop("render_mode", None) + else: + render_mode = None + + try: + env = env_creator(**_kwargs) + except TypeError as e: + if ( + str(e).find("got an unexpected keyword argument 'render_mode'") >= 0 + and apply_human_rendering + ): + raise error.Error( + f"You passed render_mode='human' although {id} doesn't implement human-rendering natively. " + "Gym tried to apply the HumanRendering wrapper but it looks like your environment is using the old " + "rendering API, which is not supported by the HumanRendering wrapper." 
+ ) + else: + raise e + + # Copies the environment creation specification and kwargs to add to the environment specification details + spec_ = copy.deepcopy(spec_) + spec_.kwargs = _kwargs + env.unwrapped.spec = spec_ + + # Add step API wrapper + if apply_api_compatibility is True or ( + apply_api_compatibility is None and spec_.apply_api_compatibility is True + ): + env = EnvCompatibility(env, render_mode) + + # Run the environment checker as the lowest level wrapper + if disable_env_checker is False or ( + disable_env_checker is None and spec_.disable_env_checker is False + ): + env = PassiveEnvChecker(env) + + # Add the order enforcing wrapper + if spec_.order_enforce: + env = OrderEnforcing(env) + + # Add the time limit wrapper + if max_episode_steps is not None: + env = TimeLimit(env, max_episode_steps) + elif spec_.max_episode_steps is not None: + env = TimeLimit(env, spec_.max_episode_steps) + + # Add the autoreset wrapper + if autoreset: + env = AutoResetWrapper(env) + + # Add human rendering wrapper + if apply_human_rendering: + env = HumanRendering(env) + elif apply_render_collection: + env = RenderCollection(env) + + return env + + +def spec(env_id: str) -> EnvSpec: + """Retrieve the spec for the given environment from the global registry.""" + spec_ = registry.get(env_id) + if spec_ is None: + ns, name, version = parse_env_id(env_id) + _check_version_exists(ns, name, version) + raise error.Error(f"No registered env with id: {env_id}") + else: + assert isinstance(spec_, EnvSpec) + return spec_ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__init__.py b/MLPY/Lib/site-packages/gym/envs/toy_text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36e1207955a4c607e371eb51045d0458dddfeeac --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/__init__.py @@ -0,0 +1,4 @@ +from gym.envs.toy_text.blackjack import BlackjackEnv +from gym.envs.toy_text.cliffwalking import CliffWalkingEnv +from gym.envs.toy_text.frozen_lake import FrozenLakeEnv +from gym.envs.toy_text.taxi import TaxiEnv diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90e7a61435a73e8969e55b01400f8e1e4f950fda Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/blackjack.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/blackjack.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7ddeb50993325aec72d9f17665039ad6962a646 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/blackjack.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/cliffwalking.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/cliffwalking.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0711ed12fb05d3d5589d7ff670a2275c8b7240d8 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/cliffwalking.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/frozen_lake.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/frozen_lake.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b91691b91773f071869919a2afa2b8a02bfeca7 Binary files 
/dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/frozen_lake.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/taxi.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/taxi.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89bd49a3e3ed81e1cf192bdccc4e842f65a93c89 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/taxi.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89f7b10044d99a817d195860a6426f21938a91ea Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/blackjack.py b/MLPY/Lib/site-packages/gym/envs/toy_text/blackjack.py new file mode 100644 index 0000000000000000000000000000000000000000..4bcce17a08691b6d2a175259b242f7bd40db9cad --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/blackjack.py @@ -0,0 +1,318 @@ +import os +from typing import Optional + +import numpy as np + +import gym +from gym import spaces +from gym.error import DependencyNotInstalled + + +def cmp(a, b): + return float(a > b) - float(a < b) + + +# 1 = Ace, 2-10 = Number cards, Jack/Queen/King = 10 +deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] + + +def draw_card(np_random): + return int(np_random.choice(deck)) + + +def draw_hand(np_random): + return [draw_card(np_random), draw_card(np_random)] + + +def usable_ace(hand): # Does this hand have a usable ace? + return 1 in hand and sum(hand) + 10 <= 21 + + +def sum_hand(hand): # Return current hand total + if usable_ace(hand): + return sum(hand) + 10 + return sum(hand) + + +def is_bust(hand): # Is this hand a bust? + return sum_hand(hand) > 21 + + +def score(hand): # What is the score of this hand (0 if bust) + return 0 if is_bust(hand) else sum_hand(hand) + + +def is_natural(hand): # Is this hand a natural blackjack? + return sorted(hand) == [1, 10] + + +class BlackjackEnv(gym.Env): + """ + Blackjack is a card game where the goal is to beat the dealer by obtaining cards + that sum to closer to 21 (without going over 21) than the dealers cards. + + ### Description + Card Values: + + - Face cards (Jack, Queen, King) have a point value of 10. + - Aces can either count as 11 (called a 'usable ace') or 1. + - Numerical cards (2-9) have a value equal to their number. + + This game is played with an infinite deck (or with replacement). + The game starts with the dealer having one face up and one face down card, + while the player has two face up cards. + + The player can request additional cards (hit, action=1) until they decide to stop (stick, action=0) + or exceed 21 (bust, immediate loss). + After the player sticks, the dealer reveals their facedown card, and draws + until their sum is 17 or greater. If the dealer goes bust, the player wins. + If neither the player nor the dealer busts, the outcome (win, lose, draw) is + decided by whose sum is closer to 21. + + ### Action Space + There are two actions: stick (0), and hit (1). + + ### Observation Space + The observation consists of a 3-tuple containing: the player's current sum, + the value of the dealer's one showing card (1-10 where 1 is ace), + and whether the player holds a usable ace (0 or 1). 
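The following is an illustrative sketch (not part of the patched file) of how the action and observation spaces above are used. It assumes the `Blackjack-v1` registration and the 5-tuple `step` API used throughout this file; the hit-below-17 policy is only a placeholder.

```python
import gym

env = gym.make("Blackjack-v1")
(player_sum, dealer_showing, usable_ace), _ = env.reset(seed=0)

terminated = False
while not terminated:
    # Naive policy: hit (1) below 17, otherwise stick (0).
    action = 1 if player_sum < 17 else 0
    obs, reward, terminated, truncated, info = env.step(action)
    player_sum, dealer_showing, usable_ace = obs

# reward is +1 for a win, 0 for a draw and -1 for a loss (see the Rewards section below).
print(player_sum, dealer_showing, reward)
```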
+ + This environment corresponds to the version of the blackjack problem + described in Example 5.1 in Reinforcement Learning: An Introduction + by Sutton and Barto (http://incompleteideas.net/book/the-book-2nd.html). + + ### Rewards + - win game: +1 + - lose game: -1 + - draw game: 0 + - win game with natural blackjack: + + +1.5 (if natural is True) + + +1 (if natural is False) + + ### Arguments + + ``` + gym.make('Blackjack-v1', natural=False, sab=False) + ``` + + `natural=False`: Whether to give an additional reward for + starting with a natural blackjack, i.e. starting with an ace and ten (sum is 21). + + `sab=False`: Whether to follow the exact rules outlined in the book by + Sutton and Barto. If `sab` is `True`, the keyword argument `natural` will be ignored. + If the player achieves a natural blackjack and the dealer does not, the player + will win (i.e. get a reward of +1). The reverse rule does not apply. + If both the player and the dealer get a natural, it will be a draw (i.e. reward 0). + + ### Version History + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": ["human", "rgb_array"], + "render_fps": 4, + } + + def __init__(self, render_mode: Optional[str] = None, natural=False, sab=False): + self.action_space = spaces.Discrete(2) + self.observation_space = spaces.Tuple( + (spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2)) + ) + + # Flag to payout 1.5 on a "natural" blackjack win, like casino rules + # Ref: http://www.bicyclecards.com/how-to-play/blackjack/ + self.natural = natural + + # Flag for full agreement with the (Sutton and Barto, 2018) definition. Overrides self.natural + self.sab = sab + + self.render_mode = render_mode + + def step(self, action): + assert self.action_space.contains(action) + if action: # hit: add a card to players hand and return + self.player.append(draw_card(self.np_random)) + if is_bust(self.player): + terminated = True + reward = -1.0 + else: + terminated = False + reward = 0.0 + else: # stick: play out the dealers hand, and score + terminated = True + while sum_hand(self.dealer) < 17: + self.dealer.append(draw_card(self.np_random)) + reward = cmp(score(self.player), score(self.dealer)) + if self.sab and is_natural(self.player) and not is_natural(self.dealer): + # Player automatically wins. Rules consistent with S&B + reward = 1.0 + elif ( + not self.sab + and self.natural + and is_natural(self.player) + and reward == 1.0 + ): + # Natural gives extra points, but doesn't autowin. 
Legacy implementation + reward = 1.5 + + if self.render_mode == "human": + self.render() + return self._get_obs(), reward, terminated, False, {} + + def _get_obs(self): + return (sum_hand(self.player), self.dealer[0], usable_ace(self.player)) + + def reset( + self, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self.dealer = draw_hand(self.np_random) + self.player = draw_hand(self.np_random) + + _, dealer_card_value, _ = self._get_obs() + + suits = ["C", "D", "H", "S"] + self.dealer_top_card_suit = self.np_random.choice(suits) + + if dealer_card_value == 1: + self.dealer_top_card_value_str = "A" + elif dealer_card_value == 10: + self.dealer_top_card_value_str = self.np_random.choice(["J", "Q", "K"]) + else: + self.dealer_top_card_value_str = str(dealer_card_value) + + if self.render_mode == "human": + self.render() + return self._get_obs(), {} + + def render(self): + if self.render_mode is None: + gym.logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[toy_text]`" + ) + + player_sum, dealer_card_value, usable_ace = self._get_obs() + screen_width, screen_height = 600, 500 + card_img_height = screen_height // 3 + card_img_width = int(card_img_height * 142 / 197) + spacing = screen_height // 20 + + bg_color = (7, 99, 36) + white = (255, 255, 255) + + if not hasattr(self, "screen"): + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode((screen_width, screen_height)) + else: + pygame.font.init() + self.screen = pygame.Surface((screen_width, screen_height)) + + if not hasattr(self, "clock"): + self.clock = pygame.time.Clock() + + self.screen.fill(bg_color) + + def get_image(path): + cwd = os.path.dirname(__file__) + image = pygame.image.load(os.path.join(cwd, path)) + return image + + def get_font(path, size): + cwd = os.path.dirname(__file__) + font = pygame.font.Font(os.path.join(cwd, path), size) + return font + + small_font = get_font( + os.path.join("font", "Minecraft.ttf"), screen_height // 15 + ) + dealer_text = small_font.render( + "Dealer: " + str(dealer_card_value), True, white + ) + dealer_text_rect = self.screen.blit(dealer_text, (spacing, spacing)) + + def scale_card_img(card_img): + return pygame.transform.scale(card_img, (card_img_width, card_img_height)) + + dealer_card_img = scale_card_img( + get_image( + os.path.join( + "img", + f"{self.dealer_top_card_suit}{self.dealer_top_card_value_str}.png", + ) + ) + ) + dealer_card_rect = self.screen.blit( + dealer_card_img, + ( + screen_width // 2 - card_img_width - spacing // 2, + dealer_text_rect.bottom + spacing, + ), + ) + + hidden_card_img = scale_card_img(get_image(os.path.join("img", "Card.png"))) + self.screen.blit( + hidden_card_img, + ( + screen_width // 2 + spacing // 2, + dealer_text_rect.bottom + spacing, + ), + ) + + player_text = small_font.render("Player", True, white) + player_text_rect = self.screen.blit( + player_text, (spacing, dealer_card_rect.bottom + 1.5 * spacing) + ) + + large_font = get_font(os.path.join("font", "Minecraft.ttf"), screen_height // 6) + player_sum_text = large_font.render(str(player_sum), True, white) + player_sum_text_rect = self.screen.blit( + player_sum_text, + ( + screen_width // 2 - 
player_sum_text.get_width() // 2, + player_text_rect.bottom + spacing, + ), + ) + + if usable_ace: + usable_ace_text = small_font.render("usable ace", True, white) + self.screen.blit( + usable_ace_text, + ( + screen_width // 2 - usable_ace_text.get_width() // 2, + player_sum_text_rect.bottom + spacing // 2, + ), + ) + if self.render_mode == "human": + pygame.event.pump() + pygame.display.update() + self.clock.tick(self.metadata["render_fps"]) + else: + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if hasattr(self, "screen"): + import pygame + + pygame.display.quit() + pygame.quit() + + +# Pixel art from Mariia Khmelnytska (https://www.123rf.com/photo_104453049_stock-vector-pixel-art-playing-cards-standart-deck-vector-set.html) diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/cliffwalking.py b/MLPY/Lib/site-packages/gym/envs/toy_text/cliffwalking.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3ed523668de8731219f9b77c5046b57b0c0dfa --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/cliffwalking.py @@ -0,0 +1,293 @@ +from contextlib import closing +from io import StringIO +from os import path +from typing import Optional + +import numpy as np + +from gym import Env, logger, spaces +from gym.envs.toy_text.utils import categorical_sample +from gym.error import DependencyNotInstalled + +UP = 0 +RIGHT = 1 +DOWN = 2 +LEFT = 3 + + +class CliffWalkingEnv(Env): + """ + This is a simple implementation of the Gridworld Cliff + reinforcement learning task. + + Adapted from Example 6.6 (page 106) from [Reinforcement Learning: An Introduction + by Sutton and Barto](http://incompleteideas.net/book/bookdraft2018jan1.pdf). + + With inspiration from: + [https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py] + (https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py) + + ### Description + The board is a 4x12 matrix, with (using NumPy matrix indexing): + - [3, 0] as the start at bottom-left + - [3, 11] as the goal at bottom-right + - [3, 1..10] as the cliff at bottom-center + + If the agent steps on the cliff, it returns to the start. + An episode terminates when the agent reaches the goal. + + ### Actions + There are 4 discrete deterministic actions: + - 0: move up + - 1: move right + - 2: move down + - 3: move left + + ### Observations + There are 3x12 + 1 possible states. In fact, the agent cannot be at the cliff, nor at the goal + (as this results in the end of the episode). + It remains all the positions of the first 3 rows plus the bottom-left cell. + The observation is simply the current position encoded as [flattened index](https://numpy.org/doc/stable/reference/generated/numpy.unravel_index.html). + + ### Reward + Each time step incurs -1 reward, and stepping into the cliff incurs -100 reward. 
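A small sketch of the transition and reward scheme just described (illustrative only, not part of the patched file; it assumes the `CliffWalking-v0` registration):

```python
import gym

env = gym.make("CliffWalking-v0")
obs, _ = env.reset()
assert obs == 36  # start [3, 0] flattened as 3 * 12 + 0

# Moving right from the start steps onto the cliff: -100 reward,
# and the agent is returned to the start without terminating.
obs, reward, terminated, truncated, info = env.step(1)  # 1 = move right
assert (obs, reward, terminated) == (36, -100, False)
```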
+ + ### Arguments + + ``` + gym.make('CliffWalking-v0') + ``` + + ### Version History + - v0: Initial version release + """ + + metadata = { + "render_modes": ["human", "rgb_array", "ansi"], + "render_fps": 4, + } + + def __init__(self, render_mode: Optional[str] = None): + self.shape = (4, 12) + self.start_state_index = np.ravel_multi_index((3, 0), self.shape) + + self.nS = np.prod(self.shape) + self.nA = 4 + + # Cliff Location + self._cliff = np.zeros(self.shape, dtype=bool) + self._cliff[3, 1:-1] = True + + # Calculate transition probabilities and rewards + self.P = {} + for s in range(self.nS): + position = np.unravel_index(s, self.shape) + self.P[s] = {a: [] for a in range(self.nA)} + self.P[s][UP] = self._calculate_transition_prob(position, [-1, 0]) + self.P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1]) + self.P[s][DOWN] = self._calculate_transition_prob(position, [1, 0]) + self.P[s][LEFT] = self._calculate_transition_prob(position, [0, -1]) + + # Calculate initial state distribution + # We always start in state (3, 0) + self.initial_state_distrib = np.zeros(self.nS) + self.initial_state_distrib[self.start_state_index] = 1.0 + + self.observation_space = spaces.Discrete(self.nS) + self.action_space = spaces.Discrete(self.nA) + + self.render_mode = render_mode + + # pygame utils + self.cell_size = (60, 60) + self.window_size = ( + self.shape[1] * self.cell_size[1], + self.shape[0] * self.cell_size[0], + ) + self.window_surface = None + self.clock = None + self.elf_images = None + self.start_img = None + self.goal_img = None + self.cliff_img = None + self.mountain_bg_img = None + self.near_cliff_img = None + self.tree_img = None + + def _limit_coordinates(self, coord: np.ndarray) -> np.ndarray: + """Prevent the agent from falling out of the grid world.""" + coord[0] = min(coord[0], self.shape[0] - 1) + coord[0] = max(coord[0], 0) + coord[1] = min(coord[1], self.shape[1] - 1) + coord[1] = max(coord[1], 0) + return coord + + def _calculate_transition_prob(self, current, delta): + """Determine the outcome for an action. Transition Prob is always 1.0. + + Args: + current: Current position on the grid as (row, col) + delta: Change in position for transition + + Returns: + Tuple of ``(1.0, new_state, reward, terminated)`` + """ + new_position = np.array(current) + np.array(delta) + new_position = self._limit_coordinates(new_position).astype(int) + new_state = np.ravel_multi_index(tuple(new_position), self.shape) + if self._cliff[tuple(new_position)]: + return [(1.0, self.start_state_index, -100, False)] + + terminal_state = (self.shape[0] - 1, self.shape[1] - 1) + is_terminated = tuple(new_position) == terminal_state + return [(1.0, new_state, -1, is_terminated)] + + def step(self, a): + transitions = self.P[self.s][a] + i = categorical_sample([t[0] for t in transitions], self.np_random) + p, s, r, t = transitions[i] + self.s = s + self.lastaction = a + + if self.render_mode == "human": + self.render() + return (int(s), r, t, False, {"prob": p}) + + def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None): + super().reset(seed=seed) + self.s = categorical_sample(self.initial_state_distrib, self.np_random) + self.lastaction = None + + if self.render_mode == "human": + self.render() + return int(self.s), {"prob": 1} + + def render(self): + if self.render_mode is None: + logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. 
gym("{self.spec.id}", render_mode="rgb_array")' + ) + elif self.render_mode == "ansi": + return self._render_text() + else: + return self._render_gui(self.render_mode) + + def _render_gui(self, mode): + try: + import pygame + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[toy_text]`" + ) + if self.window_surface is None: + pygame.init() + + if mode == "human": + pygame.display.init() + pygame.display.set_caption("CliffWalking") + self.window_surface = pygame.display.set_mode(self.window_size) + else: # rgb_array + self.window_surface = pygame.Surface(self.window_size) + if self.clock is None: + self.clock = pygame.time.Clock() + if self.elf_images is None: + hikers = [ + path.join(path.dirname(__file__), "img/elf_up.png"), + path.join(path.dirname(__file__), "img/elf_right.png"), + path.join(path.dirname(__file__), "img/elf_down.png"), + path.join(path.dirname(__file__), "img/elf_left.png"), + ] + self.elf_images = [ + pygame.transform.scale(pygame.image.load(f_name), self.cell_size) + for f_name in hikers + ] + if self.start_img is None: + file_name = path.join(path.dirname(__file__), "img/stool.png") + self.start_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.goal_img is None: + file_name = path.join(path.dirname(__file__), "img/cookie.png") + self.goal_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.mountain_bg_img is None: + bg_imgs = [ + path.join(path.dirname(__file__), "img/mountain_bg1.png"), + path.join(path.dirname(__file__), "img/mountain_bg2.png"), + ] + self.mountain_bg_img = [ + pygame.transform.scale(pygame.image.load(f_name), self.cell_size) + for f_name in bg_imgs + ] + if self.near_cliff_img is None: + near_cliff_imgs = [ + path.join(path.dirname(__file__), "img/mountain_near-cliff1.png"), + path.join(path.dirname(__file__), "img/mountain_near-cliff2.png"), + ] + self.near_cliff_img = [ + pygame.transform.scale(pygame.image.load(f_name), self.cell_size) + for f_name in near_cliff_imgs + ] + if self.cliff_img is None: + file_name = path.join(path.dirname(__file__), "img/mountain_cliff.png") + self.cliff_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + + for s in range(self.nS): + row, col = np.unravel_index(s, self.shape) + pos = (col * self.cell_size[0], row * self.cell_size[1]) + check_board_mask = row % 2 ^ col % 2 + self.window_surface.blit(self.mountain_bg_img[check_board_mask], pos) + + if self._cliff[row, col]: + self.window_surface.blit(self.cliff_img, pos) + if row < self.shape[0] - 1 and self._cliff[row + 1, col]: + self.window_surface.blit(self.near_cliff_img[check_board_mask], pos) + if s == self.start_state_index: + self.window_surface.blit(self.start_img, pos) + if s == self.nS - 1: + self.window_surface.blit(self.goal_img, pos) + if s == self.s: + elf_pos = (pos[0], pos[1] - 0.1 * self.cell_size[1]) + last_action = self.lastaction if self.lastaction is not None else 2 + self.window_surface.blit(self.elf_images[last_action], elf_pos) + + if mode == "human": + pygame.event.pump() + pygame.display.update() + self.clock.tick(self.metadata["render_fps"]) + else: # rgb_array + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.window_surface)), axes=(1, 0, 2) + ) + + def _render_text(self): + outfile = StringIO() + + for s in range(self.nS): + position = np.unravel_index(s, self.shape) + if self.s == s: + output = " x " + # Print terminal state + elif position == (3, 11): + output 
= " T " + elif self._cliff[position]: + output = " C " + else: + output = " o " + + if position[1] == 0: + output = output.lstrip() + if position[1] == self.shape[1] - 1: + output = output.rstrip() + output += "\n" + + outfile.write(output) + outfile.write("\n") + + with closing(outfile): + return outfile.getvalue() diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/font/Minecraft.ttf b/MLPY/Lib/site-packages/gym/envs/toy_text/font/Minecraft.ttf new file mode 100644 index 0000000000000000000000000000000000000000..85c14725a3bf6d67aaf0f03292f9b763c1654f07 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/font/Minecraft.ttf differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/frozen_lake.py b/MLPY/Lib/site-packages/gym/envs/toy_text/frozen_lake.py new file mode 100644 index 0000000000000000000000000000000000000000..65ea6429771a3ba9675010d6d2b8955e2a156397 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/frozen_lake.py @@ -0,0 +1,413 @@ +from contextlib import closing +from io import StringIO +from os import path +from typing import List, Optional + +import numpy as np + +from gym import Env, logger, spaces, utils +from gym.envs.toy_text.utils import categorical_sample +from gym.error import DependencyNotInstalled + +LEFT = 0 +DOWN = 1 +RIGHT = 2 +UP = 3 + +MAPS = { + "4x4": ["SFFF", "FHFH", "FFFH", "HFFG"], + "8x8": [ + "SFFFFFFF", + "FFFFFFFF", + "FFFHFFFF", + "FFFFFHFF", + "FFFHFFFF", + "FHHFFFHF", + "FHFFHFHF", + "FFFHFFFG", + ], +} + + +# DFS to check that it's a valid path. +def is_valid(board: List[List[str]], max_size: int) -> bool: + frontier, discovered = [], set() + frontier.append((0, 0)) + while frontier: + r, c = frontier.pop() + if not (r, c) in discovered: + discovered.add((r, c)) + directions = [(1, 0), (0, 1), (-1, 0), (0, -1)] + for x, y in directions: + r_new = r + x + c_new = c + y + if r_new < 0 or r_new >= max_size or c_new < 0 or c_new >= max_size: + continue + if board[r_new][c_new] == "G": + return True + if board[r_new][c_new] != "H": + frontier.append((r_new, c_new)) + return False + + +def generate_random_map(size: int = 8, p: float = 0.8) -> List[str]: + """Generates a random valid map (one that has a path from start to goal) + + Args: + size: size of each side of the grid + p: probability that a tile is frozen + + Returns: + A random valid map + """ + valid = False + board = [] # initialize to make pyright happy + + while not valid: + p = min(1, p) + board = np.random.choice(["F", "H"], (size, size), p=[p, 1 - p]) + board[0][0] = "S" + board[-1][-1] = "G" + valid = is_valid(board, size) + return ["".join(x) for x in board] + + +class FrozenLakeEnv(Env): + """ + Frozen lake involves crossing a frozen lake from Start(S) to Goal(G) without falling into any Holes(H) + by walking over the Frozen(F) lake. + The agent may not always move in the intended direction due to the slippery nature of the frozen lake. + + + ### Action Space + The agent takes a 1-element vector for actions. + The action space is `(dir)`, where `dir` decides direction to move in which can be: + + - 0: LEFT + - 1: DOWN + - 2: RIGHT + - 3: UP + + ### Observation Space + The observation is a value representing the agent's current position as + current_row * nrows + current_col (where both the row and col start at 0). + For example, the goal position in the 4x4 map can be calculated as follows: 3 * 4 + 3 = 15. + The number of possible observations is dependent on the size of the map. + For example, the 4x4 map has 16 possible observations. 
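An illustrative sketch (not part of the patched file) of the flattened-state encoding above, using the 4x4 map with `is_slippery=False` (described under Arguments below) so that moves are deterministic:

```python
import gym

env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False)
obs, _ = env.reset(seed=0)
assert obs == 0  # start 'S' is row 0, col 0

obs, reward, terminated, truncated, info = env.step(1)  # 1 = DOWN
assert obs == 4  # row 1, col 0  ->  1 * 4 + 0
assert reward == 0.0 and not terminated  # 'F' tile: no reward, episode continues
```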
+ + ### Rewards + + Reward schedule: + - Reach goal(G): +1 + - Reach hole(H): 0 + - Reach frozen(F): 0 + + ### Arguments + + ``` + gym.make('FrozenLake-v1', desc=None, map_name="4x4", is_slippery=True) + ``` + + `desc`: Used to specify custom map for frozen lake. For example, + + desc=["SFFF", "FHFH", "FFFH", "HFFG"]. + + A random generated map can be specified by calling the function `generate_random_map`. For example, + + ``` + from gym.envs.toy_text.frozen_lake import generate_random_map + + gym.make('FrozenLake-v1', desc=generate_random_map(size=8)) + ``` + + `map_name`: ID to use any of the preloaded maps. + + "4x4":[ + "SFFF", + "FHFH", + "FFFH", + "HFFG" + ] + + "8x8": [ + "SFFFFFFF", + "FFFFFFFF", + "FFFHFFFF", + "FFFFFHFF", + "FFFHFFFF", + "FHHFFFHF", + "FHFFHFHF", + "FFFHFFFG", + ] + + `is_slippery`: True/False. If True will move in intended direction with + probability of 1/3 else will move in either perpendicular direction with + equal probability of 1/3 in both directions. + + For example, if action is left and is_slippery is True, then: + - P(move left)=1/3 + - P(move up)=1/3 + - P(move down)=1/3 + + ### Version History + * v1: Bug fixes to rewards + * v0: Initial versions release (1.0.0) + """ + + metadata = { + "render_modes": ["human", "ansi", "rgb_array"], + "render_fps": 4, + } + + def __init__( + self, + render_mode: Optional[str] = None, + desc=None, + map_name="4x4", + is_slippery=True, + ): + if desc is None and map_name is None: + desc = generate_random_map() + elif desc is None: + desc = MAPS[map_name] + self.desc = desc = np.asarray(desc, dtype="c") + self.nrow, self.ncol = nrow, ncol = desc.shape + self.reward_range = (0, 1) + + nA = 4 + nS = nrow * ncol + + self.initial_state_distrib = np.array(desc == b"S").astype("float64").ravel() + self.initial_state_distrib /= self.initial_state_distrib.sum() + + self.P = {s: {a: [] for a in range(nA)} for s in range(nS)} + + def to_s(row, col): + return row * ncol + col + + def inc(row, col, a): + if a == LEFT: + col = max(col - 1, 0) + elif a == DOWN: + row = min(row + 1, nrow - 1) + elif a == RIGHT: + col = min(col + 1, ncol - 1) + elif a == UP: + row = max(row - 1, 0) + return (row, col) + + def update_probability_matrix(row, col, action): + newrow, newcol = inc(row, col, action) + newstate = to_s(newrow, newcol) + newletter = desc[newrow, newcol] + terminated = bytes(newletter) in b"GH" + reward = float(newletter == b"G") + return newstate, reward, terminated + + for row in range(nrow): + for col in range(ncol): + s = to_s(row, col) + for a in range(4): + li = self.P[s][a] + letter = desc[row, col] + if letter in b"GH": + li.append((1.0, s, 0, True)) + else: + if is_slippery: + for b in [(a - 1) % 4, a, (a + 1) % 4]: + li.append( + (1.0 / 3.0, *update_probability_matrix(row, col, b)) + ) + else: + li.append((1.0, *update_probability_matrix(row, col, a))) + + self.observation_space = spaces.Discrete(nS) + self.action_space = spaces.Discrete(nA) + + self.render_mode = render_mode + + # pygame utils + self.window_size = (min(64 * ncol, 512), min(64 * nrow, 512)) + self.cell_size = ( + self.window_size[0] // self.ncol, + self.window_size[1] // self.nrow, + ) + self.window_surface = None + self.clock = None + self.hole_img = None + self.cracked_hole_img = None + self.ice_img = None + self.elf_images = None + self.goal_img = None + self.start_img = None + + def step(self, a): + transitions = self.P[self.s][a] + i = categorical_sample([t[0] for t in transitions], self.np_random) + p, s, r, t = transitions[i] + self.s = s + 
self.lastaction = a + + if self.render_mode == "human": + self.render() + return (int(s), r, t, False, {"prob": p}) + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self.s = categorical_sample(self.initial_state_distrib, self.np_random) + self.lastaction = None + + if self.render_mode == "human": + self.render() + return int(self.s), {"prob": 1} + + def render(self): + if self.render_mode is None: + logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + elif self.render_mode == "ansi": + return self._render_text() + else: # self.render_mode in {"human", "rgb_array"}: + return self._render_gui(self.render_mode) + + def _render_gui(self, mode): + try: + import pygame + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[toy_text]`" + ) + + if self.window_surface is None: + pygame.init() + + if mode == "human": + pygame.display.init() + pygame.display.set_caption("Frozen Lake") + self.window_surface = pygame.display.set_mode(self.window_size) + elif mode == "rgb_array": + self.window_surface = pygame.Surface(self.window_size) + + assert ( + self.window_surface is not None + ), "Something went wrong with pygame. This should never happen." + + if self.clock is None: + self.clock = pygame.time.Clock() + if self.hole_img is None: + file_name = path.join(path.dirname(__file__), "img/hole.png") + self.hole_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.cracked_hole_img is None: + file_name = path.join(path.dirname(__file__), "img/cracked_hole.png") + self.cracked_hole_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.ice_img is None: + file_name = path.join(path.dirname(__file__), "img/ice.png") + self.ice_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.goal_img is None: + file_name = path.join(path.dirname(__file__), "img/goal.png") + self.goal_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.start_img is None: + file_name = path.join(path.dirname(__file__), "img/stool.png") + self.start_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.elf_images is None: + elfs = [ + path.join(path.dirname(__file__), "img/elf_left.png"), + path.join(path.dirname(__file__), "img/elf_down.png"), + path.join(path.dirname(__file__), "img/elf_right.png"), + path.join(path.dirname(__file__), "img/elf_up.png"), + ] + self.elf_images = [ + pygame.transform.scale(pygame.image.load(f_name), self.cell_size) + for f_name in elfs + ] + + desc = self.desc.tolist() + assert isinstance(desc, list), f"desc should be a list or an array, got {desc}" + for y in range(self.nrow): + for x in range(self.ncol): + pos = (x * self.cell_size[0], y * self.cell_size[1]) + rect = (*pos, *self.cell_size) + + self.window_surface.blit(self.ice_img, pos) + if desc[y][x] == b"H": + self.window_surface.blit(self.hole_img, pos) + elif desc[y][x] == b"G": + self.window_surface.blit(self.goal_img, pos) + elif desc[y][x] == b"S": + self.window_surface.blit(self.start_img, pos) + + pygame.draw.rect(self.window_surface, (180, 200, 230), rect, 1) + + # paint the elf + bot_row, bot_col = self.s // self.ncol, self.s % self.ncol + cell_rect = (bot_col * self.cell_size[0], bot_row * 
self.cell_size[1]) + last_action = self.lastaction if self.lastaction is not None else 1 + elf_img = self.elf_images[last_action] + + if desc[bot_row][bot_col] == b"H": + self.window_surface.blit(self.cracked_hole_img, cell_rect) + else: + self.window_surface.blit(elf_img, cell_rect) + + if mode == "human": + pygame.event.pump() + pygame.display.update() + self.clock.tick(self.metadata["render_fps"]) + elif mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.window_surface)), axes=(1, 0, 2) + ) + + @staticmethod + def _center_small_rect(big_rect, small_dims): + offset_w = (big_rect[2] - small_dims[0]) / 2 + offset_h = (big_rect[3] - small_dims[1]) / 2 + return ( + big_rect[0] + offset_w, + big_rect[1] + offset_h, + ) + + def _render_text(self): + desc = self.desc.tolist() + outfile = StringIO() + + row, col = self.s // self.ncol, self.s % self.ncol + desc = [[c.decode("utf-8") for c in line] for line in desc] + desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True) + if self.lastaction is not None: + outfile.write(f" ({['Left', 'Down', 'Right', 'Up'][self.lastaction]})\n") + else: + outfile.write("\n") + outfile.write("\n".join("".join(line) for line in desc) + "\n") + + with closing(outfile): + return outfile.getvalue() + + def close(self): + if self.window_surface is not None: + import pygame + + pygame.display.quit() + pygame.quit() + + +# Elf and stool from https://franuka.itch.io/rpg-snow-tileset +# All other assets by Mel Tillery http://www.cyaneus.com/ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C2.png new file mode 100644 index 0000000000000000000000000000000000000000..56c446d4a4b812a919468a3f68c0cbc5ab3c0285 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C3.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C3.png new file mode 100644 index 0000000000000000000000000000000000000000..f496d19c20d15ae9dc4acafd8febcb7b74a904c1 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C3.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C4.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C4.png new file mode 100644 index 0000000000000000000000000000000000000000..d8f10f216f27b2f7fc597b05d0c6a540e7b7227a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C4.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C5.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C5.png new file mode 100644 index 0000000000000000000000000000000000000000..3382e23027b456f5fdc12605399f89f58eea7565 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C5.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C6.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C6.png new file mode 100644 index 0000000000000000000000000000000000000000..ab19dc3f3c75bbb6fbf0b5af41e2f9240772fd01 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C6.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C7.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C7.png new file mode 100644 index 0000000000000000000000000000000000000000..19bf11c591f3f9cf54b08bf8d867818924068e80 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C7.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C8.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C8.png 
new file mode 100644 index 0000000000000000000000000000000000000000..1fd6f80dbfbd76294c12c3a0ac61904852f50b4f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C8.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/C9.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C9.png new file mode 100644 index 0000000000000000000000000000000000000000..41a6431f96aaba6e390d07dce2012a373d3b081d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/C9.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/CA.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CA.png new file mode 100644 index 0000000000000000000000000000000000000000..006f9841e142156db8f5ccf7f71b91a984ad5708 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CA.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/CJ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CJ.png new file mode 100644 index 0000000000000000000000000000000000000000..b948a0f07125ed2885c88d9c2e5943e723060eb2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CJ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/CK.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CK.png new file mode 100644 index 0000000000000000000000000000000000000000..9db8c772efed93bba85a267e43886c578470cecb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CK.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/CQ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CQ.png new file mode 100644 index 0000000000000000000000000000000000000000..fc23d8538499c950d70644682e29a50bb119a2fd Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CQ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/CT.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CT.png new file mode 100644 index 0000000000000000000000000000000000000000..bde8478dc498e9f5d22fcdad3cdb227ddf08218f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/CT.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/Card.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/Card.png new file mode 100644 index 0000000000000000000000000000000000000000..04ec431da076dd52267d801ec8e471f0e02c4463 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/Card.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D2.png new file mode 100644 index 0000000000000000000000000000000000000000..2b4babadfbd0c7b7c58442093d1b025c38a2c359 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D3.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D3.png new file mode 100644 index 0000000000000000000000000000000000000000..25e2e2e03ea493f6b29880a65b4b6bfbac6f0df2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D3.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D4.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D4.png new file mode 100644 index 0000000000000000000000000000000000000000..f33e33dd6f67dc44cf030effa910066f22867ae4 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D4.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D5.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D5.png new file mode 100644 index 
0000000000000000000000000000000000000000..dce581381a4f917521c28e87883398fae43493f7 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D5.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D6.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D6.png new file mode 100644 index 0000000000000000000000000000000000000000..59fa18072e651b17594df667597fd6b2817d80c6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D6.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D7.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D7.png new file mode 100644 index 0000000000000000000000000000000000000000..35b733bce1e20f3f0f6b02d1c98c9769e2263710 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D7.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D8.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D8.png new file mode 100644 index 0000000000000000000000000000000000000000..ee777f97a564462a50bf71af030d3b09073ffc1f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D8.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/D9.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D9.png new file mode 100644 index 0000000000000000000000000000000000000000..5b3936889b3075a827e8ef09af3a427a668e867d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/D9.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/DA.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DA.png new file mode 100644 index 0000000000000000000000000000000000000000..23b58115875883a8f1a234cee344ac1a4f9f8447 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DA.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/DJ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DJ.png new file mode 100644 index 0000000000000000000000000000000000000000..6ca59058feae030a89f62d848872dcb20cb3a103 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DJ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/DK.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DK.png new file mode 100644 index 0000000000000000000000000000000000000000..25d4acedcd9bb603a7cae392c15b301387433508 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DK.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/DQ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DQ.png new file mode 100644 index 0000000000000000000000000000000000000000..a56f982e974582f5caf74409c82712b006782439 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DQ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/DT.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DT.png new file mode 100644 index 0000000000000000000000000000000000000000..c94858b0216a80388e7d7e6cd77c421cac03255d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/DT.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H2.png new file mode 100644 index 0000000000000000000000000000000000000000..466c0f3520a52b3d2535c9e322f4f3bdef072c05 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H3.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H3.png new file mode 100644 index 
0000000000000000000000000000000000000000..66b71ccacedba8ae33cfa9f63bd5c721b35c0727 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H3.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H4.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H4.png new file mode 100644 index 0000000000000000000000000000000000000000..57ff831f7b6deb39f336a59d2208587b1691c3c2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H4.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H5.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H5.png new file mode 100644 index 0000000000000000000000000000000000000000..64e37dceb091ccfacffa5663b58b6de50f8e8039 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H5.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H6.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H6.png new file mode 100644 index 0000000000000000000000000000000000000000..61a1ea4ff6bd8d06fe6592c214f897a0c744e58b Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H6.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H7.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H7.png new file mode 100644 index 0000000000000000000000000000000000000000..346d0a5721286fc30aeac63129fe8778b65b27b3 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H7.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H8.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H8.png new file mode 100644 index 0000000000000000000000000000000000000000..dc8dca2f68f7c09283d08c0cb2da5e4d8e1159af Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H8.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/H9.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H9.png new file mode 100644 index 0000000000000000000000000000000000000000..3f92346e0861bdb7f845f4290e16beb9768dc991 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/H9.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/HA.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HA.png new file mode 100644 index 0000000000000000000000000000000000000000..2f8854de2979123898549d172389af182028e843 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HA.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/HJ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HJ.png new file mode 100644 index 0000000000000000000000000000000000000000..3930ff3f03dc84b25d425d7c45141bbb9b390894 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HJ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/HK.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HK.png new file mode 100644 index 0000000000000000000000000000000000000000..ed169c8d25b33ebc2dcd523ec98c6daae3d45dd8 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HK.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/HQ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HQ.png new file mode 100644 index 0000000000000000000000000000000000000000..06388f6fab80656acf69ab1b5b229e07169949eb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HQ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/HT.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HT.png new file mode 100644 index 
0000000000000000000000000000000000000000..02724791e28428e415c4437005d0efe594b02a1e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/HT.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S2.png new file mode 100644 index 0000000000000000000000000000000000000000..09eaccc3ee0e3ce4208fd787faae21bd7622ed15 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S3.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S3.png new file mode 100644 index 0000000000000000000000000000000000000000..ba094d54954408371109794fade303ee7a7dbba6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S3.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S4.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S4.png new file mode 100644 index 0000000000000000000000000000000000000000..4090e11973a309e5ec21670a7bffa49a715026fd Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S4.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S5.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S5.png new file mode 100644 index 0000000000000000000000000000000000000000..230db548f94dbbf0f28a5b250eec9f39d326fd20 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S5.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S6.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S6.png new file mode 100644 index 0000000000000000000000000000000000000000..2189d5b9930ca9dd7eb10c7a71371363487c7a5e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S6.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S7.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S7.png new file mode 100644 index 0000000000000000000000000000000000000000..5b859dfed4900fe200ddb14864a9537d3e0ab87e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S7.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S8.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S8.png new file mode 100644 index 0000000000000000000000000000000000000000..26e6b7138160fdee98de23877b7959b43829b9f2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S8.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/S9.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S9.png new file mode 100644 index 0000000000000000000000000000000000000000..d69a6835077aa5c126ac03095b54b569b5312cca Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/S9.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/SA.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SA.png new file mode 100644 index 0000000000000000000000000000000000000000..bd136074b0963e7723c87d6c0fbc751bd8178e1c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SA.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/SJ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SJ.png new file mode 100644 index 0000000000000000000000000000000000000000..3adc3a90bc546e01b2bd4ec1b23476c34605798e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SJ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/SK.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SK.png new file mode 100644 index 
0000000000000000000000000000000000000000..5948ae521b4c1edccd0e45a0b86fce70ea34124b Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SK.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/SQ.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SQ.png new file mode 100644 index 0000000000000000000000000000000000000000..9fb5037700fef10dff68bbf694efd508b88c408f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/SQ.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/ST.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/ST.png new file mode 100644 index 0000000000000000000000000000000000000000..7e4a12a0fb0a762c5b86fccbaeb11082d646e98c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/ST.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_front.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_front.png new file mode 100644 index 0000000000000000000000000000000000000000..55a63cf4f612a48555869466c07da428b675374c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_front.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_left.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_left.png new file mode 100644 index 0000000000000000000000000000000000000000..7ae835de8943a0ca2d342ab17b3609c7baf79ef5 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_left.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_rear.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_rear.png new file mode 100644 index 0000000000000000000000000000000000000000..197ca1da1422515e6371fe346d08301c9475f47e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_rear.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_right.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_right.png new file mode 100644 index 0000000000000000000000000000000000000000..8f7fd590e19da1183840e96b8a2eb8e0ded56692 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cab_right.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cookie.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cookie.png new file mode 100644 index 0000000000000000000000000000000000000000..e5ac2eb4e37ebe1ce9ecbee607af8bf6ab0bfae1 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cookie.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/cracked_hole.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cracked_hole.png new file mode 100644 index 0000000000000000000000000000000000000000..55930420493b336ba5961f45c27fec709bc1bcad Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/cracked_hole.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_down.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_down.png new file mode 100644 index 0000000000000000000000000000000000000000..afa3daf1387c93fa81e34c0b0cc258fa23a5ee36 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_down.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_left.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_left.png new file mode 100644 index 0000000000000000000000000000000000000000..bc9e22ea63286dda4a427770e582be96d76f25aa Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_left.png differ diff --git 
a/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_right.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_right.png new file mode 100644 index 0000000000000000000000000000000000000000..836403158a586644dd5cb5d2894cdbb4a80d7035 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_right.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_up.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_up.png new file mode 100644 index 0000000000000000000000000000000000000000..933f1f0017848386af3ec5ac7c5f6763cd9c05fb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/elf_up.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/goal.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/goal.png new file mode 100644 index 0000000000000000000000000000000000000000..709e4b4891351dff4d59fda03e7b940de70c5185 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/goal.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_bottom.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_bottom.png new file mode 100644 index 0000000000000000000000000000000000000000..577eb1aa8b6648c9944b1b02c08429e18b4ae815 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_bottom.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_horiz.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_horiz.png new file mode 100644 index 0000000000000000000000000000000000000000..71b456da0a0ae7cc670d41a0984e8a342d2ded0a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_horiz.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_left.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_left.png new file mode 100644 index 0000000000000000000000000000000000000000..726a18cac0ad480d4cfaaf5397ddc05d8da6ffdb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_left.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_right.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_right.png new file mode 100644 index 0000000000000000000000000000000000000000..f168ef7ecf9212a73cd8ac0bab6d13c59da91d6f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_right.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_top.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_top.png new file mode 100644 index 0000000000000000000000000000000000000000..3914f6980340fccae5fb3c476e251d4ea1bd4a63 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_top.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_vert.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_vert.png new file mode 100644 index 0000000000000000000000000000000000000000..1bdc9ccd9017e7a8d397db6ef8bd8ae09dfd931c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/gridworld_median_vert.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/hole.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/hole.png new file mode 100644 index 0000000000000000000000000000000000000000..c7afd1db16de6d8d207ac5485f99de379daeb884 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/hole.png differ 
diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/hotel.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/hotel.png new file mode 100644 index 0000000000000000000000000000000000000000..0faa41852126e4974e4a491be55a50e8eabf957a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/hotel.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/ice.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/ice.png new file mode 100644 index 0000000000000000000000000000000000000000..95dbc74a888952f19d82eebf04bd5ff2d5e0622a Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/ice.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg1.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg1.png new file mode 100644 index 0000000000000000000000000000000000000000..e5872ceb65985f8934e5dc2a43e2ca11d09d6d3f Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg1.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg2.png new file mode 100644 index 0000000000000000000000000000000000000000..8cadf7df88a7b381f5018fc7d5f331a38897ccda Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_bg2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_cliff.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_cliff.png new file mode 100644 index 0000000000000000000000000000000000000000..6ad1ec0abef7dd598db96b7f39d8db306fad60e5 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_cliff.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff1.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff1.png new file mode 100644 index 0000000000000000000000000000000000000000..79834dafe199b8e7273e0a764d7e1493634199dc Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff1.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff2.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff2.png new file mode 100644 index 0000000000000000000000000000000000000000..dd3b57f38fab45b89c8f412b25d327a65e0862d3 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/mountain_near-cliff2.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/passenger.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/passenger.png new file mode 100644 index 0000000000000000000000000000000000000000..567a3a3ab1bcd737df440f845c66d05bde319c98 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/passenger.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/stool.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/stool.png new file mode 100644 index 0000000000000000000000000000000000000000..f304d797f6953781509f35b68b38189f970a52d6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/stool.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/img/taxi_background.png b/MLPY/Lib/site-packages/gym/envs/toy_text/img/taxi_background.png new file mode 100644 index 0000000000000000000000000000000000000000..ea10433922dc8f05483cbb2c197089ea4792eaa9 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/envs/toy_text/img/taxi_background.png differ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/taxi.py 
b/MLPY/Lib/site-packages/gym/envs/toy_text/taxi.py new file mode 100644 index 0000000000000000000000000000000000000000..ac5ef188174c025f112097cd5c809d7affeb7219 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/taxi.py @@ -0,0 +1,472 @@ +from contextlib import closing +from io import StringIO +from os import path +from typing import Optional + +import numpy as np + +from gym import Env, logger, spaces, utils +from gym.envs.toy_text.utils import categorical_sample +from gym.error import DependencyNotInstalled + +MAP = [ + "+---------+", + "|R: | : :G|", + "| : | : : |", + "| : : : : |", + "| | : | : |", + "|Y| : |B: |", + "+---------+", +] +WINDOW_SIZE = (550, 350) + + +class TaxiEnv(Env): + """ + + The Taxi Problem + from "Hierarchical Reinforcement Learning with the MAXQ Value Function Decomposition" + by Tom Dietterich + + ### Description + There are four designated locations in the grid world indicated by R(ed), + G(reen), Y(ellow), and B(lue). When the episode starts, the taxi starts off + at a random square and the passenger is at a random location. The taxi + drives to the passenger's location, picks up the passenger, drives to the + passenger's destination (another one of the four specified locations), and + then drops off the passenger. Once the passenger is dropped off, the episode ends. + + Map: + + +---------+ + |R: | : :G| + | : | : : | + | : : : : | + | | : | : | + |Y| : |B: | + +---------+ + + ### Actions + There are 6 discrete deterministic actions: + - 0: move south + - 1: move north + - 2: move east + - 3: move west + - 4: pickup passenger + - 5: drop off passenger + + ### Observations + There are 500 discrete states since there are 25 taxi positions, 5 possible + locations of the passenger (including the case when the passenger is in the + taxi), and 4 destination locations. + + Note that there are 400 states that can actually be reached during an + episode. The missing states correspond to situations in which the passenger + is at the same location as their destination, as this typically signals the + end of an episode. Four additional states can be observed right after a + successful episodes, when both the passenger and the taxi are at the destination. + This gives a total of 404 reachable discrete states. + + Each state space is represented by the tuple: + (taxi_row, taxi_col, passenger_location, destination) + + An observation is an integer that encodes the corresponding state. + The state tuple can then be decoded with the "decode" method. + + Passenger locations: + - 0: R(ed) + - 1: G(reen) + - 2: Y(ellow) + - 3: B(lue) + - 4: in taxi + + Destinations: + - 0: R(ed) + - 1: G(reen) + - 2: Y(ellow) + - 3: B(lue) + + ### Info + + ``step`` and ``reset()`` will return an info dictionary that contains "p" and "action_mask" containing + the probability that the state is taken and a mask of what actions will result in a change of state to speed up training. + + As Taxi's initial state is a stochastic, the "p" key represents the probability of the + transition however this value is currently bugged being 1.0, this will be fixed soon. + As the steps are deterministic, "p" represents the probability of the transition which is always 1.0 + + For some cases, taking an action will have no effect on the state of the agent. + In v0.25.0, ``info["action_mask"]`` contains a np.ndarray for each of the action specifying + if the action will change the state. 
+ + To sample a modifying action, use ``action = env.action_space.sample(info["action_mask"])`` + Or with a Q-value based algorithm ``action = np.argmax(q_values[obs, np.where(info["action_mask"] == 1)[0]])``. + + ### Rewards + - -1 per step unless other reward is triggered. + - +20 delivering passenger. + - -10 executing "pickup" and "drop-off" actions illegally. + + ### Arguments + + ``` + gym.make('Taxi-v3') + ``` + + ### Version History + * v3: Map Correction + Cleaner Domain Description, v0.25.0 action masking added to the reset and step information + * v2: Disallow Taxi start location = goal location, Update Taxi observations in the rollout, Update Taxi reward threshold. + * v1: Remove (3,2) from locs, add passidx<4 check + * v0: Initial versions release + """ + + metadata = { + "render_modes": ["human", "ansi", "rgb_array"], + "render_fps": 4, + } + + def __init__(self, render_mode: Optional[str] = None): + self.desc = np.asarray(MAP, dtype="c") + + self.locs = locs = [(0, 0), (0, 4), (4, 0), (4, 3)] + self.locs_colors = [(255, 0, 0), (0, 255, 0), (255, 255, 0), (0, 0, 255)] + + num_states = 500 + num_rows = 5 + num_columns = 5 + max_row = num_rows - 1 + max_col = num_columns - 1 + self.initial_state_distrib = np.zeros(num_states) + num_actions = 6 + self.P = { + state: {action: [] for action in range(num_actions)} + for state in range(num_states) + } + for row in range(num_rows): + for col in range(num_columns): + for pass_idx in range(len(locs) + 1): # +1 for being inside taxi + for dest_idx in range(len(locs)): + state = self.encode(row, col, pass_idx, dest_idx) + if pass_idx < 4 and pass_idx != dest_idx: + self.initial_state_distrib[state] += 1 + for action in range(num_actions): + # defaults + new_row, new_col, new_pass_idx = row, col, pass_idx + reward = ( + -1 + ) # default reward when there is no pickup/dropoff + terminated = False + taxi_loc = (row, col) + + if action == 0: + new_row = min(row + 1, max_row) + elif action == 1: + new_row = max(row - 1, 0) + if action == 2 and self.desc[1 + row, 2 * col + 2] == b":": + new_col = min(col + 1, max_col) + elif action == 3 and self.desc[1 + row, 2 * col] == b":": + new_col = max(col - 1, 0) + elif action == 4: # pickup + if pass_idx < 4 and taxi_loc == locs[pass_idx]: + new_pass_idx = 4 + else: # passenger not at location + reward = -10 + elif action == 5: # dropoff + if (taxi_loc == locs[dest_idx]) and pass_idx == 4: + new_pass_idx = dest_idx + terminated = True + reward = 20 + elif (taxi_loc in locs) and pass_idx == 4: + new_pass_idx = locs.index(taxi_loc) + else: # dropoff at wrong location + reward = -10 + new_state = self.encode( + new_row, new_col, new_pass_idx, dest_idx + ) + self.P[state][action].append( + (1.0, new_state, reward, terminated) + ) + self.initial_state_distrib /= self.initial_state_distrib.sum() + self.action_space = spaces.Discrete(num_actions) + self.observation_space = spaces.Discrete(num_states) + + self.render_mode = render_mode + + # pygame utils + self.window = None + self.clock = None + self.cell_size = ( + WINDOW_SIZE[0] / self.desc.shape[1], + WINDOW_SIZE[1] / self.desc.shape[0], + ) + self.taxi_imgs = None + self.taxi_orientation = 0 + self.passenger_img = None + self.destination_img = None + self.median_horiz = None + self.median_vert = None + self.background_img = None + + def encode(self, taxi_row, taxi_col, pass_loc, dest_idx): + # (5) 5, 5, 4 + i = taxi_row + i *= 5 + i += taxi_col + i *= 5 + i += pass_loc + i *= 4 + i += dest_idx + return i + + def decode(self, i): + out = [] + out.append(i 
% 4) + i = i // 4 + out.append(i % 5) + i = i // 5 + out.append(i % 5) + i = i // 5 + out.append(i) + assert 0 <= i < 5 + return reversed(out) + + def action_mask(self, state: int): + """Computes an action mask for the action space using the state information.""" + mask = np.zeros(6, dtype=np.int8) + taxi_row, taxi_col, pass_loc, dest_idx = self.decode(state) + if taxi_row < 4: + mask[0] = 1 + if taxi_row > 0: + mask[1] = 1 + if taxi_col < 4 and self.desc[taxi_row + 1, 2 * taxi_col + 2] == b":": + mask[2] = 1 + if taxi_col > 0 and self.desc[taxi_row + 1, 2 * taxi_col] == b":": + mask[3] = 1 + if pass_loc < 4 and (taxi_row, taxi_col) == self.locs[pass_loc]: + mask[4] = 1 + if pass_loc == 4 and ( + (taxi_row, taxi_col) == self.locs[dest_idx] + or (taxi_row, taxi_col) in self.locs + ): + mask[5] = 1 + return mask + + def step(self, a): + transitions = self.P[self.s][a] + i = categorical_sample([t[0] for t in transitions], self.np_random) + p, s, r, t = transitions[i] + self.s = s + self.lastaction = a + + if self.render_mode == "human": + self.render() + return (int(s), r, t, False, {"prob": p, "action_mask": self.action_mask(s)}) + + def reset( + self, + *, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + super().reset(seed=seed) + self.s = categorical_sample(self.initial_state_distrib, self.np_random) + self.lastaction = None + self.taxi_orientation = 0 + + if self.render_mode == "human": + self.render() + return int(self.s), {"prob": 1.0, "action_mask": self.action_mask(self.s)} + + def render(self): + if self.render_mode is None: + logger.warn( + "You are calling render method without specifying any render mode. " + "You can specify the render_mode at initialization, " + f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' + ) + if self.render_mode == "ansi": + return self._render_text() + else: # self.render_mode in {"human", "rgb_array"}: + return self._render_gui(self.render_mode) + + def _render_gui(self, mode): + try: + import pygame # dependency to pygame only if rendering with human + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[toy_text]`" + ) + + if self.window is None: + pygame.init() + pygame.display.set_caption("Taxi") + if mode == "human": + self.window = pygame.display.set_mode(WINDOW_SIZE) + elif mode == "rgb_array": + self.window = pygame.Surface(WINDOW_SIZE) + + assert ( + self.window is not None + ), "Something went wrong with pygame. This should never happen." 
+ if self.clock is None: + self.clock = pygame.time.Clock() + if self.taxi_imgs is None: + file_names = [ + path.join(path.dirname(__file__), "img/cab_front.png"), + path.join(path.dirname(__file__), "img/cab_rear.png"), + path.join(path.dirname(__file__), "img/cab_right.png"), + path.join(path.dirname(__file__), "img/cab_left.png"), + ] + self.taxi_imgs = [ + pygame.transform.scale(pygame.image.load(file_name), self.cell_size) + for file_name in file_names + ] + if self.passenger_img is None: + file_name = path.join(path.dirname(__file__), "img/passenger.png") + self.passenger_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + if self.destination_img is None: + file_name = path.join(path.dirname(__file__), "img/hotel.png") + self.destination_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + self.destination_img.set_alpha(170) + if self.median_horiz is None: + file_names = [ + path.join(path.dirname(__file__), "img/gridworld_median_left.png"), + path.join(path.dirname(__file__), "img/gridworld_median_horiz.png"), + path.join(path.dirname(__file__), "img/gridworld_median_right.png"), + ] + self.median_horiz = [ + pygame.transform.scale(pygame.image.load(file_name), self.cell_size) + for file_name in file_names + ] + if self.median_vert is None: + file_names = [ + path.join(path.dirname(__file__), "img/gridworld_median_top.png"), + path.join(path.dirname(__file__), "img/gridworld_median_vert.png"), + path.join(path.dirname(__file__), "img/gridworld_median_bottom.png"), + ] + self.median_vert = [ + pygame.transform.scale(pygame.image.load(file_name), self.cell_size) + for file_name in file_names + ] + if self.background_img is None: + file_name = path.join(path.dirname(__file__), "img/taxi_background.png") + self.background_img = pygame.transform.scale( + pygame.image.load(file_name), self.cell_size + ) + + desc = self.desc + + for y in range(0, desc.shape[0]): + for x in range(0, desc.shape[1]): + cell = (x * self.cell_size[0], y * self.cell_size[1]) + self.window.blit(self.background_img, cell) + if desc[y][x] == b"|" and (y == 0 or desc[y - 1][x] != b"|"): + self.window.blit(self.median_vert[0], cell) + elif desc[y][x] == b"|" and ( + y == desc.shape[0] - 1 or desc[y + 1][x] != b"|" + ): + self.window.blit(self.median_vert[2], cell) + elif desc[y][x] == b"|": + self.window.blit(self.median_vert[1], cell) + elif desc[y][x] == b"-" and (x == 0 or desc[y][x - 1] != b"-"): + self.window.blit(self.median_horiz[0], cell) + elif desc[y][x] == b"-" and ( + x == desc.shape[1] - 1 or desc[y][x + 1] != b"-" + ): + self.window.blit(self.median_horiz[2], cell) + elif desc[y][x] == b"-": + self.window.blit(self.median_horiz[1], cell) + + for cell, color in zip(self.locs, self.locs_colors): + color_cell = pygame.Surface(self.cell_size) + color_cell.set_alpha(128) + color_cell.fill(color) + loc = self.get_surf_loc(cell) + self.window.blit(color_cell, (loc[0], loc[1] + 10)) + + taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s) + + if pass_idx < 4: + self.window.blit(self.passenger_img, self.get_surf_loc(self.locs[pass_idx])) + + if self.lastaction in [0, 1, 2, 3]: + self.taxi_orientation = self.lastaction + dest_loc = self.get_surf_loc(self.locs[dest_idx]) + taxi_location = self.get_surf_loc((taxi_row, taxi_col)) + + if dest_loc[1] <= taxi_location[1]: + self.window.blit( + self.destination_img, + (dest_loc[0], dest_loc[1] - self.cell_size[1] // 2), + ) + self.window.blit(self.taxi_imgs[self.taxi_orientation], taxi_location) + 
else: # change blit order for overlapping appearance + self.window.blit(self.taxi_imgs[self.taxi_orientation], taxi_location) + self.window.blit( + self.destination_img, + (dest_loc[0], dest_loc[1] - self.cell_size[1] // 2), + ) + + if mode == "human": + pygame.display.update() + self.clock.tick(self.metadata["render_fps"]) + elif mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.window)), axes=(1, 0, 2) + ) + + def get_surf_loc(self, map_loc): + return (map_loc[1] * 2 + 1) * self.cell_size[0], ( + map_loc[0] + 1 + ) * self.cell_size[1] + + def _render_text(self): + desc = self.desc.copy().tolist() + outfile = StringIO() + + out = [[c.decode("utf-8") for c in line] for line in desc] + taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s) + + def ul(x): + return "_" if x == " " else x + + if pass_idx < 4: + out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize( + out[1 + taxi_row][2 * taxi_col + 1], "yellow", highlight=True + ) + pi, pj = self.locs[pass_idx] + out[1 + pi][2 * pj + 1] = utils.colorize( + out[1 + pi][2 * pj + 1], "blue", bold=True + ) + else: # passenger in taxi + out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize( + ul(out[1 + taxi_row][2 * taxi_col + 1]), "green", highlight=True + ) + + di, dj = self.locs[dest_idx] + out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], "magenta") + outfile.write("\n".join(["".join(row) for row in out]) + "\n") + if self.lastaction is not None: + outfile.write( + f" ({['South', 'North', 'East', 'West', 'Pickup', 'Dropoff'][self.lastaction]})\n" + ) + else: + outfile.write("\n") + + with closing(outfile): + return outfile.getvalue() + + def close(self): + if self.window is not None: + import pygame + + pygame.display.quit() + pygame.quit() + + +# Taxi rider from https://franuka.itch.io/rpg-asset-pack +# All other assets by Mel Tillery http://www.cyaneus.com/ diff --git a/MLPY/Lib/site-packages/gym/envs/toy_text/utils.py b/MLPY/Lib/site-packages/gym/envs/toy_text/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..20d259da46a0a40c878ec4155e5bb090b0b85f48 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/envs/toy_text/utils.py @@ -0,0 +1,8 @@ +import numpy as np + + +def categorical_sample(prob_n, np_random: np.random.Generator): + """Sample from categorical distribution where each row specifies class probabilities.""" + prob_n = np.asarray(prob_n) + csprob_n = np.cumsum(prob_n) + return np.argmax(csprob_n > np_random.random()) diff --git a/MLPY/Lib/site-packages/gym/error.py b/MLPY/Lib/site-packages/gym/error.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9b88998866c174766b0a983ec43933ef8fa19a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/error.py @@ -0,0 +1,194 @@ +"""Set of Error classes for gym.""" +import warnings + + +class Error(Exception): + """Error superclass.""" + + +# Local errors + + +class Unregistered(Error): + """Raised when the user requests an item from the registry that does not actually exist.""" + + +class UnregisteredEnv(Unregistered): + """Raised when the user requests an env from the registry that does not actually exist.""" + + +class NamespaceNotFound(UnregisteredEnv): + """Raised when the user requests an env from the registry where the namespace doesn't exist.""" + + +class NameNotFound(UnregisteredEnv): + """Raised when the user requests an env from the registry where the name doesn't exist.""" + + +class VersionNotFound(UnregisteredEnv): + """Raised when the user requests an env from the registry where the 
version doesn't exist.""" + + +class UnregisteredBenchmark(Unregistered): + """Raised when the user requests an env from the registry that does not actually exist.""" + + +class DeprecatedEnv(Error): + """Raised when the user requests an env from the registry with an older version number than the latest env with the same name.""" + + +class RegistrationError(Error): + """Raised when the user attempts to register an invalid env. For example, an unversioned env when a versioned env exists.""" + + +class UnseedableEnv(Error): + """Raised when the user tries to seed an env that does not support seeding.""" + + +class DependencyNotInstalled(Error): + """Raised when the user has not installed a dependency.""" + + +class UnsupportedMode(Error): + """Raised when the user requests a rendering mode not supported by the environment.""" + + +class ResetNeeded(Error): + """When the order enforcing is violated, i.e. step or render is called before reset.""" + + +class ResetNotAllowed(Error): + """When the monitor is active, raised when the user tries to step an environment that's not yet terminated or truncated.""" + + +class InvalidAction(Error): + """Raised when the user performs an action not contained within the action space.""" + + +# API errors + + +class APIError(Error): + """Deprecated, to be removed at gym 1.0.""" + + def __init__( + self, + message=None, + http_body=None, + http_status=None, + json_body=None, + headers=None, + ): + """Initialise API error.""" + super().__init__(message) + + warnings.warn("APIError is deprecated and will be removed at gym 1.0") + + if http_body and hasattr(http_body, "decode"): + try: + http_body = http_body.decode("utf-8") + except Exception: + http_body = "" + + self._message = message + self.http_body = http_body + self.http_status = http_status + self.json_body = json_body + self.headers = headers or {} + self.request_id = self.headers.get("request-id", None) + + def __unicode__(self): + """Returns a string, if request_id is not None then make message other use the _message.""" + if self.request_id is not None: + msg = self._message or "" + return f"Request {self.request_id}: {msg}" + else: + return self._message + + def __str__(self): + """Returns the __unicode__.""" + return self.__unicode__() + + +class APIConnectionError(APIError): + """Deprecated, to be removed at gym 1.0.""" + + +class InvalidRequestError(APIError): + """Deprecated, to be removed at gym 1.0.""" + + def __init__( + self, + message, + param, + http_body=None, + http_status=None, + json_body=None, + headers=None, + ): + """Initialises the invalid request error.""" + super().__init__(message, http_body, http_status, json_body, headers) + self.param = param + + +class AuthenticationError(APIError): + """Deprecated, to be removed at gym 1.0.""" + + +class RateLimitError(APIError): + """Deprecated, to be removed at gym 1.0.""" + + +# Video errors + + +class VideoRecorderError(Error): + """Unused error.""" + + +class InvalidFrame(Error): + """Error message when an invalid frame is captured.""" + + +# Wrapper errors + + +class DoubleWrapperError(Error): + """Error message for when using double wrappers.""" + + +class WrapAfterConfigureError(Error): + """Error message for using wrap after configure.""" + + +class RetriesExceededError(Error): + """Error message for retries exceeding set number.""" + + +# Vectorized environments errors + + +class AlreadyPendingCallError(Exception): + """Raised when `reset`, or `step` is called asynchronously (e.g. 
with `reset_async`, or `step_async` respectively), and `reset_async`, or `step_async` (respectively) is called again (without a complete call to `reset_wait`, or `step_wait` respectively).""" + + def __init__(self, message: str, name: str): + """Initialises the exception with name attributes.""" + super().__init__(message) + self.name = name + + +class NoAsyncCallError(Exception): + """Raised when an asynchronous `reset`, or `step` is not running, but `reset_wait`, or `step_wait` (respectively) is called.""" + + def __init__(self, message: str, name: str): + """Initialises the exception with name attributes.""" + super().__init__(message) + self.name = name + + +class ClosedEnvironmentError(Exception): + """Trying to call `reset`, or `step`, while the environment is closed.""" + + +class CustomSpaceError(Exception): + """The space is a custom gym.Space instance, and is not supported by `AsyncVectorEnv` with `shared_memory=True`.""" diff --git a/MLPY/Lib/site-packages/gym/logger.py b/MLPY/Lib/site-packages/gym/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..349e5756cfe75e101c8d74653a177e654da52e34 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/logger.py @@ -0,0 +1,73 @@ +"""Set of functions for logging messages.""" +import sys +import warnings +from typing import Optional, Type + +from gym.utils import colorize + +DEBUG = 10 +INFO = 20 +WARN = 30 +ERROR = 40 +DISABLED = 50 + +min_level = 30 + + +# Ensure DeprecationWarning to be displayed (#2685, #3059) +warnings.filterwarnings("once", "", DeprecationWarning, module=r"^gym\.") + + +def set_level(level: int): + """Set logging threshold on current logger.""" + global min_level + min_level = level + + +def debug(msg: str, *args: object): + """Logs a debug message to the user.""" + if min_level <= DEBUG: + print(f"DEBUG: {msg % args}", file=sys.stderr) + + +def info(msg: str, *args: object): + """Logs an info message to the user.""" + if min_level <= INFO: + print(f"INFO: {msg % args}", file=sys.stderr) + + +def warn( + msg: str, + *args: object, + category: Optional[Type[Warning]] = None, + stacklevel: int = 1, +): + """Raises a warning to the user if the min_level <= WARN. + + Args: + msg: The message to warn the user + *args: Additional information to warn the user + category: The category of warning + stacklevel: The stack level to raise to + """ + if min_level <= WARN: + warnings.warn( + colorize(f"WARN: {msg % args}", "yellow"), + category=category, + stacklevel=stacklevel + 1, + ) + + +def deprecation(msg: str, *args: object): + """Logs a deprecation warning to users.""" + warn(msg, *args, category=DeprecationWarning, stacklevel=2) + + +def error(msg: str, *args: object): + """Logs an error message if min_level <= ERROR in red on the sys.stderr.""" + if min_level <= ERROR: + print(colorize(f"ERROR: {msg % args}", "red"), file=sys.stderr) + + +# DEPRECATED: +setLevel = set_level diff --git a/MLPY/Lib/site-packages/gym/py.typed b/MLPY/Lib/site-packages/gym/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/gym/spaces/__init__.py b/MLPY/Lib/site-packages/gym/spaces/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6f4923c284e8218aec325f312d4a5f77e93b3f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/__init__.py @@ -0,0 +1,39 @@ +"""This module implements various spaces. + +Spaces describe mathematical sets and are used in Gym to specify valid actions and observations. 
+Every Gym environment must have the attributes ``action_space`` and ``observation_space``. +If, for instance, three possible actions (0,1,2) can be performed in your environment and observations +are vectors in the two-dimensional unit cube, the environment code may contain the following two lines:: + + self.action_space = spaces.Discrete(3) + self.observation_space = spaces.Box(0, 1, shape=(2,)) +""" +from gym.spaces.box import Box +from gym.spaces.dict import Dict +from gym.spaces.discrete import Discrete +from gym.spaces.graph import Graph, GraphInstance +from gym.spaces.multi_binary import MultiBinary +from gym.spaces.multi_discrete import MultiDiscrete +from gym.spaces.sequence import Sequence +from gym.spaces.space import Space +from gym.spaces.text import Text +from gym.spaces.tuple import Tuple +from gym.spaces.utils import flatdim, flatten, flatten_space, unflatten + +__all__ = [ + "Space", + "Box", + "Discrete", + "Text", + "Graph", + "GraphInstance", + "MultiDiscrete", + "MultiBinary", + "Tuple", + "Sequence", + "Dict", + "flatdim", + "flatten_space", + "flatten", + "unflatten", +] diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac32524767e83e3b69a97fd348054a74d3c0c31 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/box.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/box.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c790967baec59a1e70861ae183edf4b235e9a9 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/box.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/dict.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/dict.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9640fc2090ecf829b2aa2b21be282cc066512463 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/dict.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/discrete.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/discrete.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b20f45fdf552884ef9be82bd338d7cd13234902 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/discrete.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/graph.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fe5ca224bbda9247bd5bd2ec18a415b9d536b46 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/graph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_binary.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_binary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb60643d2701de13ec844982a7fddf3061ea0ec7 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_binary.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_discrete.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_discrete.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b3ab763b0ae3203066e4db7b59b11141013ea98d Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/multi_discrete.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/sequence.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/sequence.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f827a3d0c60bd15a47d509b531e130805d4aea5 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/sequence.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/space.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/space.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7494049d8b1e3fccff6101d3060e37e04c2e126 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/space.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/text.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/text.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..368b78eb7695737709a6711228d5eae0f39a2bd4 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/text.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/tuple.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/tuple.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ab39ea30bc03ad02394d8e9790d13f9b294adf6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/tuple.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/gym/spaces/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62814d86a043ddbee1eadb0751ccee3d5b8c0c91 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/spaces/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/spaces/box.py b/MLPY/Lib/site-packages/gym/spaces/box.py new file mode 100644 index 0000000000000000000000000000000000000000..f09cbcf1863239d8ed8ef068031dbb8460a1dadd --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/box.py @@ -0,0 +1,338 @@ +"""Implementation of a space that represents closed boxes in euclidean space.""" +from typing import Dict, List, Optional, Sequence, SupportsFloat, Tuple, Type, Union + +import numpy as np + +import gym.error +from gym import logger +from gym.spaces.space import Space + + +def _short_repr(arr: np.ndarray) -> str: + """Create a shortened string representation of a numpy array. + + If arr is a multiple of the all-ones vector, return a string representation of the multiplier. + Otherwise, return a string representation of the entire array. + + Args: + arr: The array to represent + + Returns: + A short representation of the array + """ + if arr.size != 0 and np.min(arr) == np.max(arr): + return str(np.min(arr)) + return str(arr) + + +def is_float_integer(var) -> bool: + """Checks if a variable is an integer or float.""" + return np.issubdtype(type(var), np.integer) or np.issubdtype(type(var), np.floating) + + +class Box(Space[np.ndarray]): + r"""A (possibly unbounded) box in :math:`\mathbb{R}^n`. + + Specifically, a Box represents the Cartesian product of n closed intervals. + Each interval has the form of one of :math:`[a, b]`, :math:`(-\infty, b]`, + :math:`[a, \infty)`, or :math:`(-\infty, \infty)`. 
+ + There are two common use cases: + + * Identical bound for each dimension:: + + >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32) + Box(3, 4) + + * Independent bound for each dimension:: + + >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32) + Box(2,) + """ + + def __init__( + self, + low: Union[SupportsFloat, np.ndarray], + high: Union[SupportsFloat, np.ndarray], + shape: Optional[Sequence[int]] = None, + dtype: Type = np.float32, + seed: Optional[Union[int, np.random.Generator]] = None, + ): + r"""Constructor of :class:`Box`. + + The argument ``low`` specifies the lower bound of each dimension and ``high`` specifies the upper bounds. + I.e., the space that is constructed will be the product of the intervals :math:`[\text{low}[i], \text{high}[i]]`. + + If ``low`` (or ``high``) is a scalar, the lower bound (or upper bound, respectively) will be assumed to be + this value across all dimensions. + + Args: + low (Union[SupportsFloat, np.ndarray]): Lower bounds of the intervals. + high (Union[SupportsFloat, np.ndarray]): Upper bounds of the intervals. + shape (Optional[Sequence[int]]): The shape is inferred from the shape of `low` or `high` `np.ndarray`s with + `low` and `high` scalars defaulting to a shape of (1,) + dtype: The dtype of the elements of the space. If this is an integer type, the :class:`Box` is essentially a discrete space. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. + + Raises: + ValueError: If no shape information is provided (shape is None, low is None and high is None) then a + value error is raised. + """ + assert ( + dtype is not None + ), "Box dtype must be explicitly provided, cannot be None." + self.dtype = np.dtype(dtype) + + # determine shape if it isn't provided directly + if shape is not None: + assert all( + np.issubdtype(type(dim), np.integer) for dim in shape + ), f"Expect all shape elements to be an integer, actual type: {tuple(type(dim) for dim in shape)}" + shape = tuple(int(dim) for dim in shape) # This changes any np types to int + elif isinstance(low, np.ndarray): + shape = low.shape + elif isinstance(high, np.ndarray): + shape = high.shape + elif is_float_integer(low) and is_float_integer(high): + shape = (1,) + else: + raise ValueError( + f"Box shape is inferred from low and high, expect their types to be np.ndarray, an integer or a float, actual type low: {type(low)}, high: {type(high)}" + ) + + # Capture the boundedness information before replacing np.inf with get_inf + _low = np.full(shape, low, dtype=float) if is_float_integer(low) else low + self.bounded_below = -np.inf < _low + _high = np.full(shape, high, dtype=float) if is_float_integer(high) else high + self.bounded_above = np.inf > _high + + low = _broadcast(low, dtype, shape, inf_sign="-") # type: ignore + high = _broadcast(high, dtype, shape, inf_sign="+") # type: ignore + + assert isinstance(low, np.ndarray) + assert ( + low.shape == shape + ), f"low.shape doesn't match provided shape, low.shape: {low.shape}, shape: {shape}" + assert isinstance(high, np.ndarray) + assert ( + high.shape == shape + ), f"high.shape doesn't match provided shape, high.shape: {high.shape}, shape: {shape}" + + self._shape: Tuple[int, ...] 
= shape + + low_precision = get_precision(low.dtype) + high_precision = get_precision(high.dtype) + dtype_precision = get_precision(self.dtype) + if min(low_precision, high_precision) > dtype_precision: # type: ignore + logger.warn(f"Box bound precision lowered by casting to {self.dtype}") + self.low = low.astype(self.dtype) + self.high = high.astype(self.dtype) + + self.low_repr = _short_repr(self.low) + self.high_repr = _short_repr(self.high) + + super().__init__(self.shape, self.dtype, seed) + + @property + def shape(self) -> Tuple[int, ...]: + """Has stricter type than gym.Space - never None.""" + return self._shape + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return True + + def is_bounded(self, manner: str = "both") -> bool: + """Checks whether the box is bounded in some sense. + + Args: + manner (str): One of ``"both"``, ``"below"``, ``"above"``. + + Returns: + If the space is bounded + + Raises: + ValueError: If `manner` is neither ``"both"`` nor ``"below"`` or ``"above"`` + """ + below = bool(np.all(self.bounded_below)) + above = bool(np.all(self.bounded_above)) + if manner == "both": + return below and above + elif manner == "below": + return below + elif manner == "above": + return above + else: + raise ValueError( + f"manner is not in {{'below', 'above', 'both'}}, actual value: {manner}" + ) + + def sample(self, mask: None = None) -> np.ndarray: + r"""Generates a single random sample inside the Box. + + In creating a sample of the box, each coordinate is sampled (independently) from a distribution + that is chosen according to the form of the interval: + + * :math:`[a, b]` : uniform distribution + * :math:`[a, \infty)` : shifted exponential distribution + * :math:`(-\infty, b]` : shifted negative exponential distribution + * :math:`(-\infty, \infty)` : normal distribution + + Args: + mask: A mask for sampling values from the Box space, currently unsupported. 
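To make the interval-dependent sampling above concrete, a small sketch mixing bounded, lower-bounded and unbounded coordinates in one Box (the bounds are illustrative)::

    import numpy as np
    from gym.spaces import Box

    # Coordinate 0 is bounded (uniform), coordinate 1 is lower-bounded
    # (shifted exponential), coordinate 2 is unbounded (standard normal).
    space = Box(low=np.array([0.0, 0.0, -np.inf], dtype=np.float32),
                high=np.array([1.0, np.inf, np.inf], dtype=np.float32))
    space.seed(0)

    sample = space.sample()
    assert sample.shape == (3,) and sample in space
    assert not space.is_bounded("both") and not space.is_bounded("above")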
+ + Returns: + A sampled value from the Box + """ + if mask is not None: + raise gym.error.Error( + f"Box.sample cannot be provided a mask, actual value: {mask}" + ) + + high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1 + sample = np.empty(self.shape) + + # Masking arrays which classify the coordinates according to interval + # type + unbounded = ~self.bounded_below & ~self.bounded_above + upp_bounded = ~self.bounded_below & self.bounded_above + low_bounded = self.bounded_below & ~self.bounded_above + bounded = self.bounded_below & self.bounded_above + + # Vectorized sampling by interval type + sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape) + + sample[low_bounded] = ( + self.np_random.exponential(size=low_bounded[low_bounded].shape) + + self.low[low_bounded] + ) + + sample[upp_bounded] = ( + -self.np_random.exponential(size=upp_bounded[upp_bounded].shape) + + self.high[upp_bounded] + ) + + sample[bounded] = self.np_random.uniform( + low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape + ) + if self.dtype.kind == "i": + sample = np.floor(sample) + + return sample.astype(self.dtype) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if not isinstance(x, np.ndarray): + logger.warn("Casting input x to numpy array.") + try: + x = np.asarray(x, dtype=self.dtype) + except (ValueError, TypeError): + return False + + return bool( + np.can_cast(x.dtype, self.dtype) + and x.shape == self.shape + and np.all(x >= self.low) + and np.all(x <= self.high) + ) + + def to_jsonable(self, sample_n): + """Convert a batch of samples from this space to a JSONable data type.""" + return np.array(sample_n).tolist() + + def from_jsonable(self, sample_n: Sequence[Union[float, int]]) -> List[np.ndarray]: + """Convert a JSONable data type to a batch of samples from this space.""" + return [np.asarray(sample) for sample in sample_n] + + def __repr__(self) -> str: + """A string representation of this space. + + The representation will include bounds, shape and dtype. + If a bound is uniform, only the corresponding scalar will be given to avoid redundant and ugly strings. + + Returns: + A representation of the space + """ + return f"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})" + + def __eq__(self, other) -> bool: + """Check whether `other` is equivalent to this instance. Doesn't check dtype equivalence.""" + return ( + isinstance(other, Box) + and (self.shape == other.shape) + # and (self.dtype == other.dtype) + and np.allclose(self.low, other.low) + and np.allclose(self.high, other.high) + ) + + def __setstate__(self, state: Dict): + """Sets the state of the box for unpickling a box with legacy support.""" + super().__setstate__(state) + + # legacy support through re-adding "low_repr" and "high_repr" if missing from pickled state + if not hasattr(self, "low_repr"): + self.low_repr = _short_repr(self.low) + + if not hasattr(self, "high_repr"): + self.high_repr = _short_repr(self.high) + + +def get_inf(dtype, sign: str) -> SupportsFloat: + """Returns an infinite that doesn't break things. 
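As a sketch of what `get_inf` (whose body continues below) and `_broadcast` mean for integer boxes: infinite bounds are replaced by values just inside the dtype's representable range, while the boundedness flags still record the original intent (bounds chosen for illustration)::

    import numpy as np
    from gym.spaces import Box

    space = Box(low=0, high=np.inf, shape=(2,), dtype=np.int64)

    assert np.all(space.high == np.iinfo(np.int64).max - 2)   # +inf replaced in-range
    assert space.is_bounded("below") and not space.is_bounded("above")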
+ + Args: + dtype: An `np.dtype` + sign (str): must be either `"+"` or `"-"` + + Returns: + Gets an infinite value with the sign and dtype + + Raises: + TypeError: Unknown sign, use either '+' or '-' + ValueError: Unknown dtype for infinite bounds + """ + if np.dtype(dtype).kind == "f": + if sign == "+": + return np.inf + elif sign == "-": + return -np.inf + else: + raise TypeError(f"Unknown sign {sign}, use either '+' or '-'") + elif np.dtype(dtype).kind == "i": + if sign == "+": + return np.iinfo(dtype).max - 2 + elif sign == "-": + return np.iinfo(dtype).min + 2 + else: + raise TypeError(f"Unknown sign {sign}, use either '+' or '-'") + else: + raise ValueError(f"Unknown dtype {dtype} for infinite bounds") + + +def get_precision(dtype) -> SupportsFloat: + """Get precision of a data type.""" + if np.issubdtype(dtype, np.floating): + return np.finfo(dtype).precision + else: + return np.inf + + +def _broadcast( + value: Union[SupportsFloat, np.ndarray], + dtype, + shape: Tuple[int, ...], + inf_sign: str, +) -> np.ndarray: + """Handle infinite bounds and broadcast at the same time if needed.""" + if is_float_integer(value): + value = get_inf(dtype, inf_sign) if np.isinf(value) else value # type: ignore + value = np.full(shape, value, dtype=dtype) + else: + assert isinstance(value, np.ndarray) + if np.any(np.isinf(value)): + # create new array with dtype, but maintain old one to preserve np.inf + temp = value.astype(dtype) + temp[np.isinf(value)] = get_inf(dtype, inf_sign) + value = temp + return value diff --git a/MLPY/Lib/site-packages/gym/spaces/dict.py b/MLPY/Lib/site-packages/gym/spaces/dict.py new file mode 100644 index 0000000000000000000000000000000000000000..280986fcf9ebe692c4237a2c7936b3affa70ac96 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/dict.py @@ -0,0 +1,245 @@ +"""Implementation of a space that represents the cartesian product of other spaces as a dictionary.""" +from collections import OrderedDict +from collections.abc import Mapping, Sequence +from typing import Any +from typing import Dict as TypingDict +from typing import List, Optional +from typing import Sequence as TypingSequence +from typing import Tuple, Union + +import numpy as np + +from gym.spaces.space import Space + + +class Dict(Space[TypingDict[str, Space]], Mapping): + """A dictionary of :class:`Space` instances. + + Elements of this space are (ordered) dictionaries of elements from the constituent spaces. + + Example usage: + + >>> from gym.spaces import Dict, Discrete + >>> observation_space = Dict({"position": Discrete(2), "velocity": Discrete(3)}) + >>> observation_space.sample() + OrderedDict([('position', 1), ('velocity', 2)]) + + Example usage [nested]:: + + >>> from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete + >>> Dict( + ... { + ... "ext_controller": MultiDiscrete([5, 2, 2]), + ... "inner_state": Dict( + ... { + ... "charge": Discrete(100), + ... "system_checks": MultiBinary(10), + ... "job_status": Dict( + ... { + ... "task": Discrete(5), + ... "progress": Box(low=0, high=100, shape=()), + ... } + ... ), + ... } + ... ), + ... } + ... ) + + It can be convenient to use :class:`Dict` spaces if you want to make complex observations or actions more human-readable. + Usually, it will not be possible to use elements of this space directly in learning code. However, you can easily + convert `Dict` observations to flat arrays by using a :class:`gym.wrappers.FlattenObservation` wrapper. Similar wrappers can be + implemented to deal with :class:`Dict` actions. 
+ """ + + def __init__( + self, + spaces: Optional[ + Union[ + TypingDict[str, Space], + TypingSequence[Tuple[str, Space]], + ] + ] = None, + seed: Optional[Union[dict, int, np.random.Generator]] = None, + **spaces_kwargs: Space, + ): + """Constructor of :class:`Dict` space. + + This space can be instantiated in one of two ways: Either you pass a dictionary + of spaces to :meth:`__init__` via the ``spaces`` argument, or you pass the spaces as separate + keyword arguments (where you will need to avoid the keys ``spaces`` and ``seed``) + + Example:: + + >>> from gym.spaces import Box, Discrete + >>> Dict({"position": Box(-1, 1, shape=(2,)), "color": Discrete(3)}) + Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32)) + >>> Dict(position=Box(-1, 1, shape=(2,)), color=Discrete(3)) + Dict(color:Discrete(3), position:Box(-1.0, 1.0, (2,), float32)) + + Args: + spaces: A dictionary of spaces. This specifies the structure of the :class:`Dict` space + seed: Optionally, you can use this argument to seed the RNGs of the spaces that make up the :class:`Dict` space. + **spaces_kwargs: If ``spaces`` is ``None``, you need to pass the constituent spaces as keyword arguments, as described above. + """ + # Convert the spaces into an OrderedDict + if isinstance(spaces, Mapping) and not isinstance(spaces, OrderedDict): + try: + spaces = OrderedDict(sorted(spaces.items())) + except TypeError: + # Incomparable types (e.g. `int` vs. `str`, or user-defined types) found. + # The keys remain in the insertion order. + spaces = OrderedDict(spaces.items()) + elif isinstance(spaces, Sequence): + spaces = OrderedDict(spaces) + elif spaces is None: + spaces = OrderedDict() + else: + assert isinstance( + spaces, OrderedDict + ), f"Unexpected Dict space input, expecting dict, OrderedDict or Sequence, actual type: {type(spaces)}" + + # Add kwargs to spaces to allow both dictionary and keywords to be used + for key, space in spaces_kwargs.items(): + if key not in spaces: + spaces[key] = space + else: + raise ValueError( + f"Dict space keyword '{key}' already exists in the spaces dictionary." + ) + + self.spaces = spaces + for key, space in self.spaces.items(): + assert isinstance( + space, Space + ), f"Dict space element is not an instance of Space: key='{key}', space={space}" + + super().__init__( + None, None, seed # type: ignore + ) # None for shape and dtype, since it'll require special handling + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return all(space.is_np_flattenable for space in self.spaces.values()) + + def seed(self, seed: Optional[Union[dict, int]] = None) -> list: + """Seed the PRNG of this space and all subspaces. + + Depending on the type of seed, the subspaces will be seeded differently + * None - All the subspaces will use a random initial seed + * Int - The integer is used to seed the `Dict` space that is used to generate seed values for each of the subspaces. Warning, this does not guarantee unique seeds for all of the subspaces. + * Dict - Using all the keys in the seed dictionary, the values are used to seed the subspaces. This allows the seeding of multiple composite subspaces (`Dict["space": Dict[...], ...]` with `{"space": {...}, ...}`). + + Args: + seed: An optional list of ints or int to seed the (sub-)spaces. 
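A short sketch of the three seeding modes described above (the space keys are illustrative)::

    from gym.spaces import Box, Dict, Discrete

    space = Dict(position=Box(-1, 1, shape=(2,)), color=Discrete(3))

    space.seed(42)                             # one int: sub-seeds are derived per subspace
    space.seed({"position": 0, "color": 1})    # dict: each named subspace seeded explicitly
    space.seed(None)                           # None: every subspace gets a fresh random seed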
+ """ + seeds = [] + + if isinstance(seed, dict): + assert ( + seed.keys() == self.spaces.keys() + ), f"The seed keys: {seed.keys()} are not identical to space keys: {self.spaces.keys()}" + for key in seed.keys(): + seeds += self.spaces[key].seed(seed[key]) + elif isinstance(seed, int): + seeds = super().seed(seed) + # Using `np.int32` will mean that the same key occurring is extremely low, even for large subspaces + subseeds = self.np_random.integers( + np.iinfo(np.int32).max, size=len(self.spaces) + ) + for subspace, subseed in zip(self.spaces.values(), subseeds): + seeds += subspace.seed(int(subseed)) + elif seed is None: + for space in self.spaces.values(): + seeds += space.seed(None) + else: + raise TypeError( + f"Expected seed type: dict, int or None, actual type: {type(seed)}" + ) + + return seeds + + def sample(self, mask: Optional[TypingDict[str, Any]] = None) -> dict: + """Generates a single random sample from this space. + + The sample is an ordered dictionary of independent samples from the constituent spaces. + + Args: + mask: An optional mask for each of the subspaces, expects the same keys as the space + + Returns: + A dictionary with the same key and sampled values from :attr:`self.spaces` + """ + if mask is not None: + assert isinstance( + mask, dict + ), f"Expects mask to be a dict, actual type: {type(mask)}" + assert ( + mask.keys() == self.spaces.keys() + ), f"Expect mask keys to be same as space keys, mask keys: {mask.keys()}, space keys: {self.spaces.keys()}" + return OrderedDict( + [(k, space.sample(mask[k])) for k, space in self.spaces.items()] + ) + + return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()]) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, dict) and x.keys() == self.spaces.keys(): + return all(x[key] in self.spaces[key] for key in self.spaces.keys()) + return False + + def __getitem__(self, key: str) -> Space: + """Get the space that is associated to `key`.""" + return self.spaces[key] + + def __setitem__(self, key: str, value: Space): + """Set the space that is associated to `key`.""" + assert isinstance( + value, Space + ), f"Trying to set {key} to Dict space with value that is not a gym space, actual type: {type(value)}" + self.spaces[key] = value + + def __iter__(self): + """Iterator through the keys of the subspaces.""" + yield from self.spaces + + def __len__(self) -> int: + """Gives the number of simpler spaces that make up the `Dict` space.""" + return len(self.spaces) + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + return ( + "Dict(" + ", ".join([f"{k!r}: {s}" for k, s in self.spaces.items()]) + ")" + ) + + def __eq__(self, other) -> bool: + """Check whether `other` is equivalent to this instance.""" + return ( + isinstance(other, Dict) + # Comparison of `OrderedDict`s is order-sensitive + and self.spaces == other.spaces # OrderedDict.__eq__ + ) + + def to_jsonable(self, sample_n: list) -> dict: + """Convert a batch of samples from this space to a JSONable data type.""" + # serialize as dict-repr of vectors + return { + key: space.to_jsonable([sample[key] for sample in sample_n]) + for key, space in self.spaces.items() + } + + def from_jsonable(self, sample_n: TypingDict[str, list]) -> List[dict]: + """Convert a JSONable data type to a batch of samples from this space.""" + dict_of_list: TypingDict[str, list] = { + key: space.from_jsonable(sample_n[key]) + for key, space in self.spaces.items() + } + + 
n_elements = len(next(iter(dict_of_list.values()))) + result = [ + OrderedDict({key: value[n] for key, value in dict_of_list.items()}) + for n in range(n_elements) + ] + return result diff --git a/MLPY/Lib/site-packages/gym/spaces/discrete.py b/MLPY/Lib/site-packages/gym/spaces/discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..5d5e1c14a81514365cf855bb079f141d0f195151 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/discrete.py @@ -0,0 +1,126 @@ +"""Implementation of a space consisting of finitely many elements.""" +from typing import Optional, Union + +import numpy as np + +from gym.spaces.space import Space + + +class Discrete(Space[int]): + r"""A space consisting of finitely many elements. + + This class represents a finite subset of integers, more specifically a set of the form :math:`\{ a, a+1, \dots, a+n-1 \}`. + + Example:: + + >>> Discrete(2) # {0, 1} + >>> Discrete(3, start=-1) # {-1, 0, 1} + """ + + def __init__( + self, + n: int, + seed: Optional[Union[int, np.random.Generator]] = None, + start: int = 0, + ): + r"""Constructor of :class:`Discrete` space. + + This will construct the space :math:`\{\text{start}, ..., \text{start} + n - 1\}`. + + Args: + n (int): The number of elements of this space. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the ``Dict`` space. + start (int): The smallest element of this space. + """ + assert isinstance(n, (int, np.integer)) + assert n > 0, "n (counts) have to be positive" + assert isinstance(start, (int, np.integer)) + self.n = int(n) + self.start = int(start) + super().__init__((), np.int64, seed) + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return True + + def sample(self, mask: Optional[np.ndarray] = None) -> int: + """Generates a single random sample from this space. + + A sample will be chosen uniformly at random with the mask if provided + + Args: + mask: An optional mask for if an action can be selected. + Expected `np.ndarray` of shape `(n,)` and dtype `np.int8` where `1` represents valid actions and `0` invalid / infeasible actions. + If there are no possible actions (i.e. `np.all(mask == 0)`) then `space.start` will be returned. 
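For concreteness, a masked-sampling sketch following the rules above (the particular space is illustrative)::

    import numpy as np
    from gym.spaces import Discrete

    space = Discrete(3, start=-1)                  # the set {-1, 0, 1}
    space.seed(0)

    mask = np.array([0, 1, 1], dtype=np.int8)      # only the 2nd and 3rd actions allowed
    assert space.sample(mask) in (0, 1)

    # An all-zero mask falls back to `space.start`.
    assert space.sample(np.zeros(3, dtype=np.int8)) == -1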
+ + Returns: + A sampled integer from the space + """ + if mask is not None: + assert isinstance( + mask, np.ndarray + ), f"The expected type of the mask is np.ndarray, actual type: {type(mask)}" + assert ( + mask.dtype == np.int8 + ), f"The expected dtype of the mask is np.int8, actual dtype: {mask.dtype}" + assert mask.shape == ( + self.n, + ), f"The expected shape of the mask is {(self.n,)}, actual shape: {mask.shape}" + valid_action_mask = mask == 1 + assert np.all( + np.logical_or(mask == 0, valid_action_mask) + ), f"All values of a mask should be 0 or 1, actual values: {mask}" + if np.any(valid_action_mask): + return int( + self.start + self.np_random.choice(np.where(valid_action_mask)[0]) + ) + else: + return self.start + + return int(self.start + self.np_random.integers(self.n)) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, int): + as_int = x + elif isinstance(x, (np.generic, np.ndarray)) and ( + np.issubdtype(x.dtype, np.integer) and x.shape == () + ): + as_int = int(x) # type: ignore + else: + return False + + return self.start <= as_int < self.start + self.n + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + if self.start != 0: + return f"Discrete({self.n}, start={self.start})" + return f"Discrete({self.n})" + + def __eq__(self, other) -> bool: + """Check whether ``other`` is equivalent to this instance.""" + return ( + isinstance(other, Discrete) + and self.n == other.n + and self.start == other.start + ) + + def __setstate__(self, state): + """Used when loading a pickled space. + + This method has to be implemented explicitly to allow for loading of legacy states. + + Args: + state: The new state + """ + # Don't mutate the original state + state = dict(state) + + # Allow for loading of legacy states. + # See https://github.com/openai/gym/pull/2470 + if "start" not in state: + state["start"] = 0 + + super().__setstate__(state) diff --git a/MLPY/Lib/site-packages/gym/spaces/graph.py b/MLPY/Lib/site-packages/gym/spaces/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..afd5c516a7db0a2206226c9a689551c7d12c4d08 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/graph.py @@ -0,0 +1,238 @@ +"""Implementation of a space that represents graph information where nodes and edges can be represented with euclidean space.""" +from typing import NamedTuple, Optional, Sequence, Tuple, Union + +import numpy as np + +from gym.logger import warn +from gym.spaces.box import Box +from gym.spaces.discrete import Discrete +from gym.spaces.multi_discrete import MultiDiscrete +from gym.spaces.space import Space + + +class GraphInstance(NamedTuple): + """A Graph space instance. + + * nodes (np.ndarray): an (n x ...) sized array representing the features for n nodes, (...) must adhere to the shape of the node space. + * edges (Optional[np.ndarray]): an (m x ...) sized array representing the features for m edges, (...) must adhere to the shape of the edge space. + * edge_links (Optional[np.ndarray]): an (m x 2) sized array of ints representing the indices of the two nodes that each edge connects. + """ + + nodes: np.ndarray + edges: Optional[np.ndarray] + edge_links: Optional[np.ndarray] + + +class Graph(Space): + r"""A space representing graph information as a series of `nodes` connected with `edges` according to an adjacency matrix represented as a series of `edge_links`. 
+ + Example usage:: + + self.observation_space = spaces.Graph(node_space=space.Box(low=-100, high=100, shape=(3,)), edge_space=spaces.Discrete(3)) + """ + + def __init__( + self, + node_space: Union[Box, Discrete], + edge_space: Union[None, Box, Discrete], + seed: Optional[Union[int, np.random.Generator]] = None, + ): + r"""Constructor of :class:`Graph`. + + The argument ``node_space`` specifies the base space that each node feature will use. + This argument must be either a Box or Discrete instance. + + The argument ``edge_space`` specifies the base space that each edge feature will use. + This argument must be either a None, Box or Discrete instance. + + Args: + node_space (Union[Box, Discrete]): space of the node features. + edge_space (Union[None, Box, Discrete]): space of the node features. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. + """ + assert isinstance( + node_space, (Box, Discrete) + ), f"Values of the node_space should be instances of Box or Discrete, got {type(node_space)}" + if edge_space is not None: + assert isinstance( + edge_space, (Box, Discrete) + ), f"Values of the edge_space should be instances of None Box or Discrete, got {type(node_space)}" + + self.node_space = node_space + self.edge_space = edge_space + + super().__init__(None, None, seed) + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return False + + def _generate_sample_space( + self, base_space: Union[None, Box, Discrete], num: int + ) -> Optional[Union[Box, MultiDiscrete]]: + if num == 0 or base_space is None: + return None + + if isinstance(base_space, Box): + return Box( + low=np.array(max(1, num) * [base_space.low]), + high=np.array(max(1, num) * [base_space.high]), + shape=(num,) + base_space.shape, + dtype=base_space.dtype, + seed=self.np_random, + ) + elif isinstance(base_space, Discrete): + return MultiDiscrete(nvec=[base_space.n] * num, seed=self.np_random) + else: + raise TypeError( + f"Expects base space to be Box and Discrete, actual space: {type(base_space)}." + ) + + def sample( + self, + mask: Optional[ + Tuple[ + Optional[Union[np.ndarray, tuple]], + Optional[Union[np.ndarray, tuple]], + ] + ] = None, + num_nodes: int = 10, + num_edges: Optional[int] = None, + ) -> GraphInstance: + """Generates a single sample graph with num_nodes between 1 and 10 sampled from the Graph. + + Args: + mask: An optional tuple of optional node and edge mask that is only possible with Discrete spaces + (Box spaces don't support sample masks). + If no `num_edges` is provided then the `edge_mask` is multiplied by the number of edges + num_nodes: The number of nodes that will be sampled, the default is 10 nodes + num_edges: An optional number of edges, otherwise, a random number between 0 and `num_nodes`^2 + + Returns: + A NamedTuple representing a graph with attributes .nodes, .edges, and .edge_links. 
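A small sketch of sampling and membership checking for the graph space (the node/edge spaces and counts are illustrative)::

    from gym.spaces import Box, Discrete, Graph

    space = Graph(node_space=Box(low=-1, high=1, shape=(3,)), edge_space=Discrete(4))
    space.seed(0)

    g = space.sample(num_nodes=5, num_edges=8)
    assert g.nodes.shape == (5, 3)           # one Box feature vector per node
    assert g.edges.shape == (8,)             # one Discrete label per edge
    assert g.edge_links.shape == (8, 2)      # (from, to) node indices per edge
    assert g in space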
+ """ + assert ( + num_nodes > 0 + ), f"The number of nodes is expected to be greater than 0, actual value: {num_nodes}" + + if mask is not None: + node_space_mask, edge_space_mask = mask + else: + node_space_mask, edge_space_mask = None, None + + # we only have edges when we have at least 2 nodes + if num_edges is None: + if num_nodes > 1: + # maximal number of edges is `n*(n-1)` allowing self connections and two-way is allowed + num_edges = self.np_random.integers(num_nodes * (num_nodes - 1)) + else: + num_edges = 0 + + if edge_space_mask is not None: + edge_space_mask = tuple(edge_space_mask for _ in range(num_edges)) + else: + if self.edge_space is None: + warn( + f"The number of edges is set ({num_edges}) but the edge space is None." + ) + assert ( + num_edges >= 0 + ), f"Expects the number of edges to be greater than 0, actual value: {num_edges}" + assert num_edges is not None + + sampled_node_space = self._generate_sample_space(self.node_space, num_nodes) + sampled_edge_space = self._generate_sample_space(self.edge_space, num_edges) + + assert sampled_node_space is not None + sampled_nodes = sampled_node_space.sample(node_space_mask) + sampled_edges = ( + sampled_edge_space.sample(edge_space_mask) + if sampled_edge_space is not None + else None + ) + + sampled_edge_links = None + if sampled_edges is not None and num_edges > 0: + sampled_edge_links = self.np_random.integers( + low=0, high=num_nodes, size=(num_edges, 2) + ) + + return GraphInstance(sampled_nodes, sampled_edges, sampled_edge_links) + + def contains(self, x: GraphInstance) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, GraphInstance): + # Checks the nodes + if isinstance(x.nodes, np.ndarray): + if all(node in self.node_space for node in x.nodes): + # Check the edges and edge links which are optional + if isinstance(x.edges, np.ndarray) and isinstance( + x.edge_links, np.ndarray + ): + assert x.edges is not None + assert x.edge_links is not None + if self.edge_space is not None: + if all(edge in self.edge_space for edge in x.edges): + if np.issubdtype(x.edge_links.dtype, np.integer): + if x.edge_links.shape == (len(x.edges), 2): + if np.all( + np.logical_and( + x.edge_links >= 0, + x.edge_links < len(x.nodes), + ) + ): + return True + else: + return x.edges is None and x.edge_links is None + return False + + def __repr__(self) -> str: + """A string representation of this space. 
+ + The representation will include node_space and edge_space + + Returns: + A representation of the space + """ + return f"Graph({self.node_space}, {self.edge_space})" + + def __eq__(self, other) -> bool: + """Check whether `other` is equivalent to this instance.""" + return ( + isinstance(other, Graph) + and (self.node_space == other.node_space) + and (self.edge_space == other.edge_space) + ) + + def to_jsonable(self, sample_n: NamedTuple) -> list: + """Convert a batch of samples from this space to a JSONable data type.""" + # serialize as list of dicts + ret_n = [] + for sample in sample_n: + ret = {} + ret["nodes"] = sample.nodes.tolist() + if sample.edges is not None: + ret["edges"] = sample.edges.tolist() + ret["edge_links"] = sample.edge_links.tolist() + ret_n.append(ret) + return ret_n + + def from_jsonable(self, sample_n: Sequence[dict]) -> list: + """Convert a JSONable data type to a batch of samples from this space.""" + ret = [] + for sample in sample_n: + if "edges" in sample: + ret_n = GraphInstance( + np.asarray(sample["nodes"]), + np.asarray(sample["edges"]), + np.asarray(sample["edge_links"]), + ) + else: + ret_n = GraphInstance( + np.asarray(sample["nodes"]), + None, + None, + ) + ret.append(ret_n) + return ret diff --git a/MLPY/Lib/site-packages/gym/spaces/multi_binary.py b/MLPY/Lib/site-packages/gym/spaces/multi_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..0c384fb48ea4cc0094617ce7277602ef0132d59f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/multi_binary.py @@ -0,0 +1,118 @@ +"""Implementation of a space that consists of binary np.ndarrays of a fixed shape.""" +from typing import Optional, Sequence, Tuple, Union + +import numpy as np + +from gym.spaces.space import Space + + +class MultiBinary(Space[np.ndarray]): + """An n-shape binary space. + + Elements of this space are binary arrays of a shape that is fixed during construction. + + Example Usage:: + + >>> observation_space = MultiBinary(5) + >>> observation_space.sample() + array([0, 1, 0, 1, 0], dtype=int8) + >>> observation_space = MultiBinary([3, 2]) + >>> observation_space.sample() + array([[0, 0], + [0, 1], + [1, 1]], dtype=int8) + """ + + def __init__( + self, + n: Union[np.ndarray, Sequence[int], int], + seed: Optional[Union[int, np.random.Generator]] = None, + ): + """Constructor of :class:`MultiBinary` space. + + Args: + n: This will fix the shape of elements of the space. It can either be an integer (if the space is flat) + or some sort of sequence (tuple, list or np.ndarray) if there are multiple axes. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. + """ + if isinstance(n, (Sequence, np.ndarray)): + self.n = input_n = tuple(int(i) for i in n) + assert (np.asarray(input_n) > 0).all() # n (counts) have to be positive + else: + self.n = n = int(n) + input_n = (n,) + assert (np.asarray(input_n) > 0).all() # n (counts) have to be positive + + super().__init__(input_n, np.int8, seed) + + @property + def shape(self) -> Tuple[int, ...]: + """Has stricter type than gym.Space - never None.""" + return self._shape # type: ignore + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return True + + def sample(self, mask: Optional[np.ndarray] = None) -> np.ndarray: + """Generates a single random sample from this space. + + A sample is drawn by independent, fair coin tosses (one toss per binary variable of the space). 
+ + Args: + mask: An optional np.ndarray to mask samples with expected shape of ``space.shape``. + For mask == 0 then the samples will be 0 and mask == 1 then random samples will be generated. + The expected mask shape is the space shape and mask dtype is `np.int8`. + + Returns: + Sampled values from space + """ + if mask is not None: + assert isinstance( + mask, np.ndarray + ), f"The expected type of the mask is np.ndarray, actual type: {type(mask)}" + assert ( + mask.dtype == np.int8 + ), f"The expected dtype of the mask is np.int8, actual dtype: {mask.dtype}" + assert ( + mask.shape == self.shape + ), f"The expected shape of the mask is {self.shape}, actual shape: {mask.shape}" + assert np.all( + (mask == 0) | (mask == 1) | (mask == 2) + ), f"All values of a mask should be 0, 1 or 2, actual values: {mask}" + + return np.where( + mask == 2, + self.np_random.integers(low=0, high=2, size=self.n, dtype=self.dtype), + mask.astype(self.dtype), + ) + + return self.np_random.integers(low=0, high=2, size=self.n, dtype=self.dtype) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, Sequence): + x = np.array(x) # Promote list to array for contains check + + return bool( + isinstance(x, np.ndarray) + and self.shape == x.shape + and np.all((x == 0) | (x == 1)) + ) + + def to_jsonable(self, sample_n) -> list: + """Convert a batch of samples from this space to a JSONable data type.""" + return np.array(sample_n).tolist() + + def from_jsonable(self, sample_n) -> list: + """Convert a JSONable data type to a batch of samples from this space.""" + return [np.asarray(sample, self.dtype) for sample in sample_n] + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + return f"MultiBinary({self.n})" + + def __eq__(self, other) -> bool: + """Check whether `other` is equivalent to this instance.""" + return isinstance(other, MultiBinary) and self.n == other.n diff --git a/MLPY/Lib/site-packages/gym/spaces/multi_discrete.py b/MLPY/Lib/site-packages/gym/spaces/multi_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..5349865e4152d08de42cb6e7febc6a49d9db94c6 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/multi_discrete.py @@ -0,0 +1,175 @@ +"""Implementation of a space that represents the cartesian product of `Discrete` spaces.""" +from typing import Iterable, List, Optional, Sequence, Tuple, Union + +import numpy as np + +from gym import logger +from gym.spaces.discrete import Discrete +from gym.spaces.space import Space + + +class MultiDiscrete(Space[np.ndarray]): + """This represents the cartesian product of arbitrary :class:`Discrete` spaces. + + It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space. + + Note: + Some environment wrappers assume a value of 0 always represents the NOOP action. + + e.g. Nintendo Game Controller - Can be conceptualized as 3 discrete action spaces: + + 1. Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4 + 2. Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 + 3. Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 + + It can be initialized as ``MultiDiscrete([ 5, 2, 2 ])`` such that a sample might be ``array([3, 1, 0])``. 
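Note that in the implementation above a mask entry of 0 or 1 pins the coordinate to that value, while an entry of 2 leaves it to be sampled randomly; a small sketch (the shape is chosen for illustration)::

    import numpy as np
    from gym.spaces import MultiBinary

    space = MultiBinary(4)
    space.seed(0)

    mask = np.array([0, 1, 2, 2], dtype=np.int8)   # pin, pin, sample, sample
    sample = space.sample(mask)
    assert sample[0] == 0 and sample[1] == 1
    assert sample in space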
+ + Although this feature is rarely used, :class:`MultiDiscrete` spaces may also have several axes + if ``nvec`` has several axes: + + Example:: + + >> d = MultiDiscrete(np.array([[1, 2], [3, 4]])) + >> d.sample() + array([[0, 0], + [2, 3]]) + """ + + def __init__( + self, + nvec: Union[np.ndarray, list], + dtype=np.int64, + seed: Optional[Union[int, np.random.Generator]] = None, + ): + """Constructor of :class:`MultiDiscrete` space. + + The argument ``nvec`` will determine the number of values each categorical variable can take. + + Args: + nvec: vector of counts of each categorical variable. This will usually be a list of integers. However, + you may also pass a more complicated numpy array if you'd like the space to have several axes. + dtype: This should be some kind of integer type. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. + """ + self.nvec = np.array(nvec, dtype=dtype, copy=True) + assert (self.nvec > 0).all(), "nvec (counts) have to be positive" + + super().__init__(self.nvec.shape, dtype, seed) + + @property + def shape(self) -> Tuple[int, ...]: + """Has stricter type than :class:`gym.Space` - never None.""" + return self._shape # type: ignore + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return True + + def sample(self, mask: Optional[tuple] = None) -> np.ndarray: + """Generates a single random sample this space. + + Args: + mask: An optional mask for multi-discrete, expects tuples with a `np.ndarray` mask in the position of each + action with shape `(n,)` where `n` is the number of actions and `dtype=np.int8`. + Only mask values == 1 are possible to sample unless all mask values for an action are 0 then the default action 0 is sampled. 
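A sketch of the per-component masking described above, reusing the game-controller layout from the class docstring::

    import numpy as np
    from gym.spaces import MultiDiscrete

    space = MultiDiscrete([5, 2, 2])
    space.seed(0)

    mask = (
        np.array([0, 0, 1, 1, 0], dtype=np.int8),  # arrows: only RIGHT or DOWN
        np.array([1, 1], dtype=np.int8),           # button A: unrestricted
        np.array([0, 0], dtype=np.int8),           # button B: all masked -> falls back to 0
    )
    action = space.sample(mask)
    assert action[0] in (2, 3) and action[2] == 0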
+ + Returns: + An `np.ndarray` of shape `space.shape` + """ + if mask is not None: + + def _apply_mask( + sub_mask: Union[np.ndarray, tuple], + sub_nvec: Union[np.ndarray, np.integer], + ) -> Union[int, List[int]]: + if isinstance(sub_nvec, np.ndarray): + assert isinstance( + sub_mask, tuple + ), f"Expects the mask to be a tuple for sub_nvec ({sub_nvec}), actual type: {type(sub_mask)}" + assert len(sub_mask) == len( + sub_nvec + ), f"Expects the mask length to be equal to the number of actions, mask length: {len(sub_mask)}, nvec length: {len(sub_nvec)}" + return [ + _apply_mask(new_mask, new_nvec) + for new_mask, new_nvec in zip(sub_mask, sub_nvec) + ] + else: + assert np.issubdtype( + type(sub_nvec), np.integer + ), f"Expects the sub_nvec to be an action, actually: {sub_nvec}, {type(sub_nvec)}" + assert isinstance( + sub_mask, np.ndarray + ), f"Expects the sub mask to be np.ndarray, actual type: {type(sub_mask)}" + assert ( + len(sub_mask) == sub_nvec + ), f"Expects the mask length to be equal to the number of actions, mask length: {len(sub_mask)}, action: {sub_nvec}" + assert ( + sub_mask.dtype == np.int8 + ), f"Expects the mask dtype to be np.int8, actual dtype: {sub_mask.dtype}" + + valid_action_mask = sub_mask == 1 + assert np.all( + np.logical_or(sub_mask == 0, valid_action_mask) + ), f"Expects all masks values to 0 or 1, actual values: {sub_mask}" + + if np.any(valid_action_mask): + return self.np_random.choice(np.where(valid_action_mask)[0]) + else: + return 0 + + return np.array(_apply_mask(mask, self.nvec), dtype=self.dtype) + + return (self.np_random.random(self.nvec.shape) * self.nvec).astype(self.dtype) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, Sequence): + x = np.array(x) # Promote list to array for contains check + + # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x + # is within correct bounds for space dtype (even though x does not have to be unsigned) + return bool( + isinstance(x, np.ndarray) + and x.shape == self.shape + and x.dtype != object + and np.all(0 <= x) + and np.all(x < self.nvec) + ) + + def to_jsonable(self, sample_n: Iterable[np.ndarray]): + """Convert a batch of samples from this space to a JSONable data type.""" + return [sample.tolist() for sample in sample_n] + + def from_jsonable(self, sample_n): + """Convert a JSONable data type to a batch of samples from this space.""" + return np.array(sample_n) + + def __repr__(self): + """Gives a string representation of this space.""" + return f"MultiDiscrete({self.nvec})" + + def __getitem__(self, index): + """Extract a subspace from this ``MultiDiscrete`` space.""" + nvec = self.nvec[index] + if nvec.ndim == 0: + subspace = Discrete(nvec) + else: + subspace = MultiDiscrete(nvec, self.dtype) # type: ignore + + # you don't need to deepcopy as np random generator call replaces the state not the data + subspace.np_random.bit_generator.state = self.np_random.bit_generator.state + + return subspace + + def __len__(self): + """Gives the ``len`` of samples from this space.""" + if self.nvec.ndim >= 2: + logger.warn( + "Getting the length of a multi-dimensional MultiDiscrete space." 
+ ) + return len(self.nvec) + + def __eq__(self, other): + """Check whether ``other`` is equivalent to this instance.""" + return isinstance(other, MultiDiscrete) and np.all(self.nvec == other.nvec) diff --git a/MLPY/Lib/site-packages/gym/spaces/sequence.py b/MLPY/Lib/site-packages/gym/spaces/sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..62ccf8b2728ff9e204084b0c8245b64801c54884 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/sequence.py @@ -0,0 +1,126 @@ +"""Implementation of a space that represents finite-length sequences.""" +from collections.abc import Sequence as CollectionSequence +from typing import Any, List, Optional, Tuple, Union + +import numpy as np + +import gym +from gym.spaces.space import Space + + +class Sequence(Space[Tuple]): + r"""This space represent sets of finite-length sequences. + + This space represents the set of tuples of the form :math:`(a_0, \dots, a_n)` where the :math:`a_i` belong + to some space that is specified during initialization and the integer :math:`n` is not fixed + + Example:: + >>> space = Sequence(Box(0, 1)) + >>> space.sample() + (array([0.0259352], dtype=float32),) + >>> space.sample() + (array([0.80977976], dtype=float32), array([0.80066574], dtype=float32), array([0.77165383], dtype=float32)) + """ + + def __init__( + self, + space: Space, + seed: Optional[Union[int, np.random.Generator]] = None, + ): + """Constructor of the :class:`Sequence` space. + + Args: + space: Elements in the sequences this space represent must belong to this space. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space. + """ + assert isinstance( + space, gym.Space + ), f"Expects the feature space to be instance of a gym Space, actual type: {type(space)}" + self.feature_space = space + super().__init__( + None, None, seed # type: ignore + ) # None for shape and dtype, since it'll require special handling + + def seed(self, seed: Optional[int] = None) -> list: + """Seed the PRNG of this space and the feature space.""" + seeds = super().seed(seed) + seeds += self.feature_space.seed(seed) + return seeds + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return False + + def sample( + self, + mask: Optional[Tuple[Optional[Union[np.ndarray, int]], Optional[Any]]] = None, + ) -> Tuple[Any]: + """Generates a single random sample from this space. + + Args: + mask: An optional mask for (optionally) the length of the sequence and (optionally) the values in the sequence. + If you specify `mask`, it is expected to be a tuple of the form `(length_mask, sample_mask)` where `length_mask` + is + - `None` The length will be randomly drawn from a geometric distribution + - `np.ndarray` of integers, in which case the length of the sampled sequence is randomly drawn from this array. + - `int` for a fixed length sample + The second element of the mask tuple `sample` mask specifies a mask that is applied when + sampling elements from the base space. The mask is applied for each feature space sample. + + Returns: + A tuple of random length with random samples of elements from the :attr:`feature_space`. 
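To make the length handling above concrete, a brief sketch (the feature space is illustrative)::

    from gym.spaces import Box, Sequence

    space = Sequence(Box(0, 1, shape=(2,)))
    space.seed(0)

    episode = space.sample()                   # length drawn from a geometric(0.25) distribution
    assert isinstance(episode, tuple)

    fixed = space.sample(mask=(3, None))       # an integer length mask fixes the length
    assert len(fixed) == 3 and all(item in space.feature_space for item in fixed)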
+ """ + if mask is not None: + length_mask, feature_mask = mask + else: + length_mask, feature_mask = None, None + + if length_mask is not None: + if np.issubdtype(type(length_mask), np.integer): + assert ( + 0 <= length_mask + ), f"Expects the length mask to be greater than or equal to zero, actual value: {length_mask}" + length = length_mask + elif isinstance(length_mask, np.ndarray): + assert ( + len(length_mask.shape) == 1 + ), f"Expects the shape of the length mask to be 1-dimensional, actual shape: {length_mask.shape}" + assert np.all( + 0 <= length_mask + ), f"Expects all values in the length_mask to be greater than or equal to zero, actual values: {length_mask}" + length = self.np_random.choice(length_mask) + else: + raise TypeError( + f"Expects the type of length_mask to an integer or a np.ndarray, actual type: {type(length_mask)}" + ) + else: + # The choice of 0.25 is arbitrary + length = self.np_random.geometric(0.25) + + return tuple( + self.feature_space.sample(mask=feature_mask) for _ in range(length) + ) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + return isinstance(x, CollectionSequence) and all( + self.feature_space.contains(item) for item in x + ) + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + return f"Sequence({self.feature_space})" + + def to_jsonable(self, sample_n: list) -> list: + """Convert a batch of samples from this space to a JSONable data type.""" + # serialize as dict-repr of vectors + return [self.feature_space.to_jsonable(list(sample)) for sample in sample_n] + + def from_jsonable(self, sample_n: List[List[Any]]) -> list: + """Convert a JSONable data type to a batch of samples from this space.""" + return [tuple(self.feature_space.from_jsonable(sample)) for sample in sample_n] + + def __eq__(self, other) -> bool: + """Check whether ``other`` is equivalent to this instance.""" + return isinstance(other, Sequence) and self.feature_space == other.feature_space diff --git a/MLPY/Lib/site-packages/gym/spaces/space.py b/MLPY/Lib/site-packages/gym/spaces/space.py new file mode 100644 index 0000000000000000000000000000000000000000..753ab6e31e9683d32f3877d64926c9eb3499440a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/space.py @@ -0,0 +1,150 @@ +"""Implementation of the `Space` metaclass.""" + +from typing import ( + Any, + Generic, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) + +import numpy as np + +from gym.utils import seeding + +T_cov = TypeVar("T_cov", covariant=True) + + +class Space(Generic[T_cov]): + """Superclass that is used to define observation and action spaces. + + Spaces are crucially used in Gym to define the format of valid actions and observations. + They serve various purposes: + + * They clearly define how to interact with environments, i.e. they specify what actions need to look like + and what observations will look like + * They allow us to work with highly structured data (e.g. in the form of elements of :class:`Dict` spaces) + and painlessly transform them into flat arrays that can be used in learning code + * They provide a method to sample random elements. This is especially useful for exploration and debugging. + + Different spaces can be combined hierarchically via container spaces (:class:`Tuple` and :class:`Dict`) to build a + more expressive space + + Warning: + Custom observation & action spaces can inherit from the ``Space`` + class. 
However, most use-cases should be covered by the existing space + classes (e.g. :class:`Box`, :class:`Discrete`, etc...), and container classes (:class`Tuple` & + :class:`Dict`). Note that parametrized probability distributions (through the + :meth:`Space.sample()` method), and batching functions (in :class:`gym.vector.VectorEnv`), are + only well-defined for instances of spaces provided in gym by default. + Moreover, some implementations of Reinforcement Learning algorithms might + not handle custom spaces properly. Use custom spaces with care. + """ + + def __init__( + self, + shape: Optional[Sequence[int]] = None, + dtype: Optional[Union[Type, str, np.dtype]] = None, + seed: Optional[Union[int, np.random.Generator]] = None, + ): + """Constructor of :class:`Space`. + + Args: + shape (Optional[Sequence[int]]): If elements of the space are numpy arrays, this should specify their shape. + dtype (Optional[Type | str]): If elements of the space are numpy arrays, this should specify their dtype. + seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space + """ + self._shape = None if shape is None else tuple(shape) + self.dtype = None if dtype is None else np.dtype(dtype) + self._np_random = None + if seed is not None: + if isinstance(seed, np.random.Generator): + self._np_random = seed + else: + self.seed(seed) + + @property + def np_random(self) -> np.random.Generator: + """Lazily seed the PRNG since this is expensive and only needed if sampling from this space.""" + if self._np_random is None: + self.seed() + + return self._np_random # type: ignore ## self.seed() call guarantees right type. + + @property + def shape(self) -> Optional[Tuple[int, ...]]: + """Return the shape of the space as an immutable property.""" + return self._shape + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + raise NotImplementedError + + def sample(self, mask: Optional[Any] = None) -> T_cov: + """Randomly sample an element of this space. + + Can be uniform or non-uniform sampling based on boundedness of space. + + Args: + mask: A mask used for sampling, expected ``dtype=np.int8`` and see sample implementation for expected shape. + + Returns: + A sampled actions from the space + """ + raise NotImplementedError + + def seed(self, seed: Optional[int] = None) -> list: + """Seed the PRNG of this space and possibly the PRNGs of subspaces.""" + self._np_random, seed = seeding.np_random(seed) + return [seed] + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + raise NotImplementedError + + def __contains__(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + return self.contains(x) + + def __setstate__(self, state: Union[Iterable, Mapping]): + """Used when loading a pickled space. + + This method was implemented explicitly to allow for loading of legacy states. + + Args: + state: The updated state value + """ + # Don't mutate the original state + state = dict(state) + + # Allow for loading of legacy states. 
+ # See: + # https://github.com/openai/gym/pull/2397 -- shape + # https://github.com/openai/gym/pull/1913 -- np_random + # + if "shape" in state: + state["_shape"] = state["shape"] + del state["shape"] + if "np_random" in state: + state["_np_random"] = state["np_random"] + del state["np_random"] + + # Update our state + self.__dict__.update(state) + + def to_jsonable(self, sample_n: Sequence[T_cov]) -> list: + """Convert a batch of samples from this space to a JSONable data type.""" + # By default, assume identity is JSONable + return list(sample_n) + + def from_jsonable(self, sample_n: list) -> List[T_cov]: + """Convert a JSONable data type to a batch of samples from this space.""" + # By default, assume identity is JSONable + return sample_n diff --git a/MLPY/Lib/site-packages/gym/spaces/text.py b/MLPY/Lib/site-packages/gym/spaces/text.py new file mode 100644 index 0000000000000000000000000000000000000000..1ac184e46aba9fdcd117de718ba438bf3bd6d571 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/text.py @@ -0,0 +1,185 @@ +"""Implementation of a space that represents textual strings.""" +from typing import Any, Dict, FrozenSet, Optional, Set, Tuple, Union + +import numpy as np + +from gym.spaces.space import Space + +alphanumeric: FrozenSet[str] = frozenset( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" +) + + +class Text(Space[str]): + r"""A space representing a string comprised of characters from a given charset. + + Example:: + >>> # {"", "B5", "hello", ...} + >>> Text(5) + >>> # {"0", "42", "0123456789", ...} + >>> import string + >>> Text(min_length = 1, + ... max_length = 10, + ... charset = string.digits) + """ + + def __init__( + self, + max_length: int, + *, + min_length: int = 1, + charset: Union[Set[str], str] = alphanumeric, + seed: Optional[Union[int, np.random.Generator]] = None, + ): + r"""Constructor of :class:`Text` space. + + Both bounds for text length are inclusive. + + Args: + min_length (int): Minimum text length (in characters). Defaults to 1 to prevent empty strings. + max_length (int): Maximum text length (in characters). + charset (Union[set], str): Character set, defaults to the lower and upper english alphabet plus latin digits. + seed: The seed for sampling from the space. + """ + assert np.issubdtype( + type(min_length), np.integer + ), f"Expects the min_length to be an integer, actual type: {type(min_length)}" + assert np.issubdtype( + type(max_length), np.integer + ), f"Expects the max_length to be an integer, actual type: {type(max_length)}" + assert ( + 0 <= min_length + ), f"Minimum text length must be non-negative, actual value: {min_length}" + assert ( + min_length <= max_length + ), f"The min_length must be less than or equal to the max_length, min_length: {min_length}, max_length: {max_length}" + + self.min_length: int = int(min_length) + self.max_length: int = int(max_length) + + self._char_set: FrozenSet[str] = frozenset(charset) + self._char_list: Tuple[str, ...] = tuple(charset) + self._char_index: Dict[str, np.int32] = { + val: np.int32(i) for i, val in enumerate(tuple(charset)) + } + self._char_str: str = "".join(sorted(tuple(charset))) + + # As the shape is dynamic (between min_length and max_length) then None + super().__init__(dtype=str, seed=seed) + + def sample( + self, mask: Optional[Tuple[Optional[int], Optional[np.ndarray]]] = None + ) -> str: + """Generates a single random sample from this space with by default a random length between `min_length` and `max_length` and sampled from the `charset`. 
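A short sampling sketch using the digits example from the class docstring above::

    import string
    from gym.spaces import Text

    space = Text(min_length=1, max_length=10, charset=string.digits)
    space.seed(0)

    word = space.sample()                       # a string of 1 to 10 digits
    assert 1 <= len(word) <= 10 and word in space
    assert space.character_index("7") == 7      # stable index into the charset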
+ + Args: + mask: An optional tuples of length and mask for the text. + The length is expected to be between the `min_length` and `max_length` otherwise a random integer between `min_length` and `max_length` is selected. + For the mask, we expect a numpy array of length of the charset passed with `dtype == np.int8`. + If the charlist mask is all zero then an empty string is returned no matter the `min_length` + + Returns: + A sampled string from the space + """ + if mask is not None: + assert isinstance( + mask, tuple + ), f"Expects the mask type to be a tuple, actual type: {type(mask)}" + assert ( + len(mask) == 2 + ), f"Expects the mask length to be two, actual length: {len(mask)}" + length, charlist_mask = mask + + if length is not None: + assert np.issubdtype( + type(length), np.integer + ), f"Expects the Text sample length to be an integer, actual type: {type(length)}" + assert ( + self.min_length <= length <= self.max_length + ), f"Expects the Text sample length be between {self.min_length} and {self.max_length}, actual length: {length}" + + if charlist_mask is not None: + assert isinstance( + charlist_mask, np.ndarray + ), f"Expects the Text sample mask to be an np.ndarray, actual type: {type(charlist_mask)}" + assert ( + charlist_mask.dtype == np.int8 + ), f"Expects the Text sample mask to be an np.ndarray, actual dtype: {charlist_mask.dtype}" + assert charlist_mask.shape == ( + len(self.character_set), + ), f"expects the Text sample mask to be {(len(self.character_set),)}, actual shape: {charlist_mask.shape}" + assert np.all( + np.logical_or(charlist_mask == 0, charlist_mask == 1) + ), f"Expects all masks values to 0 or 1, actual values: {charlist_mask}" + else: + length, charlist_mask = None, None + + if length is None: + length = self.np_random.integers(self.min_length, self.max_length + 1) + + if charlist_mask is None: + string = self.np_random.choice(self.character_list, size=length) + else: + valid_mask = charlist_mask == 1 + valid_indexes = np.where(valid_mask)[0] + if len(valid_indexes) == 0: + if self.min_length == 0: + string = "" + else: + # Otherwise the string will not be contained in the space + raise ValueError( + f"Trying to sample with a minimum length > 0 ({self.min_length}) but the character mask is all zero meaning that no character could be sampled." 
+ ) + else: + string = "".join( + self.character_list[index] + for index in self.np_random.choice(valid_indexes, size=length) + ) + + return "".join(string) + + def contains(self, x: Any) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, str): + if self.min_length <= len(x) <= self.max_length: + return all(c in self.character_set for c in x) + return False + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + return ( + f"Text({self.min_length}, {self.max_length}, characters={self.characters})" + ) + + def __eq__(self, other) -> bool: + """Check whether ``other`` is equivalent to this instance.""" + return ( + isinstance(other, Text) + and self.min_length == other.min_length + and self.max_length == other.max_length + and self.character_set == other.character_set + ) + + @property + def character_set(self) -> FrozenSet[str]: + """Returns the character set for the space.""" + return self._char_set + + @property + def character_list(self) -> Tuple[str, ...]: + """Returns a tuple of characters in the space.""" + return self._char_list + + def character_index(self, char: str) -> np.int32: + """Returns a unique index for each character in the space's character set.""" + return self._char_index[char] + + @property + def characters(self) -> str: + """Returns a string with all Text characters.""" + return self._char_str + + @property + def is_np_flattenable(self) -> bool: + """The flattened version is an integer array for each character, padded to the max character length.""" + return True diff --git a/MLPY/Lib/site-packages/gym/spaces/tuple.py b/MLPY/Lib/site-packages/gym/spaces/tuple.py new file mode 100644 index 0000000000000000000000000000000000000000..e37f8cda15b2d2ce786e44a301ae4d8f67222eea --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/tuple.py @@ -0,0 +1,162 @@ +"""Implementation of a space that represents the cartesian product of other spaces.""" +from collections.abc import Sequence as CollectionSequence +from typing import Iterable, Optional +from typing import Sequence as TypingSequence +from typing import Tuple as TypingTuple +from typing import Union + +import numpy as np + +from gym.spaces.space import Space + + +class Tuple(Space[tuple], CollectionSequence): + """A tuple (more precisely: the cartesian product) of :class:`Space` instances. + + Elements of this space are tuples of elements of the constituent spaces. + + Example usage:: + + >>> from gym.spaces import Box, Discrete + >>> observation_space = Tuple((Discrete(2), Box(-1, 1, shape=(2,)))) + >>> observation_space.sample() + (0, array([0.03633198, 0.42370757], dtype=float32)) + """ + + def __init__( + self, + spaces: Iterable[Space], + seed: Optional[Union[int, TypingSequence[int], np.random.Generator]] = None, + ): + r"""Constructor of :class:`Tuple` space. + + The generated instance will represent the cartesian product :math:`\text{spaces}[0] \times ... \times \text{spaces}[-1]`. + + Args: + spaces (Iterable[Space]): The spaces that are involved in the cartesian product. + seed: Optionally, you can use this argument to seed the RNGs of the ``spaces`` to ensure reproducible sampling. 
+ """ + self.spaces = tuple(spaces) + for space in self.spaces: + assert isinstance( + space, Space + ), "Elements of the tuple must be instances of gym.Space" + super().__init__(None, None, seed) # type: ignore + + @property + def is_np_flattenable(self): + """Checks whether this space can be flattened to a :class:`spaces.Box`.""" + return all(space.is_np_flattenable for space in self.spaces) + + def seed( + self, seed: Optional[Union[int, TypingSequence[int]]] = None + ) -> TypingSequence[int]: + """Seed the PRNG of this space and all subspaces. + + Depending on the type of seed, the subspaces will be seeded differently + * None - All the subspaces will use a random initial seed + * Int - The integer is used to seed the `Tuple` space that is used to generate seed values for each of the subspaces. Warning, this does not guarantee unique seeds for all of the subspaces. + * List - Values used to seed the subspaces. This allows the seeding of multiple composite subspaces (`List(42, 54, ...)`). + + Args: + seed: An optional list of ints or int to seed the (sub-)spaces. + """ + seeds = [] + + if isinstance(seed, CollectionSequence): + assert len(seed) == len( + self.spaces + ), f"Expects that the subspaces of seeds equals the number of subspaces. Actual length of seeds: {len(seeds)}, length of subspaces: {len(self.spaces)}" + for subseed, space in zip(seed, self.spaces): + seeds += space.seed(subseed) + elif isinstance(seed, int): + seeds = super().seed(seed) + subseeds = self.np_random.integers( + np.iinfo(np.int32).max, size=len(self.spaces) + ) + for subspace, subseed in zip(self.spaces, subseeds): + seeds += subspace.seed(int(subseed)) + elif seed is None: + for space in self.spaces: + seeds += space.seed(seed) + else: + raise TypeError( + f"Expected seed type: list, tuple, int or None, actual type: {type(seed)}" + ) + + return seeds + + def sample( + self, mask: Optional[TypingTuple[Optional[np.ndarray], ...]] = None + ) -> tuple: + """Generates a single random sample inside this space. + + This method draws independent samples from the subspaces. 
+ + Args: + mask: An optional tuple of optional masks for each of the subspace's samples, + expects the same number of masks as spaces + + Returns: + Tuple of the subspace's samples + """ + if mask is not None: + assert isinstance( + mask, tuple + ), f"Expected type of mask is tuple, actual type: {type(mask)}" + assert len(mask) == len( + self.spaces + ), f"Expected length of mask is {len(self.spaces)}, actual length: {len(mask)}" + + return tuple( + space.sample(mask=sub_mask) + for space, sub_mask in zip(self.spaces, mask) + ) + + return tuple(space.sample() for space in self.spaces) + + def contains(self, x) -> bool: + """Return boolean specifying if x is a valid member of this space.""" + if isinstance(x, (list, np.ndarray)): + x = tuple(x) # Promote list and ndarray to tuple for contains check + return ( + isinstance(x, tuple) + and len(x) == len(self.spaces) + and all(space.contains(part) for (space, part) in zip(self.spaces, x)) + ) + + def __repr__(self) -> str: + """Gives a string representation of this space.""" + return "Tuple(" + ", ".join([str(s) for s in self.spaces]) + ")" + + def to_jsonable(self, sample_n: CollectionSequence) -> list: + """Convert a batch of samples from this space to a JSONable data type.""" + # serialize as list-repr of tuple of vectors + return [ + space.to_jsonable([sample[i] for sample in sample_n]) + for i, space in enumerate(self.spaces) + ] + + def from_jsonable(self, sample_n) -> list: + """Convert a JSONable data type to a batch of samples from this space.""" + return [ + sample + for sample in zip( + *[ + space.from_jsonable(sample_n[i]) + for i, space in enumerate(self.spaces) + ] + ) + ] + + def __getitem__(self, index: int) -> Space: + """Get the subspace at specific `index`.""" + return self.spaces[index] + + def __len__(self) -> int: + """Get the number of subspaces that are involved in the cartesian product.""" + return len(self.spaces) + + def __eq__(self, other) -> bool: + """Check whether ``other`` is equivalent to this instance.""" + return isinstance(other, Tuple) and self.spaces == other.spaces diff --git a/MLPY/Lib/site-packages/gym/spaces/utils.py b/MLPY/Lib/site-packages/gym/spaces/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..818292151bf18241429bab32cc5335b39bcb5040 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/spaces/utils.py @@ -0,0 +1,450 @@ +"""Implementation of utility functions that can be applied to spaces. + +These functions mostly take care of flattening and unflattening elements of spaces + to facilitate their usage in learning code. +""" +import operator as op +from collections import OrderedDict +from functools import reduce, singledispatch +from typing import Dict as TypingDict +from typing import TypeVar, Union, cast + +import numpy as np + +from gym.spaces import ( + Box, + Dict, + Discrete, + Graph, + GraphInstance, + MultiBinary, + MultiDiscrete, + Sequence, + Space, + Text, + Tuple, +) + + +@singledispatch +def flatdim(space: Space) -> int: + """Return the number of dimensions a flattened equivalent of this space would have. + + Example usage:: + + >>> from gym.spaces import Discrete + >>> space = Dict({"position": Discrete(2), "velocity": Discrete(3)}) + >>> flatdim(space) + 5 + + Args: + space: The space to return the number of dimensions of the flattened spaces + + Returns: + The number of dimensions for the flattened spaces + + Raises: + NotImplementedError: if the space is not defined in ``gym.spaces``. 
+ ValueError: if the space cannot be flattened into a :class:`Box` + """ + if not space.is_np_flattenable: + raise ValueError( + f"{space} cannot be flattened to a numpy array, probably because it contains a `Graph` or `Sequence` subspace" + ) + + raise NotImplementedError(f"Unknown space: `{space}`") + + +@flatdim.register(Box) +@flatdim.register(MultiBinary) +def _flatdim_box_multibinary(space: Union[Box, MultiBinary]) -> int: + return reduce(op.mul, space.shape, 1) + + +@flatdim.register(Discrete) +def _flatdim_discrete(space: Discrete) -> int: + return int(space.n) + + +@flatdim.register(MultiDiscrete) +def _flatdim_multidiscrete(space: MultiDiscrete) -> int: + return int(np.sum(space.nvec)) + + +@flatdim.register(Tuple) +def _flatdim_tuple(space: Tuple) -> int: + if space.is_np_flattenable: + return sum(flatdim(s) for s in space.spaces) + raise ValueError( + f"{space} cannot be flattened to a numpy array, probably because it contains a `Graph` or `Sequence` subspace" + ) + + +@flatdim.register(Dict) +def _flatdim_dict(space: Dict) -> int: + if space.is_np_flattenable: + return sum(flatdim(s) for s in space.spaces.values()) + raise ValueError( + f"{space} cannot be flattened to a numpy array, probably because it contains a `Graph` or `Sequence` subspace" + ) + + +@flatdim.register(Graph) +def _flatdim_graph(space: Graph): + raise ValueError( + "Cannot get flattened size as the Graph Space in Gym has a dynamic size." + ) + + +@flatdim.register(Text) +def _flatdim_text(space: Text) -> int: + return space.max_length + + +T = TypeVar("T") +FlatType = Union[np.ndarray, TypingDict, tuple, GraphInstance] + + +@singledispatch +def flatten(space: Space[T], x: T) -> FlatType: + """Flatten a data point from a space. + + This is useful when e.g. points from spaces must be passed to a neural + network, which only understands flat arrays of floats. + + Args: + space: The space that ``x`` is flattened by + x: The value to flatten + + Returns: + - For ``Box`` and ``MultiBinary``, this is a flattened array + - For ``Discrete`` and ``MultiDiscrete``, this is a flattened one-hot array of the sample + - For ``Tuple`` and ``Dict``, this is a concatenated array the subspaces (does not support graph subspaces) + - For graph spaces, returns `GraphInstance` where: + - `nodes` are n x k arrays + - `edges` are either: + - m x k arrays + - None + - `edge_links` are either: + - m x 2 arrays + - None + + Raises: + NotImplementedError: If the space is not defined in ``gym.spaces``. 
+ """ + raise NotImplementedError(f"Unknown space: `{space}`") + + +@flatten.register(Box) +@flatten.register(MultiBinary) +def _flatten_box_multibinary(space, x) -> np.ndarray: + return np.asarray(x, dtype=space.dtype).flatten() + + +@flatten.register(Discrete) +def _flatten_discrete(space, x) -> np.ndarray: + onehot = np.zeros(space.n, dtype=space.dtype) + onehot[x - space.start] = 1 + return onehot + + +@flatten.register(MultiDiscrete) +def _flatten_multidiscrete(space, x) -> np.ndarray: + offsets = np.zeros((space.nvec.size + 1,), dtype=space.dtype) + offsets[1:] = np.cumsum(space.nvec.flatten()) + + onehot = np.zeros((offsets[-1],), dtype=space.dtype) + onehot[offsets[:-1] + x.flatten()] = 1 + return onehot + + +@flatten.register(Tuple) +def _flatten_tuple(space, x) -> Union[tuple, np.ndarray]: + if space.is_np_flattenable: + return np.concatenate( + [flatten(s, x_part) for x_part, s in zip(x, space.spaces)] + ) + return tuple(flatten(s, x_part) for x_part, s in zip(x, space.spaces)) + + +@flatten.register(Dict) +def _flatten_dict(space, x) -> Union[dict, np.ndarray]: + if space.is_np_flattenable: + return np.concatenate([flatten(s, x[key]) for key, s in space.spaces.items()]) + return OrderedDict((key, flatten(s, x[key])) for key, s in space.spaces.items()) + + +@flatten.register(Graph) +def _flatten_graph(space, x) -> GraphInstance: + """We're not using `.unflatten() for :class:`Box` and :class:`Discrete` because a graph is not a homogeneous space, see `.flatten` docstring.""" + + def _graph_unflatten(unflatten_space, unflatten_x): + ret = None + if unflatten_space is not None and unflatten_x is not None: + if isinstance(unflatten_space, Box): + ret = unflatten_x.reshape(unflatten_x.shape[0], -1) + elif isinstance(unflatten_space, Discrete): + ret = np.zeros( + (unflatten_x.shape[0], unflatten_space.n - unflatten_space.start), + dtype=unflatten_space.dtype, + ) + ret[ + np.arange(unflatten_x.shape[0]), unflatten_x - unflatten_space.start + ] = 1 + return ret + + nodes = _graph_unflatten(space.node_space, x.nodes) + edges = _graph_unflatten(space.edge_space, x.edges) + + return GraphInstance(nodes, edges, x.edge_links) + + +@flatten.register(Text) +def _flatten_text(space: Text, x: str) -> np.ndarray: + arr = np.full( + shape=(space.max_length,), fill_value=len(space.character_set), dtype=np.int32 + ) + for i, val in enumerate(x): + arr[i] = space.character_index(val) + return arr + + +@flatten.register(Sequence) +def _flatten_sequence(space, x) -> tuple: + return tuple(flatten(space.feature_space, item) for item in x) + + +@singledispatch +def unflatten(space: Space[T], x: FlatType) -> T: + """Unflatten a data point from a space. + + This reverses the transformation applied by :func:`flatten`. You must ensure + that the ``space`` argument is the same as for the :func:`flatten` call. + + Args: + space: The space used to unflatten ``x`` + x: The array to unflatten + + Returns: + A point with a structure that matches the space. + + Raises: + NotImplementedError: if the space is not defined in ``gym.spaces``. 
+ """ + raise NotImplementedError(f"Unknown space: `{space}`") + + +@unflatten.register(Box) +@unflatten.register(MultiBinary) +def _unflatten_box_multibinary( + space: Union[Box, MultiBinary], x: np.ndarray +) -> np.ndarray: + return np.asarray(x, dtype=space.dtype).reshape(space.shape) + + +@unflatten.register(Discrete) +def _unflatten_discrete(space: Discrete, x: np.ndarray) -> int: + return int(space.start + np.nonzero(x)[0][0]) + + +@unflatten.register(MultiDiscrete) +def _unflatten_multidiscrete(space: MultiDiscrete, x: np.ndarray) -> np.ndarray: + offsets = np.zeros((space.nvec.size + 1,), dtype=space.dtype) + offsets[1:] = np.cumsum(space.nvec.flatten()) + + (indices,) = cast(type(offsets[:-1]), np.nonzero(x)) + return np.asarray(indices - offsets[:-1], dtype=space.dtype).reshape(space.shape) + + +@unflatten.register(Tuple) +def _unflatten_tuple(space: Tuple, x: Union[np.ndarray, tuple]) -> tuple: + if space.is_np_flattenable: + assert isinstance( + x, np.ndarray + ), f"{space} is numpy-flattenable. Thus, you should only unflatten numpy arrays for this space. Got a {type(x)}" + dims = np.asarray([flatdim(s) for s in space.spaces], dtype=np.int_) + list_flattened = np.split(x, np.cumsum(dims[:-1])) + return tuple( + unflatten(s, flattened) + for flattened, s in zip(list_flattened, space.spaces) + ) + assert isinstance( + x, tuple + ), f"{space} is not numpy-flattenable. Thus, you should only unflatten tuples for this space. Got a {type(x)}" + return tuple(unflatten(s, flattened) for flattened, s in zip(x, space.spaces)) + + +@unflatten.register(Dict) +def _unflatten_dict(space: Dict, x: Union[np.ndarray, TypingDict]) -> dict: + if space.is_np_flattenable: + dims = np.asarray([flatdim(s) for s in space.spaces.values()], dtype=np.int_) + list_flattened = np.split(x, np.cumsum(dims[:-1])) + return OrderedDict( + [ + (key, unflatten(s, flattened)) + for flattened, (key, s) in zip(list_flattened, space.spaces.items()) + ] + ) + assert isinstance( + x, dict + ), f"{space} is not numpy-flattenable. Thus, you should only unflatten dictionary for this space. Got a {type(x)}" + return OrderedDict((key, unflatten(s, x[key])) for key, s in space.spaces.items()) + + +@unflatten.register(Graph) +def _unflatten_graph(space: Graph, x: GraphInstance) -> GraphInstance: + """We're not using `.unflatten() for :class:`Box` and :class:`Discrete` because a graph is not a homogeneous space. + + The size of the outcome is actually not fixed, but determined based on the number of + nodes and edges in the graph. + """ + + def _graph_unflatten(space, x): + ret = None + if space is not None and x is not None: + if isinstance(space, Box): + ret = x.reshape(-1, *space.shape) + elif isinstance(space, Discrete): + ret = np.asarray(np.nonzero(x))[-1, :] + return ret + + nodes = _graph_unflatten(space.node_space, x.nodes) + edges = _graph_unflatten(space.edge_space, x.edges) + + return GraphInstance(nodes, edges, x.edge_links) + + +@unflatten.register(Text) +def _unflatten_text(space: Text, x: np.ndarray) -> str: + return "".join( + [space.character_list[val] for val in x if val < len(space.character_set)] + ) + + +@unflatten.register(Sequence) +def _unflatten_sequence(space: Sequence, x: tuple) -> tuple: + return tuple(unflatten(space.feature_space, item) for item in x) + + +@singledispatch +def flatten_space(space: Space) -> Union[Dict, Sequence, Tuple, Graph]: + """Flatten a space into a space that is as flat as possible. + + This function will attempt to flatten `space` into a single :class:`Box` space. 
+ However, this might not be possible when `space` is an instance of :class:`Graph`, + :class:`Sequence` or a compound space that contains a :class:`Graph` or :class:`Sequence`space. + This is equivalent to :func:`flatten`, but operates on the space itself. The + result for non-graph spaces is always a `Box` with flat boundaries. While + the result for graph spaces is always a `Graph` with `node_space` being a `Box` + with flat boundaries and `edge_space` being a `Box` with flat boundaries or + `None`. The box has exactly :func:`flatdim` dimensions. Flattening a sample + of the original space has the same effect as taking a sample of the flattenend + space. + + Example:: + + >>> box = Box(0.0, 1.0, shape=(3, 4, 5)) + >>> box + Box(3, 4, 5) + >>> flatten_space(box) + Box(60,) + >>> flatten(box, box.sample()) in flatten_space(box) + True + + Example that flattens a discrete space:: + + >>> discrete = Discrete(5) + >>> flatten_space(discrete) + Box(5,) + >>> flatten(box, box.sample()) in flatten_space(box) + True + + Example that recursively flattens a dict:: + + >>> space = Dict({"position": Discrete(2), "velocity": Box(0, 1, shape=(2, 2))}) + >>> flatten_space(space) + Box(6,) + >>> flatten(space, space.sample()) in flatten_space(space) + True + + + Example that flattens a graph:: + + >>> space = Graph(node_space=Box(low=-100, high=100, shape=(3, 4)), edge_space=Discrete(5)) + >>> flatten_space(space) + Graph(Box(-100.0, 100.0, (12,), float32), Box(0, 1, (5,), int64)) + >>> flatten(space, space.sample()) in flatten_space(space) + True + + Args: + space: The space to flatten + + Returns: + A flattened Box + + Raises: + NotImplementedError: if the space is not defined in ``gym.spaces``. + """ + raise NotImplementedError(f"Unknown space: `{space}`") + + +@flatten_space.register(Box) +def _flatten_space_box(space: Box) -> Box: + return Box(space.low.flatten(), space.high.flatten(), dtype=space.dtype) + + +@flatten_space.register(Discrete) +@flatten_space.register(MultiBinary) +@flatten_space.register(MultiDiscrete) +def _flatten_space_binary(space: Union[Discrete, MultiBinary, MultiDiscrete]) -> Box: + return Box(low=0, high=1, shape=(flatdim(space),), dtype=space.dtype) + + +@flatten_space.register(Tuple) +def _flatten_space_tuple(space: Tuple) -> Union[Box, Tuple]: + if space.is_np_flattenable: + space_list = [flatten_space(s) for s in space.spaces] + return Box( + low=np.concatenate([s.low for s in space_list]), + high=np.concatenate([s.high for s in space_list]), + dtype=np.result_type(*[s.dtype for s in space_list]), + ) + return Tuple(spaces=[flatten_space(s) for s in space.spaces]) + + +@flatten_space.register(Dict) +def _flatten_space_dict(space: Dict) -> Union[Box, Dict]: + if space.is_np_flattenable: + space_list = [flatten_space(s) for s in space.spaces.values()] + return Box( + low=np.concatenate([s.low for s in space_list]), + high=np.concatenate([s.high for s in space_list]), + dtype=np.result_type(*[s.dtype for s in space_list]), + ) + return Dict( + spaces=OrderedDict( + (key, flatten_space(space)) for key, space in space.spaces.items() + ) + ) + + +@flatten_space.register(Graph) +def _flatten_space_graph(space: Graph) -> Graph: + return Graph( + node_space=flatten_space(space.node_space), + edge_space=flatten_space(space.edge_space) + if space.edge_space is not None + else None, + ) + + +@flatten_space.register(Text) +def _flatten_space_text(space: Text) -> Box: + return Box( + low=0, high=len(space.character_set), shape=(space.max_length,), dtype=np.int32 + ) + + 
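+# Editor's note: an illustrative round-trip sketch (not part of the upstream gym source), assuming a
+# small numpy-flattenable Dict space; it shows how flatten, unflatten, flatdim and flatten_space fit together:
+#
+#     >>> space = Dict({"position": Discrete(2), "velocity": Box(0, 1, shape=(2,))})
+#     >>> x = space.sample()
+#     >>> flat = flatten(space, x)          # 1-D array of length flatdim(space) == 4
+#     >>> flat in flatten_space(space)      # the flattened sample lies inside the flattened Box
+#     True
+#     >>> unflatten(space, flat) in space   # unflatten recovers a point with the original structure
+#     True
+
+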
+@flatten_space.register(Sequence) +def _flatten_space_sequence(space: Sequence) -> Sequence: + return Sequence(flatten_space(space.feature_space)) diff --git a/MLPY/Lib/site-packages/gym/utils/__init__.py b/MLPY/Lib/site-packages/gym/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6258a576a2b27481f2f27f444602ef568e220edf --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/__init__.py @@ -0,0 +1,10 @@ +"""A set of common utilities used within the environments. + +These are not intended as API functions, and will not remain stable over time. +""" + +# These submodules should not have any import-time dependencies. +# We want this since we use `utils` during our import-time sanity checks +# that verify that our dependencies are actually present. +from gym.utils.colorize import colorize +from gym.utils.ezpickle import EzPickle diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..771af80739762575a791d03761e68b4d01b236d3 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/colorize.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/colorize.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..050a6a5ae3923496fffaf4954d8afdc90b37fdbb Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/colorize.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/env_checker.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/env_checker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eb1230fe4faf30b43eb5b6990bc0a489a2d1396 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/env_checker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/ezpickle.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/ezpickle.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d260f4a55aec919139e906adbe92a69b9bb62e7 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/ezpickle.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/passive_env_checker.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/passive_env_checker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ba612aa78eff8a99fb3761d90a8ddd8087c9af3 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/passive_env_checker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/play.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/play.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bfa321552141f8ca21ca5a8fcf36dd37e137887 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/play.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/save_video.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/save_video.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2555a88ce9825fa1692d93018f64d22b43b7ca23 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/save_video.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/gym/utils/__pycache__/seeding.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/seeding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc5ed8771ef1f6c44a4f3f2572f54df23c7af638 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/seeding.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/__pycache__/step_api_compatibility.cpython-39.pyc b/MLPY/Lib/site-packages/gym/utils/__pycache__/step_api_compatibility.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f58422bf47d96951bbc4c713b0db04fbabea2b26 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/utils/__pycache__/step_api_compatibility.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/utils/colorize.py b/MLPY/Lib/site-packages/gym/utils/colorize.py new file mode 100644 index 0000000000000000000000000000000000000000..6674ded5d117456227618631200ea5456677ebf6 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/colorize.py @@ -0,0 +1,41 @@ +"""A set of common utilities used within the environments. + +These are not intended as API functions, and will not remain stable over time. +""" + +color2num = dict( + gray=30, + red=31, + green=32, + yellow=33, + blue=34, + magenta=35, + cyan=36, + white=37, + crimson=38, +) + + +def colorize( + string: str, color: str, bold: bool = False, highlight: bool = False +) -> str: + """Returns string surrounded by appropriate terminal colour codes to print colourised text. + + Args: + string: The message to colourise + color: Literal values are gray, red, green, yellow, blue, magenta, cyan, white, crimson + bold: If to bold the string + highlight: If to highlight the string + + Returns: + Colourised string + """ + attr = [] + num = color2num[color] + if highlight: + num += 10 + attr.append(str(num)) + if bold: + attr.append("1") + attrs = ";".join(attr) + return f"\x1b[{attrs}m{string}\x1b[0m" diff --git a/MLPY/Lib/site-packages/gym/utils/env_checker.py b/MLPY/Lib/site-packages/gym/utils/env_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..0e34ad5b9a17a5c1bb260a1e0968e0bb3c665938 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/env_checker.py @@ -0,0 +1,310 @@ +"""A set of functions for checking an environment details. + +This file is originally from the Stable Baselines3 repository hosted on GitHub +(https://github.com/DLR-RM/stable-baselines3/) +Original Author: Antonin Raffin + +It also uses some warnings/assertions from the PettingZoo repository hosted on GitHub +(https://github.com/PettingZoo-Team/PettingZoo) +Original Author: J K Terry + +This was rewritten and split into "env_checker.py" and "passive_env_checker.py" for invasive and passive environment checking +Original Author: Mark Towers + +These projects are covered by the MIT License. +""" + +import inspect +from copy import deepcopy + +import numpy as np + +import gym +from gym import logger, spaces +from gym.utils.passive_env_checker import ( + check_action_space, + check_observation_space, + env_render_passive_checker, + env_reset_passive_checker, + env_step_passive_checker, +) + + +def data_equivalence(data_1, data_2) -> bool: + """Assert equality between data 1 and 2, i.e observations, actions, info. 
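+
+    An illustrative sketch (editor's addition): ``data_equivalence({"a": np.zeros(2)}, {"a": np.zeros(2)})``
+    returns ``True``, while structures whose types, keys, lengths or array values differ return ``False``.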
+ + Args: + data_1: data structure 1 + data_2: data structure 2 + + Returns: + If observation 1 and 2 are equivalent + """ + if type(data_1) == type(data_2): + if isinstance(data_1, dict): + return data_1.keys() == data_2.keys() and all( + data_equivalence(data_1[k], data_2[k]) for k in data_1.keys() + ) + elif isinstance(data_1, (tuple, list)): + return len(data_1) == len(data_2) and all( + data_equivalence(o_1, o_2) for o_1, o_2 in zip(data_1, data_2) + ) + elif isinstance(data_1, np.ndarray): + return data_1.shape == data_2.shape and np.allclose( + data_1, data_2, atol=0.00001 + ) + else: + return data_1 == data_2 + else: + return False + + +def check_reset_seed(env: gym.Env): + """Check that the environment can be reset with a seed. + + Args: + env: The environment to check + + Raises: + AssertionError: The environment cannot be reset with a random seed, + even though `seed` or `kwargs` appear in the signature. + """ + signature = inspect.signature(env.reset) + if "seed" in signature.parameters or ( + "kwargs" in signature.parameters + and signature.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + ): + try: + obs_1, info = env.reset(seed=123) + assert ( + obs_1 in env.observation_space + ), "The observation returned by `env.reset(seed=123)` is not within the observation space." + assert ( + env.unwrapped._np_random # pyright: ignore [reportPrivateUsage] + is not None + ), "Expects the random number generator to have been generated given a seed was passed to reset. Mostly likely the environment reset function does not call `super().reset(seed=seed)`." + seed_123_rng = deepcopy( + env.unwrapped._np_random # pyright: ignore [reportPrivateUsage] + ) + + obs_2, info = env.reset(seed=123) + assert ( + obs_2 in env.observation_space + ), "The observation returned by `env.reset(seed=123)` is not within the observation space." + if env.spec is not None and env.spec.nondeterministic is False: + assert data_equivalence( + obs_1, obs_2 + ), "Using `env.reset(seed=123)` is non-deterministic as the observations are not equivalent." + assert ( + env.unwrapped._np_random.bit_generator.state # pyright: ignore [reportPrivateUsage] + == seed_123_rng.bit_generator.state + ), "Mostly likely the environment reset function does not call `super().reset(seed=seed)` as the random generates are not same when the same seeds are passed to `env.reset`." + + obs_3, info = env.reset(seed=456) + assert ( + obs_3 in env.observation_space + ), "The observation returned by `env.reset(seed=456)` is not within the observation space." + assert ( + env.unwrapped._np_random.bit_generator.state # pyright: ignore [reportPrivateUsage] + != seed_123_rng.bit_generator.state + ), "Mostly likely the environment reset function does not call `super().reset(seed=seed)` as the random number generators are not different when different seeds are passed to `env.reset`." + + except TypeError as e: + raise AssertionError( + "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. " + f"This should never happen, please report this issue. The error was: {e}" + ) + + seed_param = signature.parameters.get("seed") + # Check the default value is None + if seed_param is not None and seed_param.default is not None: + logger.warn( + "The default seed argument in reset should be `None`, otherwise the environment will by default always be deterministic. 
" + f"Actual default: {seed_param.default}" + ) + else: + raise gym.error.Error( + "The `reset` method does not provide a `seed` or `**kwargs` keyword argument." + ) + + +def check_reset_options(env: gym.Env): + """Check that the environment can be reset with options. + + Args: + env: The environment to check + + Raises: + AssertionError: The environment cannot be reset with options, + even though `options` or `kwargs` appear in the signature. + """ + signature = inspect.signature(env.reset) + if "options" in signature.parameters or ( + "kwargs" in signature.parameters + and signature.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + ): + try: + env.reset(options={}) + except TypeError as e: + raise AssertionError( + "The environment cannot be reset with options, even though `options` or `**kwargs` appear in the signature. " + f"This should never happen, please report this issue. The error was: {e}" + ) + else: + raise gym.error.Error( + "The `reset` method does not provide an `options` or `**kwargs` keyword argument." + ) + + +def check_reset_return_info_deprecation(env: gym.Env): + """Makes sure support for deprecated `return_info` argument is dropped. + + Args: + env: The environment to check + Raises: + UserWarning + """ + signature = inspect.signature(env.reset) + if "return_info" in signature.parameters: + logger.warn( + "`return_info` is deprecated as an optional argument to `reset`. `reset`" + "should now always return `obs, info` where `obs` is an observation, and `info` is a dictionary" + "containing additional information." + ) + + +def check_seed_deprecation(env: gym.Env): + """Makes sure support for deprecated function `seed` is dropped. + + Args: + env: The environment to check + Raises: + UserWarning + """ + seed_fn = getattr(env, "seed", None) + if callable(seed_fn): + logger.warn( + "Official support for the `seed` function is dropped. " + "Standard practice is to reset gym environments using `env.reset(seed=)`" + ) + + +def check_reset_return_type(env: gym.Env): + """Checks that :meth:`reset` correctly returns a tuple of the form `(obs , info)`. + + Args: + env: The environment to check + Raises: + AssertionError depending on spec violation + """ + result = env.reset() + assert isinstance( + result, tuple + ), f"The result returned by `env.reset()` was not a tuple of the form `(obs, info)`, where `obs` is a observation and `info` is a dictionary containing additional information. Actual type: `{type(result)}`" + assert ( + len(result) == 2 + ), f"Calling the reset method did not return a 2-tuple, actual length: {len(result)}" + + obs, info = result + assert ( + obs in env.observation_space + ), "The first element returned by `env.reset()` is not within the observation space." + assert isinstance( + info, dict + ), f"The second element returned by `env.reset()` was not a dictionary, actual type: {type(info)}" + + +def check_space_limit(space, space_type: str): + """Check the space limit for only the Box space as a test that only runs as part of `check_env`.""" + if isinstance(space, spaces.Box): + if np.any(np.equal(space.low, -np.inf)): + logger.warn( + f"A Box {space_type} space minimum value is -infinity. This is probably too low." + ) + if np.any(np.equal(space.high, np.inf)): + logger.warn( + f"A Box {space_type} space maximum value is -infinity. This is probably too high." 
+ ) + + # Check that the Box space is normalized + if space_type == "action": + if len(space.shape) == 1: # for vector boxes + if ( + np.any( + np.logical_and( + space.low != np.zeros_like(space.low), + np.abs(space.low) != np.abs(space.high), + ) + ) + or np.any(space.low < -1) + or np.any(space.high > 1) + ): + # todo - Add to gymlibrary.ml? + logger.warn( + "For Box action spaces, we recommend using a symmetric and normalized space (range=[-1, 1] or [0, 1]). " + "See https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html for more information." + ) + elif isinstance(space, spaces.Tuple): + for subspace in space.spaces: + check_space_limit(subspace, space_type) + elif isinstance(space, spaces.Dict): + for subspace in space.values(): + check_space_limit(subspace, space_type) + + +def check_env(env: gym.Env, warn: bool = None, skip_render_check: bool = False): + """Check that an environment follows Gym API. + + This is an invasive function that calls the environment's reset and step. + + This is particularly useful when using a custom environment. + Please take a look at https://www.gymlibrary.dev/content/environment_creation/ + for more information about the API. + + Args: + env: The Gym environment that will be checked + warn: Ignored + skip_render_check: Whether to skip the checks for the render method. True by default (useful for the CI) + """ + if warn is not None: + logger.warn("`check_env(warn=...)` parameter is now ignored.") + + assert isinstance( + env, gym.Env + ), "The environment must inherit from the gym.Env class. See https://www.gymlibrary.dev/content/environment_creation/ for more info." + + if env.unwrapped is not env: + logger.warn( + f"The environment ({env}) is different from the unwrapped version ({env.unwrapped}). This could effect the environment checker as the environment most likely has a wrapper applied to it. We recommend using the raw environment for `check_env` using `env.unwrapped`." + ) + + # ============= Check the spaces (observation and action) ================ + assert hasattr( + env, "action_space" + ), "The environment must specify an action space. See https://www.gymlibrary.dev/content/environment_creation/ for more info." + check_action_space(env.action_space) + check_space_limit(env.action_space, "action") + + assert hasattr( + env, "observation_space" + ), "The environment must specify an observation space. See https://www.gymlibrary.dev/content/environment_creation/ for more info." 
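+    # Editor's note (illustrative, not part of the upstream source): `check_env` is typically run once
+    # against a freshly constructed, unwrapped environment, e.g.
+    #
+    #     import gym
+    #     from gym.utils.env_checker import check_env
+    #     check_env(gym.make("CartPole-v1").unwrapped)
+    #
+    # The action-space checks above are mirrored for the observation space below.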
+ check_observation_space(env.observation_space) + check_space_limit(env.observation_space, "observation") + + # ==== Check the reset method ==== + check_seed_deprecation(env) + check_reset_return_info_deprecation(env) + check_reset_return_type(env) + check_reset_seed(env) + check_reset_options(env) + + # ============ Check the returned values =============== + env_reset_passive_checker(env) + env_step_passive_checker(env, env.action_space.sample()) + + # ==== Check the render method and the declared render modes ==== + if not skip_render_check: + if env.render_mode is not None: + env_render_passive_checker(env) + + # todo: recreate the environment with a different render_mode for check that each work diff --git a/MLPY/Lib/site-packages/gym/utils/ezpickle.py b/MLPY/Lib/site-packages/gym/utils/ezpickle.py new file mode 100644 index 0000000000000000000000000000000000000000..9a601dba6da244b2e6f81d473f4d615c5e2eb685 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/ezpickle.py @@ -0,0 +1,35 @@ +"""Class for pickling and unpickling objects via their constructor arguments.""" + + +class EzPickle: + """Objects that are pickled and unpickled via their constructor arguments. + + Example:: + + >>> class Dog(Animal, EzPickle): + ... def __init__(self, furcolor, tailkind="bushy"): + ... Animal.__init__() + ... EzPickle.__init__(furcolor, tailkind) + + When this object is unpickled, a new ``Dog`` will be constructed by passing the provided furcolor and tailkind into the constructor. + However, philosophers are still not sure whether it is still the same dog. + + This is generally needed only for environments which wrap C/C++ code, such as MuJoCo and Atari. + """ + + def __init__(self, *args, **kwargs): + """Uses the ``args`` and ``kwargs`` from the object's constructor for pickling.""" + self._ezpickle_args = args + self._ezpickle_kwargs = kwargs + + def __getstate__(self): + """Returns the object pickle state with args and kwargs.""" + return { + "_ezpickle_args": self._ezpickle_args, + "_ezpickle_kwargs": self._ezpickle_kwargs, + } + + def __setstate__(self, d): + """Sets the object pickle state using d.""" + out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"]) + self.__dict__.update(out.__dict__) diff --git a/MLPY/Lib/site-packages/gym/utils/passive_env_checker.py b/MLPY/Lib/site-packages/gym/utils/passive_env_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..bd826510f48ede980ed50f9ce79e5e64682a8325 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/passive_env_checker.py @@ -0,0 +1,320 @@ +"""A set of functions for passively checking environment implementations.""" +import inspect +from functools import partial +from typing import Callable + +import numpy as np + +from gym import Space, error, logger, spaces + + +def _check_box_observation_space(observation_space: spaces.Box): + """Checks that a :class:`Box` observation space is defined in a sensible way. + + Args: + observation_space: A box observation space + """ + # Check if the box is an image + if len(observation_space.shape) == 3: + if observation_space.dtype != np.uint8: + logger.warn( + f"It seems a Box observation space is an image but the `dtype` is not `np.uint8`, actual type: {observation_space.dtype}. " + "If the Box observation space is not an image, we recommend flattening the observation to have only a 1D vector." 
+ ) + if np.any(observation_space.low != 0) or np.any(observation_space.high != 255): + logger.warn( + "It seems a Box observation space is an image but the upper and lower bounds are not in [0, 255]. " + "Generally, CNN policies assume observations are within that range, so you may encounter an issue if the observation values are not." + ) + + if len(observation_space.shape) not in [1, 3]: + logger.warn( + "A Box observation space has an unconventional shape (neither an image, nor a 1D vector). " + "We recommend flattening the observation to have only a 1D vector or use a custom policy to properly process the data. " + f"Actual observation shape: {observation_space.shape}" + ) + + assert ( + observation_space.low.shape == observation_space.shape + ), f"The Box observation space shape and low shape have different shapes, low shape: {observation_space.low.shape}, box shape: {observation_space.shape}" + assert ( + observation_space.high.shape == observation_space.shape + ), f"The Box observation space shape and high shape have have different shapes, high shape: {observation_space.high.shape}, box shape: {observation_space.shape}" + + if np.any(observation_space.low == observation_space.high): + logger.warn("A Box observation space maximum and minimum values are equal.") + elif np.any(observation_space.high < observation_space.low): + logger.warn("A Box observation space low value is greater than a high value.") + + +def _check_box_action_space(action_space: spaces.Box): + """Checks that a :class:`Box` action space is defined in a sensible way. + + Args: + action_space: A box action space + """ + assert ( + action_space.low.shape == action_space.shape + ), f"The Box action space shape and low shape have have different shapes, low shape: {action_space.low.shape}, box shape: {action_space.shape}" + assert ( + action_space.high.shape == action_space.shape + ), f"The Box action space shape and high shape have different shapes, high shape: {action_space.high.shape}, box shape: {action_space.shape}" + + if np.any(action_space.low == action_space.high): + logger.warn("A Box action space maximum and minimum values are equal.") + elif np.any(action_space.high < action_space.low): + logger.warn("A Box action space low value is greater than a high value.") + + +def check_space( + space: Space, space_type: str, check_box_space_fn: Callable[[spaces.Box], None] +): + """A passive check of the environment action space that should not affect the environment.""" + if not isinstance(space, spaces.Space): + raise AssertionError( + f"{space_type} space does not inherit from `gym.spaces.Space`, actual type: {type(space)}" + ) + + elif isinstance(space, spaces.Box): + check_box_space_fn(space) + elif isinstance(space, spaces.Discrete): + assert ( + 0 < space.n + ), f"Discrete {space_type} space's number of elements must be positive, actual number of elements: {space.n}" + assert ( + space.shape == () + ), f"Discrete {space_type} space's shape should be empty, actual shape: {space.shape}" + elif isinstance(space, spaces.MultiDiscrete): + assert ( + space.shape == space.nvec.shape + ), f"Multi-discrete {space_type} space's shape must be equal to the nvec shape, space shape: {space.shape}, nvec shape: {space.nvec.shape}" + assert np.all( + 0 < space.nvec + ), f"Multi-discrete {space_type} space's all nvec elements must be greater than 0, actual nvec: {space.nvec}" + elif isinstance(space, spaces.MultiBinary): + assert np.all( + 0 < np.asarray(space.shape) + ), f"Multi-binary {space_type} space's all shape elements 
must be greater than 0, actual shape: {space.shape}" + elif isinstance(space, spaces.Tuple): + assert 0 < len( + space.spaces + ), f"An empty Tuple {space_type} space is not allowed." + for subspace in space.spaces: + check_space(subspace, space_type, check_box_space_fn) + elif isinstance(space, spaces.Dict): + assert 0 < len( + space.spaces.keys() + ), f"An empty Dict {space_type} space is not allowed." + for subspace in space.values(): + check_space(subspace, space_type, check_box_space_fn) + + +check_observation_space = partial( + check_space, + space_type="observation", + check_box_space_fn=_check_box_observation_space, +) +check_action_space = partial( + check_space, space_type="action", check_box_space_fn=_check_box_action_space +) + + +def check_obs(obs, observation_space: spaces.Space, method_name: str): + """Check that the observation returned by the environment correspond to the declared one. + + Args: + obs: The observation to check + observation_space: The observation space of the observation + method_name: The method name that generated the observation + """ + pre = f"The obs returned by the `{method_name}()` method" + if isinstance(observation_space, spaces.Discrete): + if not isinstance(obs, (np.int64, int)): + logger.warn(f"{pre} should be an int or np.int64, actual type: {type(obs)}") + elif isinstance(observation_space, spaces.Box): + if observation_space.shape != (): + if not isinstance(obs, np.ndarray): + logger.warn( + f"{pre} was expecting a numpy array, actual type: {type(obs)}" + ) + elif obs.dtype != observation_space.dtype: + logger.warn( + f"{pre} was expecting numpy array dtype to be {observation_space.dtype}, actual type: {obs.dtype}" + ) + elif isinstance(observation_space, (spaces.MultiBinary, spaces.MultiDiscrete)): + if not isinstance(obs, np.ndarray): + logger.warn(f"{pre} was expecting a numpy array, actual type: {type(obs)}") + elif isinstance(observation_space, spaces.Tuple): + if not isinstance(obs, tuple): + logger.warn(f"{pre} was expecting a tuple, actual type: {type(obs)}") + assert len(obs) == len( + observation_space.spaces + ), f"{pre} length is not same as the observation space length, obs length: {len(obs)}, space length: {len(observation_space.spaces)}" + for sub_obs, sub_space in zip(obs, observation_space.spaces): + check_obs(sub_obs, sub_space, method_name) + elif isinstance(observation_space, spaces.Dict): + assert isinstance(obs, dict), f"{pre} must be a dict, actual type: {type(obs)}" + assert ( + obs.keys() == observation_space.spaces.keys() + ), f"{pre} observation keys is not same as the observation space keys, obs keys: {list(obs.keys())}, space keys: {list(observation_space.spaces.keys())}" + for space_key in observation_space.spaces.keys(): + check_obs(obs[space_key], observation_space[space_key], method_name) + + try: + if obs not in observation_space: + logger.warn(f"{pre} is not within the observation space.") + except Exception as e: + logger.warn(f"{pre} is not within the observation space with exception: {e}") + + +def env_reset_passive_checker(env, **kwargs): + """A passive check of the `Env.reset` function investigating the returning reset information and returning the data unchanged.""" + signature = inspect.signature(env.reset) + if "seed" not in signature.parameters and "kwargs" not in signature.parameters: + logger.warn( + "Future gym versions will require that `Env.reset` can be passed a `seed` instead of using `Env.seed` for resetting the environment random number generator." 
+ ) + else: + seed_param = signature.parameters.get("seed") + # Check the default value is None + if seed_param is not None and seed_param.default is not None: + logger.warn( + "The default seed argument in `Env.reset` should be `None`, otherwise the environment will by default always be deterministic. " + f"Actual default: {seed_param}" + ) + + if "options" not in signature.parameters and "kwargs" not in signature.parameters: + logger.warn( + "Future gym versions will require that `Env.reset` can be passed `options` to allow the environment initialisation to be passed additional information." + ) + + # Checks the result of env.reset with kwargs + result = env.reset(**kwargs) + + if not isinstance(result, tuple): + logger.warn( + f"The result returned by `env.reset()` was not a tuple of the form `(obs, info)`, where `obs` is a observation and `info` is a dictionary containing additional information. Actual type: `{type(result)}`" + ) + elif len(result) != 2: + logger.warn( + "The result returned by `env.reset()` should be `(obs, info)` by default, , where `obs` is a observation and `info` is a dictionary containing additional information." + ) + else: + obs, info = result + check_obs(obs, env.observation_space, "reset") + assert isinstance( + info, dict + ), f"The second element returned by `env.reset()` was not a dictionary, actual type: {type(info)}" + return result + + +def env_step_passive_checker(env, action): + """A passive check for the environment step, investigating the returning data then returning the data unchanged.""" + # We don't check the action as for some environments then out-of-bounds values can be given + result = env.step(action) + assert isinstance( + result, tuple + ), f"Expects step result to be a tuple, actual type: {type(result)}" + if len(result) == 4: + logger.deprecation( + "Core environment is written in old step API which returns one bool instead of two. " + "It is recommended to rewrite the environment with new step API. " + ) + obs, reward, done, info = result + + if not isinstance(done, (bool, np.bool8)): + logger.warn( + f"Expects `done` signal to be a boolean, actual type: {type(done)}" + ) + elif len(result) == 5: + obs, reward, terminated, truncated, info = result + + # np.bool is actual python bool not np boolean type, therefore bool_ or bool8 + if not isinstance(terminated, (bool, np.bool8)): + logger.warn( + f"Expects `terminated` signal to be a boolean, actual type: {type(terminated)}" + ) + if not isinstance(truncated, (bool, np.bool8)): + logger.warn( + f"Expects `truncated` signal to be a boolean, actual type: {type(truncated)}" + ) + else: + raise error.Error( + f"Expected `Env.step` to return a four or five element tuple, actual number of elements returned: {len(result)}." 
+ ) + + check_obs(obs, env.observation_space, "step") + + if not ( + np.issubdtype(type(reward), np.integer) + or np.issubdtype(type(reward), np.floating) + ): + logger.warn( + f"The reward returned by `step()` must be a float, int, np.integer or np.floating, actual type: {type(reward)}" + ) + else: + if np.isnan(reward): + logger.warn("The reward is a NaN value.") + if np.isinf(reward): + logger.warn("The reward is an inf value.") + + assert isinstance( + info, dict + ), f"The `info` returned by `step()` must be a python dictionary, actual type: {type(info)}" + + return result + + +def env_render_passive_checker(env, *args, **kwargs): + """A passive check of the `Env.render` that the declared render modes/fps in the metadata of the environment is declared.""" + render_modes = env.metadata.get("render_modes") + if render_modes is None: + logger.warn( + "No render modes was declared in the environment (env.metadata['render_modes'] is None or not defined), you may have trouble when calling `.render()`." + ) + else: + if not isinstance(render_modes, (list, tuple)): + logger.warn( + f"Expects the render_modes to be a sequence (i.e. list, tuple), actual type: {type(render_modes)}" + ) + elif not all(isinstance(mode, str) for mode in render_modes): + logger.warn( + f"Expects all render modes to be strings, actual types: {[type(mode) for mode in render_modes]}" + ) + + render_fps = env.metadata.get("render_fps") + # We only require `render_fps` if rendering is actually implemented + if len(render_modes) > 0: + if render_fps is None: + logger.warn( + "No render fps was declared in the environment (env.metadata['render_fps'] is None or not defined), rendering may occur at inconsistent fps." + ) + else: + if not ( + np.issubdtype(type(render_fps), np.integer) + or np.issubdtype(type(render_fps), np.floating) + ): + logger.warn( + f"Expects the `env.metadata['render_fps']` to be an integer or a float, actual type: {type(render_fps)}" + ) + else: + assert ( + render_fps > 0 + ), f"Expects the `env.metadata['render_fps']` to be greater than zero, actual value: {render_fps}" + + # env.render is now an attribute with default None + if len(render_modes) == 0: + assert ( + env.render_mode is None + ), f"With no render_modes, expects the Env.render_mode to be None, actual value: {env.render_mode}" + else: + assert env.render_mode is None or env.render_mode in render_modes, ( + "The environment was initialized successfully however with an unsupported render mode. 
" + f"Render mode: {env.render_mode}, modes: {render_modes}" + ) + + result = env.render(*args, **kwargs) + + # TODO: Check that the result is correct + + return result diff --git a/MLPY/Lib/site-packages/gym/utils/play.py b/MLPY/Lib/site-packages/gym/utils/play.py new file mode 100644 index 0000000000000000000000000000000000000000..796c2e4085a381c1eb60602d08c8be70a642128e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/play.py @@ -0,0 +1,386 @@ +"""Utilities of visualising an environment.""" +from collections import deque +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np + +import gym.error +from gym import Env, logger +from gym.core import ActType, ObsType +from gym.error import DependencyNotInstalled +from gym.logger import deprecation + +try: + import pygame + from pygame import Surface + from pygame.event import Event + from pygame.locals import VIDEORESIZE +except ImportError: + raise gym.error.DependencyNotInstalled( + "Pygame is not installed, run `pip install gym[classic_control]`" + ) + +try: + import matplotlib + + matplotlib.use("TkAgg") + import matplotlib.pyplot as plt +except ImportError: + logger.warn("Matplotlib is not installed, run `pip install gym[other]`") + matplotlib, plt = None, None + + +class MissingKeysToAction(Exception): + """Raised when the environment does not have a default ``keys_to_action`` mapping.""" + + +class PlayableGame: + """Wraps an environment allowing keyboard inputs to interact with the environment.""" + + def __init__( + self, + env: Env, + keys_to_action: Optional[Dict[Tuple[int, ...], int]] = None, + zoom: Optional[float] = None, + ): + """Wraps an environment with a dictionary of keyboard buttons to action and if to zoom in on the environment. + + Args: + env: The environment to play + keys_to_action: The dictionary of keyboard tuples and action value + zoom: If to zoom in on the environment render + """ + if env.render_mode not in {"rgb_array", "rgb_array_list"}: + logger.error( + "PlayableGame wrapper works only with rgb_array and rgb_array_list render modes, " + f"but your environment render_mode = {env.render_mode}." + ) + + self.env = env + self.relevant_keys = self._get_relevant_keys(keys_to_action) + self.video_size = self._get_video_size(zoom) + self.screen = pygame.display.set_mode(self.video_size) + self.pressed_keys = [] + self.running = True + + def _get_relevant_keys( + self, keys_to_action: Optional[Dict[Tuple[int], int]] = None + ) -> set: + if keys_to_action is None: + if hasattr(self.env, "get_keys_to_action"): + keys_to_action = self.env.get_keys_to_action() + elif hasattr(self.env.unwrapped, "get_keys_to_action"): + keys_to_action = self.env.unwrapped.get_keys_to_action() + else: + raise MissingKeysToAction( + f"{self.env.spec.id} does not have explicit key to action mapping, " + "please specify one manually" + ) + assert isinstance(keys_to_action, dict) + relevant_keys = set(sum((list(k) for k in keys_to_action.keys()), [])) + return relevant_keys + + def _get_video_size(self, zoom: Optional[float] = None) -> Tuple[int, int]: + rendered = self.env.render() + if isinstance(rendered, List): + rendered = rendered[-1] + assert rendered is not None and isinstance(rendered, np.ndarray) + video_size = (rendered.shape[1], rendered.shape[0]) + + if zoom is not None: + video_size = (int(video_size[0] * zoom), int(video_size[1] * zoom)) + + return video_size + + def process_event(self, event: Event): + """Processes a PyGame event. 
+ + In particular, this function is used to keep track of which buttons are currently pressed + and to exit the :func:`play` function when the PyGame window is closed. + + Args: + event: The event to process + """ + if event.type == pygame.KEYDOWN: + if event.key in self.relevant_keys: + self.pressed_keys.append(event.key) + elif event.key == pygame.K_ESCAPE: + self.running = False + elif event.type == pygame.KEYUP: + if event.key in self.relevant_keys: + self.pressed_keys.remove(event.key) + elif event.type == pygame.QUIT: + self.running = False + elif event.type == VIDEORESIZE: + self.video_size = event.size + self.screen = pygame.display.set_mode(self.video_size) + + +def display_arr( + screen: Surface, arr: np.ndarray, video_size: Tuple[int, int], transpose: bool +): + """Displays a numpy array on screen. + + Args: + screen: The screen to show the array on + arr: The array to show + video_size: The video size of the screen + transpose: If to transpose the array on the screen + """ + arr_min, arr_max = np.min(arr), np.max(arr) + arr = 255.0 * (arr - arr_min) / (arr_max - arr_min) + pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr) + pyg_img = pygame.transform.scale(pyg_img, video_size) + screen.blit(pyg_img, (0, 0)) + + +def play( + env: Env, + transpose: Optional[bool] = True, + fps: Optional[int] = None, + zoom: Optional[float] = None, + callback: Optional[Callable] = None, + keys_to_action: Optional[Dict[Union[Tuple[Union[str, int]], str], ActType]] = None, + seed: Optional[int] = None, + noop: ActType = 0, +): + """Allows one to play the game using keyboard. + + Example:: + + >>> import gym + >>> from gym.utils.play import play + >>> play(gym.make("CarRacing-v1", render_mode="rgb_array"), keys_to_action={ + ... "w": np.array([0, 0.7, 0]), + ... "a": np.array([-1, 0, 0]), + ... "s": np.array([0, 0, 1]), + ... "d": np.array([1, 0, 0]), + ... "wa": np.array([-1, 0.7, 0]), + ... "dw": np.array([1, 0.7, 0]), + ... "ds": np.array([1, 0, 1]), + ... "as": np.array([-1, 0, 1]), + ... }, noop=np.array([0,0,0])) + + + Above code works also if the environment is wrapped, so it's particularly useful in + verifying that the frame-level preprocessing does not render the game + unplayable. + + If you wish to plot real time statistics as you play, you can use + :class:`gym.utils.play.PlayPlot`. Here's a sample code for plotting the reward + for last 150 steps. + + >>> def callback(obs_t, obs_tp1, action, rew, terminated, truncated, info): + ... return [rew,] + >>> plotter = PlayPlot(callback, 150, ["reward"]) + >>> play(gym.make("ALE/AirRaid-v5"), callback=plotter.callback) + + + Args: + env: Environment to use for playing. + transpose: If this is ``True``, the output of observation is transposed. Defaults to ``True``. + fps: Maximum number of steps of the environment executed every second. If ``None`` (the default), + ``env.metadata["render_fps""]`` (or 30, if the environment does not specify "render_fps") is used. + zoom: Zoom the observation in, ``zoom`` amount, should be positive float + callback: If a callback is provided, it will be executed after every step. It takes the following input: + obs_t: observation before performing action + obs_tp1: observation after performing action + action: action that was executed + rew: reward that was received + terminated: whether the environment is terminated or not + truncated: whether the environment is truncated or not + info: debug info + keys_to_action: Mapping from keys pressed to action performed. 
+ Different formats are supported: Key combinations can either be expressed as a tuple of unicode code + points of the keys, as a tuple of characters, or as a string where each character of the string represents + one key. + For example if pressing 'w' and space at the same time is supposed + to trigger action number 2 then ``key_to_action`` dict could look like this: + >>> { + ... # ... + ... (ord('w'), ord(' ')): 2 + ... # ... + ... } + or like this: + >>> { + ... # ... + ... ("w", " "): 2 + ... # ... + ... } + or like this: + >>> { + ... # ... + ... "w ": 2 + ... # ... + ... } + If ``None``, default ``key_to_action`` mapping for that environment is used, if provided. + seed: Random seed used when resetting the environment. If None, no seed is used. + noop: The action used when no key input has been entered, or the entered key combination is unknown. + """ + env.reset(seed=seed) + + if keys_to_action is None: + if hasattr(env, "get_keys_to_action"): + keys_to_action = env.get_keys_to_action() + elif hasattr(env.unwrapped, "get_keys_to_action"): + keys_to_action = env.unwrapped.get_keys_to_action() + else: + raise MissingKeysToAction( + f"{env.spec.id} does not have explicit key to action mapping, " + "please specify one manually" + ) + assert keys_to_action is not None + + key_code_to_action = {} + for key_combination, action in keys_to_action.items(): + key_code = tuple( + sorted(ord(key) if isinstance(key, str) else key for key in key_combination) + ) + key_code_to_action[key_code] = action + + game = PlayableGame(env, key_code_to_action, zoom) + + if fps is None: + fps = env.metadata.get("render_fps", 30) + + done, obs = True, None + clock = pygame.time.Clock() + + while game.running: + if done: + done = False + obs = env.reset(seed=seed) + else: + action = key_code_to_action.get(tuple(sorted(game.pressed_keys)), noop) + prev_obs = obs + obs, rew, terminated, truncated, info = env.step(action) + done = terminated or truncated + if callback is not None: + callback(prev_obs, obs, action, rew, terminated, truncated, info) + if obs is not None: + rendered = env.render() + if isinstance(rendered, List): + rendered = rendered[-1] + assert rendered is not None and isinstance(rendered, np.ndarray) + display_arr( + game.screen, rendered, transpose=transpose, video_size=game.video_size + ) + + # process pygame events + for event in pygame.event.get(): + game.process_event(event) + + pygame.display.flip() + clock.tick(fps) + pygame.quit() + + +class PlayPlot: + """Provides a callback to create live plots of arbitrary metrics when using :func:`play`. + + This class is instantiated with a function that accepts information about a single environment transition: + - obs_t: observation before performing action + - obs_tp1: observation after performing action + - action: action that was executed + - rew: reward that was received + - terminated: whether the environment is terminated or not + - truncated: whether the environment is truncated or not + - info: debug info + + It should return a list of metrics that are computed from this data. + For instance, the function may look like this:: + + >>> def compute_metrics(obs_t, obs_tp, action, reward, terminated, truncated, info): + ... return [reward, info["cumulative_reward"], np.linalg.norm(action)] + + :class:`PlayPlot` provides the method :meth:`callback` which will pass its arguments along to that function + and uses the returned values to update live plots of the metrics. 
+ + Typically, this :meth:`callback` will be used in conjunction with :func:`play` to see how the metrics evolve as you play:: + + >>> plotter = PlayPlot(compute_metrics, horizon_timesteps=200, + ... plot_names=["Immediate Rew.", "Cumulative Rew.", "Action Magnitude"]) + >>> play(your_env, callback=plotter.callback) + """ + + def __init__( + self, callback: callable, horizon_timesteps: int, plot_names: List[str] + ): + """Constructor of :class:`PlayPlot`. + + The function ``callback`` that is passed to this constructor should return + a list of metrics that is of length ``len(plot_names)``. + + Args: + callback: Function that computes metrics from environment transitions + horizon_timesteps: The time horizon used for the live plots + plot_names: List of plot titles + + Raises: + DependencyNotInstalled: If matplotlib is not installed + """ + deprecation( + "`PlayPlot` is marked as deprecated and will be removed in the near future." + ) + self.data_callback = callback + self.horizon_timesteps = horizon_timesteps + self.plot_names = plot_names + + if plt is None: + raise DependencyNotInstalled( + "matplotlib is not installed, run `pip install gym[other]`" + ) + + num_plots = len(self.plot_names) + self.fig, self.ax = plt.subplots(num_plots) + if num_plots == 1: + self.ax = [self.ax] + for axis, name in zip(self.ax, plot_names): + axis.set_title(name) + self.t = 0 + self.cur_plot: List[Optional[plt.Axes]] = [None for _ in range(num_plots)] + self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)] + + def callback( + self, + obs_t: ObsType, + obs_tp1: ObsType, + action: ActType, + rew: float, + terminated: bool, + truncated: bool, + info: dict, + ): + """The callback that calls the provided data callback and adds the data to the plots. + + Args: + obs_t: The observation at time step t + obs_tp1: The observation at time step t+1 + action: The action + rew: The reward + terminated: If the environment is terminated + truncated: If the environment is truncated + info: The information from the environment + """ + points = self.data_callback( + obs_t, obs_tp1, action, rew, terminated, truncated, info + ) + for point, data_series in zip(points, self.data): + data_series.append(point) + self.t += 1 + + xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t + + for i, plot in enumerate(self.cur_plot): + if plot is not None: + plot.remove() + self.cur_plot[i] = self.ax[i].scatter( + range(xmin, xmax), list(self.data[i]), c="blue" + ) + self.ax[i].set_xlim(xmin, xmax) + + if plt is None: + raise DependencyNotInstalled( + "matplotlib is not installed, run `pip install gym[other]`" + ) + plt.pause(0.000001) diff --git a/MLPY/Lib/site-packages/gym/utils/save_video.py b/MLPY/Lib/site-packages/gym/utils/save_video.py new file mode 100644 index 0000000000000000000000000000000000000000..f134e6e178aa30a0bb13b4e85e81a27a4e86065b --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/save_video.py @@ -0,0 +1,107 @@ +"""Utility functions to save rendering videos.""" +import os +from typing import Callable, Optional + +import gym +from gym import logger + +try: + from moviepy.video.io.ImageSequenceClip import ImageSequenceClip +except ImportError: + raise gym.error.DependencyNotInstalled( + "MoviePy is not installed, run `pip install moviepy`" + ) + + +def capped_cubic_video_schedule(episode_id: int) -> bool: + """The default episode trigger. + + This function will trigger recordings at the episode indices 0, 1, 4, 8, 27, ..., :math:`k^3`, ..., 729, 1000, 2000, 3000, ... 
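+
+    As a quick illustration (a sketch that simply follows from the implementation
+    below), the trigger fires exactly at perfect-cube episode indices while
+    ``episode_id < 1000`` and then once every 1000 episodes::
+
+        >>> [episode for episode in range(30) if capped_cubic_video_schedule(episode)]
+        [0, 1, 8, 27]
+        >>> capped_cubic_video_schedule(2000)
+        True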
+ + Args: + episode_id: The episode number + + Returns: + If to apply a video schedule number + """ + if episode_id < 1000: + return int(round(episode_id ** (1.0 / 3))) ** 3 == episode_id + else: + return episode_id % 1000 == 0 + + +def save_video( + frames: list, + video_folder: str, + episode_trigger: Callable[[int], bool] = None, + step_trigger: Callable[[int], bool] = None, + video_length: Optional[int] = None, + name_prefix: str = "rl-video", + episode_index: int = 0, + step_starting_index: int = 0, + **kwargs, +): + """Save videos from rendering frames. + + This function extract video from a list of render frame episodes. + + Args: + frames (List[RenderFrame]): A list of frames to compose the video. + video_folder (str): The folder where the recordings will be stored + episode_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this episode + step_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this step + video_length (int): The length of recorded episodes. If it isn't specified, the entire episode is recorded. + Otherwise, snippets of the specified length are captured. + name_prefix (str): Will be prepended to the filename of the recordings. + episode_index (int): The index of the current episode. + step_starting_index (int): The step index of the first frame. + **kwargs: The kwargs that will be passed to moviepy's ImageSequenceClip. + You need to specify either fps or duration. + + Example: + >>> import gym + >>> from gym.utils.save_video import save_video + >>> env = gym.make("FrozenLake-v1", render_mode="rgb_array_list") + >>> env.reset() + >>> step_starting_index = 0 + >>> episode_index = 0 + >>> for step_index in range(199): + ... action = env.action_space.sample() + ... _, _, done, _ = env.step(action) + ... if done: + ... save_video( + ... env.render(), + ... "videos", + ... fps=env.metadata["render_fps"], + ... step_starting_index=step_starting_index, + ... episode_index=episode_index + ... ) + ... step_starting_index = step_index + 1 + ... episode_index += 1 + ... 
env.reset() + >>> env.close() + """ + if not isinstance(frames, list): + logger.error(f"Expected a list of frames, got a {type(frames)} instead.") + if episode_trigger is None and step_trigger is None: + episode_trigger = capped_cubic_video_schedule + + video_folder = os.path.abspath(video_folder) + os.makedirs(video_folder, exist_ok=True) + path_prefix = f"{video_folder}/{name_prefix}" + + if episode_trigger is not None and episode_trigger(episode_index): + clip = ImageSequenceClip(frames[:video_length], **kwargs) + clip.write_videofile(f"{path_prefix}-episode-{episode_index}.mp4") + + if step_trigger is not None: + # skip the first frame since it comes from reset + for step_index, frame_index in enumerate( + range(1, len(frames)), start=step_starting_index + ): + if step_trigger(step_index): + end_index = ( + frame_index + video_length if video_length is not None else None + ) + clip = ImageSequenceClip(frames[frame_index:end_index], **kwargs) + clip.write_videofile(f"{path_prefix}-step-{step_index}.mp4") diff --git a/MLPY/Lib/site-packages/gym/utils/seeding.py b/MLPY/Lib/site-packages/gym/utils/seeding.py new file mode 100644 index 0000000000000000000000000000000000000000..a5ecafbe99e186001ff7fb2db00689f71d28585a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/seeding.py @@ -0,0 +1,30 @@ +"""Set of random number generator functions: seeding, generator, hashing seeds.""" +from typing import Any, Optional, Tuple + +import numpy as np + +from gym import error + + +def np_random(seed: Optional[int] = None) -> Tuple[np.random.Generator, Any]: + """Generates a random number generator from the seed and returns the Generator and seed. + + Args: + seed: The seed used to create the generator + + Returns: + The generator and resulting seed + + Raises: + Error: Seed must be a non-negative integer or omitted + """ + if seed is not None and not (isinstance(seed, int) and 0 <= seed): + raise error.Error(f"Seed must be a non-negative integer or omitted, not {seed}") + + seed_seq = np.random.SeedSequence(seed) + np_seed = seed_seq.entropy + rng = RandomNumberGenerator(np.random.PCG64(seed_seq)) + return rng, np_seed + + +RNG = RandomNumberGenerator = np.random.Generator diff --git a/MLPY/Lib/site-packages/gym/utils/step_api_compatibility.py b/MLPY/Lib/site-packages/gym/utils/step_api_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..2ac2f9b23849d9bf6ee454ec8731d98e81274fd0 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/utils/step_api_compatibility.py @@ -0,0 +1,161 @@ +"""Contains methods for step compatibility, from old-to-new and new-to-old API.""" +from typing import Tuple, Union + +import numpy as np + +from gym.core import ObsType + +DoneStepType = Tuple[ + Union[ObsType, np.ndarray], + Union[float, np.ndarray], + Union[bool, np.ndarray], + Union[dict, list], +] + +TerminatedTruncatedStepType = Tuple[ + Union[ObsType, np.ndarray], + Union[float, np.ndarray], + Union[bool, np.ndarray], + Union[bool, np.ndarray], + Union[dict, list], +] + + +def convert_to_terminated_truncated_step_api( + step_returns: Union[DoneStepType, TerminatedTruncatedStepType], is_vector_env=False +) -> TerminatedTruncatedStepType: + """Function to transform step returns to new step API irrespective of input API. + + Args: + step_returns (tuple): Items returned by step(). 
Can be (obs, rew, done, info) or (obs, rew, terminated, truncated, info) + is_vector_env (bool): Whether the step_returns are from a vector environment + """ + if len(step_returns) == 5: + return step_returns + else: + assert len(step_returns) == 4 + observations, rewards, dones, infos = step_returns + + # Cases to handle - info single env / info vector env (list) / info vector env (dict) + if is_vector_env is False: + truncated = infos.pop("TimeLimit.truncated", False) + return ( + observations, + rewards, + dones and not truncated, + dones and truncated, + infos, + ) + elif isinstance(infos, list): + truncated = np.array( + [info.pop("TimeLimit.truncated", False) for info in infos] + ) + return ( + observations, + rewards, + np.logical_and(dones, np.logical_not(truncated)), + np.logical_and(dones, truncated), + infos, + ) + elif isinstance(infos, dict): + num_envs = len(dones) + truncated = infos.pop("TimeLimit.truncated", np.zeros(num_envs, dtype=bool)) + return ( + observations, + rewards, + np.logical_and(dones, np.logical_not(truncated)), + np.logical_and(dones, truncated), + infos, + ) + else: + raise TypeError( + f"Unexpected value of infos, as is_vector_envs=False, expects `info` to be a list or dict, actual type: {type(infos)}" + ) + + +def convert_to_done_step_api( + step_returns: Union[TerminatedTruncatedStepType, DoneStepType], + is_vector_env: bool = False, +) -> DoneStepType: + """Function to transform step returns to old step API irrespective of input API. + + Args: + step_returns (tuple): Items returned by step(). Can be (obs, rew, done, info) or (obs, rew, terminated, truncated, info) + is_vector_env (bool): Whether the step_returns are from a vector environment + """ + if len(step_returns) == 4: + return step_returns + else: + assert len(step_returns) == 5 + observations, rewards, terminated, truncated, infos = step_returns + + # Cases to handle - info single env / info vector env (list) / info vector env (dict) + if is_vector_env is False: + if truncated or terminated: + infos["TimeLimit.truncated"] = truncated and not terminated + return ( + observations, + rewards, + terminated or truncated, + infos, + ) + elif isinstance(infos, list): + for info, env_truncated, env_terminated in zip( + infos, truncated, terminated + ): + if env_truncated or env_terminated: + info["TimeLimit.truncated"] = env_truncated and not env_terminated + return ( + observations, + rewards, + np.logical_or(terminated, truncated), + infos, + ) + elif isinstance(infos, dict): + if np.logical_or(np.any(truncated), np.any(terminated)): + infos["TimeLimit.truncated"] = np.logical_and( + truncated, np.logical_not(terminated) + ) + return ( + observations, + rewards, + np.logical_or(terminated, truncated), + infos, + ) + else: + raise TypeError( + f"Unexpected value of infos, as is_vector_envs=False, expects `info` to be a list or dict, actual type: {type(infos)}" + ) + + +def step_api_compatibility( + step_returns: Union[TerminatedTruncatedStepType, DoneStepType], + output_truncation_bool: bool = True, + is_vector_env: bool = False, +) -> Union[TerminatedTruncatedStepType, DoneStepType]: + """Function to transform step returns to the API specified by `output_truncation_bool` bool. + + Done (old) step API refers to step() method returning (observation, reward, done, info) + Terminated Truncated (new) step API refers to step() method returning (observation, reward, terminated, truncated, info) + (Refer to docs for details on the API change) + + Args: + step_returns (tuple): Items returned by step(). 
Can be (obs, rew, done, info) or (obs, rew, terminated, truncated, info) + output_truncation_bool (bool): Whether the output should return two booleans (new API) or one (old) (True by default) + is_vector_env (bool): Whether the step_returns are from a vector environment + + Returns: + step_returns (tuple): Depending on `output_truncation_bool` bool, it can return (obs, rew, done, info) or (obs, rew, terminated, truncated, info) + + Examples: + This function can be used to ensure compatibility in step interfaces with conflicting API. Eg. if env is written in old API, + wrapper is written in new API, and the final step output is desired to be in old API. + + >>> obs, rew, done, info = step_api_compatibility(env.step(action), output_truncation_bool=False) + >>> obs, rew, terminated, truncated, info = step_api_compatibility(env.step(action), output_truncation_bool=True) + >>> observations, rewards, dones, infos = step_api_compatibility(vec_env.step(action), is_vector_env=True) + """ + if output_truncation_bool: + return convert_to_terminated_truncated_step_api(step_returns, is_vector_env) + else: + return convert_to_done_step_api(step_returns, is_vector_env) diff --git a/MLPY/Lib/site-packages/gym/vector/__init__.py b/MLPY/Lib/site-packages/gym/vector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1eb9653f29cd2ead34c7c03efbfb59e439dc515a --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/__init__.py @@ -0,0 +1,73 @@ +"""Module for vector environments.""" +from typing import Iterable, List, Optional, Union + +import gym +from gym.vector.async_vector_env import AsyncVectorEnv +from gym.vector.sync_vector_env import SyncVectorEnv +from gym.vector.vector_env import VectorEnv, VectorEnvWrapper + +__all__ = ["AsyncVectorEnv", "SyncVectorEnv", "VectorEnv", "VectorEnvWrapper", "make"] + + +def make( + id: str, + num_envs: int = 1, + asynchronous: bool = True, + wrappers: Optional[Union[callable, List[callable]]] = None, + disable_env_checker: Optional[bool] = None, + **kwargs, +) -> VectorEnv: + """Create a vectorized environment from multiple copies of an environment, from its id. + + Example:: + + >>> import gym + >>> env = gym.vector.make('CartPole-v1', num_envs=3) + >>> env.reset() + array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827], + [ 0.03073904, 0.00145001, -0.03088818, -0.03131252], + [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]], + dtype=float32) + + Args: + id: The environment ID. This must be a valid ID from the registry. + num_envs: Number of copies of the environment. + asynchronous: If `True`, wraps the environments in an :class:`AsyncVectorEnv` (which uses `multiprocessing`_ to run the environments in parallel). If ``False``, wraps the environments in a :class:`SyncVectorEnv`. + wrappers: If not ``None``, then apply the wrappers to each internal environment during creation. + disable_env_checker: If to run the env checker for the first environment only. None will default to the environment spec `disable_env_checker` parameter + (that is by default False), otherwise will run according to this argument (True = not run, False = run) + **kwargs: Keywords arguments applied during `gym.make` + + Returns: + The vectorized environment. 
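+
+    A minimal sketch of the ``wrappers`` argument (assuming the stock
+    ``gym.wrappers.TimeLimit`` wrapper), applied to every environment copy::
+
+        >>> from gym.wrappers import TimeLimit
+        >>> envs = gym.vector.make("CartPole-v1", num_envs=3, wrappers=lambda env: TimeLimit(env, 100))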
+ """ + + def create_env(env_num: int): + """Creates an environment that can enable or disable the environment checker.""" + # If the env_num > 0 then disable the environment checker otherwise use the parameter + _disable_env_checker = True if env_num > 0 else disable_env_checker + + def _make_env(): + env = gym.envs.registration.make( + id, + disable_env_checker=_disable_env_checker, + **kwargs, + ) + if wrappers is not None: + if callable(wrappers): + env = wrappers(env) + elif isinstance(wrappers, Iterable) and all( + [callable(w) for w in wrappers] + ): + for wrapper in wrappers: + env = wrapper(env) + else: + raise NotImplementedError + return env + + return _make_env + + env_fns = [ + create_env(disable_env_checker or env_num > 0) for env_num in range(num_envs) + ] + return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns) diff --git a/MLPY/Lib/site-packages/gym/vector/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4d37ec69973aa96fbd3fdd94e7c1f87709ceb30 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/__pycache__/async_vector_env.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/__pycache__/async_vector_env.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a73fea81dcdef74f9e903dadf5a3323ca23b504 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/__pycache__/async_vector_env.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/__pycache__/sync_vector_env.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/__pycache__/sync_vector_env.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..485c323f4f5985f5868fd00a20356a1dcd8628b2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/__pycache__/sync_vector_env.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/__pycache__/vector_env.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/__pycache__/vector_env.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6066801eb237c95464e9018bf5dbba57ff661fde Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/__pycache__/vector_env.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/async_vector_env.py b/MLPY/Lib/site-packages/gym/vector/async_vector_env.py new file mode 100644 index 0000000000000000000000000000000000000000..d721a7dd0429207bf76efd86da70c8463c09e40f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/async_vector_env.py @@ -0,0 +1,684 @@ +"""An async vector environment.""" +import multiprocessing as mp +import sys +import time +from copy import deepcopy +from enum import Enum +from typing import List, Optional, Sequence, Tuple, Union + +import numpy as np + +import gym +from gym import logger +from gym.core import ObsType +from gym.error import ( + AlreadyPendingCallError, + ClosedEnvironmentError, + CustomSpaceError, + NoAsyncCallError, +) +from gym.vector.utils import ( + CloudpickleWrapper, + clear_mpi_env_vars, + concatenate, + create_empty_array, + create_shared_memory, + iterate, + read_from_shared_memory, + write_to_shared_memory, +) +from gym.vector.vector_env import VectorEnv + +__all__ = ["AsyncVectorEnv"] + + +class AsyncState(Enum): + DEFAULT = "default" + WAITING_RESET = "reset" + WAITING_STEP = "step" + WAITING_CALL = "call" + + +class 
AsyncVectorEnv(VectorEnv): + """Vectorized environment that runs multiple environments in parallel. + + It uses ``multiprocessing`` processes, and pipes for communication. + + Example:: + + >>> import gym + >>> env = gym.vector.AsyncVectorEnv([ + ... lambda: gym.make("Pendulum-v0", g=9.81), + ... lambda: gym.make("Pendulum-v0", g=1.62) + ... ]) + >>> env.reset() + array([[-0.8286432 , 0.5597771 , 0.90249056], + [-0.85009176, 0.5266346 , 0.60007906]], dtype=float32) + """ + + def __init__( + self, + env_fns: Sequence[callable], + observation_space: Optional[gym.Space] = None, + action_space: Optional[gym.Space] = None, + shared_memory: bool = True, + copy: bool = True, + context: Optional[str] = None, + daemon: bool = True, + worker: Optional[callable] = None, + ): + """Vectorized environment that runs multiple environments in parallel. + + Args: + env_fns: Functions that create the environments. + observation_space: Observation space of a single environment. If ``None``, + then the observation space of the first environment is taken. + action_space: Action space of a single environment. If ``None``, + then the action space of the first environment is taken. + shared_memory: If ``True``, then the observations from the worker processes are communicated back through + shared variables. This can improve the efficiency if the observations are large (e.g. images). + copy: If ``True``, then the :meth:`~AsyncVectorEnv.reset` and :meth:`~AsyncVectorEnv.step` methods + return a copy of the observations. + context: Context for `multiprocessing`_. If ``None``, then the default context is used. + daemon: If ``True``, then subprocesses have ``daemon`` flag turned on; that is, they will quit if + the head process quits. However, ``daemon=True`` prevents subprocesses to spawn children, + so for some environments you may want to have it set to ``False``. + worker: If set, then use that worker in a subprocess instead of a default one. + Can be useful to override some inner vector env logic, for instance, how resets on termination or truncation are handled. + + Warnings: worker is an advanced mode option. It provides a high degree of flexibility and a high chance + to shoot yourself in the foot; thus, if you are writing your own worker, it is recommended to start + from the code for ``_worker`` (or ``_worker_shared_memory``) method, and add changes. + + Raises: + RuntimeError: If the observation space of some sub-environment does not match observation_space + (or, by default, the observation space of the first sub-environment). + ValueError: If observation_space is a custom space (i.e. not a default space in Gym, + such as gym.spaces.Box, gym.spaces.Discrete, or gym.spaces.Dict) and shared_memory is True. 
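+
+        A minimal sketch (assuming ``env_fns`` is the list of environment factories
+        passed above): environments with custom observation spaces require shared
+        memory to be disabled explicitly::
+
+            >>> envs = AsyncVectorEnv(env_fns, shared_memory=False)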
+ """ + ctx = mp.get_context(context) + self.env_fns = env_fns + self.shared_memory = shared_memory + self.copy = copy + dummy_env = env_fns[0]() + self.metadata = dummy_env.metadata + + if (observation_space is None) or (action_space is None): + observation_space = observation_space or dummy_env.observation_space + action_space = action_space or dummy_env.action_space + dummy_env.close() + del dummy_env + super().__init__( + num_envs=len(env_fns), + observation_space=observation_space, + action_space=action_space, + ) + + if self.shared_memory: + try: + _obs_buffer = create_shared_memory( + self.single_observation_space, n=self.num_envs, ctx=ctx + ) + self.observations = read_from_shared_memory( + self.single_observation_space, _obs_buffer, n=self.num_envs + ) + except CustomSpaceError: + raise ValueError( + "Using `shared_memory=True` in `AsyncVectorEnv` " + "is incompatible with non-standard Gym observation spaces " + "(i.e. custom spaces inheriting from `gym.Space`), and is " + "only compatible with default Gym spaces (e.g. `Box`, " + "`Tuple`, `Dict`) for batching. Set `shared_memory=False` " + "if you use custom observation spaces." + ) + else: + _obs_buffer = None + self.observations = create_empty_array( + self.single_observation_space, n=self.num_envs, fn=np.zeros + ) + + self.parent_pipes, self.processes = [], [] + self.error_queue = ctx.Queue() + target = _worker_shared_memory if self.shared_memory else _worker + target = worker or target + with clear_mpi_env_vars(): + for idx, env_fn in enumerate(self.env_fns): + parent_pipe, child_pipe = ctx.Pipe() + process = ctx.Process( + target=target, + name=f"Worker<{type(self).__name__}>-{idx}", + args=( + idx, + CloudpickleWrapper(env_fn), + child_pipe, + parent_pipe, + _obs_buffer, + self.error_queue, + ), + ) + + self.parent_pipes.append(parent_pipe) + self.processes.append(process) + + process.daemon = daemon + process.start() + child_pipe.close() + + self._state = AsyncState.DEFAULT + self._check_spaces() + + def reset_async( + self, + seed: Optional[Union[int, List[int]]] = None, + options: Optional[dict] = None, + ): + """Send calls to the :obj:`reset` methods of the sub-environments. + + To get the results of these calls, you may invoke :meth:`reset_wait`. + + Args: + seed: List of seeds for each environment + options: The reset option + + Raises: + ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). + AlreadyPendingCallError: If the environment is already waiting for a pending call to another + method (e.g. :meth:`step_async`). This can be caused by two consecutive + calls to :meth:`reset_async`, with no call to :meth:`reset_wait` in between. 
+ """ + self._assert_is_running() + + if seed is None: + seed = [None for _ in range(self.num_envs)] + if isinstance(seed, int): + seed = [seed + i for i in range(self.num_envs)] + assert len(seed) == self.num_envs + + if self._state != AsyncState.DEFAULT: + raise AlreadyPendingCallError( + f"Calling `reset_async` while waiting for a pending call to `{self._state.value}` to complete", + self._state.value, + ) + + for pipe, single_seed in zip(self.parent_pipes, seed): + single_kwargs = {} + if single_seed is not None: + single_kwargs["seed"] = single_seed + if options is not None: + single_kwargs["options"] = options + + pipe.send(("reset", single_kwargs)) + self._state = AsyncState.WAITING_RESET + + def reset_wait( + self, + timeout: Optional[Union[int, float]] = None, + seed: Optional[int] = None, + options: Optional[dict] = None, + ) -> Union[ObsType, Tuple[ObsType, List[dict]]]: + """Waits for the calls triggered by :meth:`reset_async` to finish and returns the results. + + Args: + timeout: Number of seconds before the call to `reset_wait` times out. If `None`, the call to `reset_wait` never times out. + seed: ignored + options: ignored + + Returns: + A tuple of batched observations and list of dictionaries + + Raises: + ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). + NoAsyncCallError: If :meth:`reset_wait` was called without any prior call to :meth:`reset_async`. + TimeoutError: If :meth:`reset_wait` timed out. + """ + self._assert_is_running() + if self._state != AsyncState.WAITING_RESET: + raise NoAsyncCallError( + "Calling `reset_wait` without any prior " "call to `reset_async`.", + AsyncState.WAITING_RESET.value, + ) + + if not self._poll(timeout): + self._state = AsyncState.DEFAULT + raise mp.TimeoutError( + f"The call to `reset_wait` has timed out after {timeout} second(s)." + ) + + results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) + self._raise_if_errors(successes) + self._state = AsyncState.DEFAULT + + infos = {} + results, info_data = zip(*results) + for i, info in enumerate(info_data): + infos = self._add_info(infos, info, i) + + if not self.shared_memory: + self.observations = concatenate( + self.single_observation_space, results, self.observations + ) + + return (deepcopy(self.observations) if self.copy else self.observations), infos + + def step_async(self, actions: np.ndarray): + """Send the calls to :obj:`step` to each sub-environment. + + Args: + actions: Batch of actions. element of :attr:`~VectorEnv.action_space` + + Raises: + ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). + AlreadyPendingCallError: If the environment is already waiting for a pending call to another + method (e.g. :meth:`reset_async`). This can be caused by two consecutive + calls to :meth:`step_async`, with no call to :meth:`step_wait` in + between. + """ + self._assert_is_running() + if self._state != AsyncState.DEFAULT: + raise AlreadyPendingCallError( + f"Calling `step_async` while waiting for a pending call to `{self._state.value}` to complete.", + self._state.value, + ) + + actions = iterate(self.action_space, actions) + for pipe, action in zip(self.parent_pipes, actions): + pipe.send(("step", action)) + self._state = AsyncState.WAITING_STEP + + def step_wait( + self, timeout: Optional[Union[int, float]] = None + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, List[dict]]: + """Wait for the calls to :obj:`step` in each sub-environment to finish. 
+ + Args: + timeout: Number of seconds before the call to :meth:`step_wait` times out. If ``None``, the call to :meth:`step_wait` never times out. + + Returns: + The batched environment step information, (obs, reward, terminated, truncated, info) + + Raises: + ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). + NoAsyncCallError: If :meth:`step_wait` was called without any prior call to :meth:`step_async`. + TimeoutError: If :meth:`step_wait` timed out. + """ + self._assert_is_running() + if self._state != AsyncState.WAITING_STEP: + raise NoAsyncCallError( + "Calling `step_wait` without any prior call " "to `step_async`.", + AsyncState.WAITING_STEP.value, + ) + + if not self._poll(timeout): + self._state = AsyncState.DEFAULT + raise mp.TimeoutError( + f"The call to `step_wait` has timed out after {timeout} second(s)." + ) + + observations_list, rewards, terminateds, truncateds, infos = [], [], [], [], {} + successes = [] + for i, pipe in enumerate(self.parent_pipes): + result, success = pipe.recv() + obs, rew, terminated, truncated, info = result + + successes.append(success) + observations_list.append(obs) + rewards.append(rew) + terminateds.append(terminated) + truncateds.append(truncated) + infos = self._add_info(infos, info, i) + + self._raise_if_errors(successes) + self._state = AsyncState.DEFAULT + + if not self.shared_memory: + self.observations = concatenate( + self.single_observation_space, + observations_list, + self.observations, + ) + + return ( + deepcopy(self.observations) if self.copy else self.observations, + np.array(rewards), + np.array(terminateds, dtype=np.bool_), + np.array(truncateds, dtype=np.bool_), + infos, + ) + + def call_async(self, name: str, *args, **kwargs): + """Calls the method with name asynchronously and apply args and kwargs to the method. + + Args: + name: Name of the method or property to call. + *args: Arguments to apply to the method call. + **kwargs: Keyword arguments to apply to the method call. + + Raises: + ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). + AlreadyPendingCallError: Calling `call_async` while waiting for a pending call to complete + """ + self._assert_is_running() + if self._state != AsyncState.DEFAULT: + raise AlreadyPendingCallError( + "Calling `call_async` while waiting " + f"for a pending call to `{self._state.value}` to complete.", + self._state.value, + ) + + for pipe in self.parent_pipes: + pipe.send(("_call", (name, args, kwargs))) + self._state = AsyncState.WAITING_CALL + + def call_wait(self, timeout: Optional[Union[int, float]] = None) -> list: + """Calls all parent pipes and waits for the results. + + Args: + timeout: Number of seconds before the call to `step_wait` times out. + If `None` (default), the call to `step_wait` never times out. + + Returns: + List of the results of the individual calls to the method or property for each environment. + + Raises: + NoAsyncCallError: Calling `call_wait` without any prior call to `call_async`. + TimeoutError: The call to `call_wait` has timed out after timeout second(s). + """ + self._assert_is_running() + if self._state != AsyncState.WAITING_CALL: + raise NoAsyncCallError( + "Calling `call_wait` without any prior call to `call_async`.", + AsyncState.WAITING_CALL.value, + ) + + if not self._poll(timeout): + self._state = AsyncState.DEFAULT + raise mp.TimeoutError( + f"The call to `call_wait` has timed out after {timeout} second(s)." 
+ ) + + results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) + self._raise_if_errors(successes) + self._state = AsyncState.DEFAULT + + return results + + def set_attr(self, name: str, values: Union[list, tuple, object]): + """Sets an attribute of the sub-environments. + + Args: + name: Name of the property to be set in each individual environment. + values: Values of the property to be set to. If ``values`` is a list or + tuple, then it corresponds to the values for each individual + environment, otherwise a single value is set for all environments. + + Raises: + ValueError: Values must be a list or tuple with length equal to the number of environments. + AlreadyPendingCallError: Calling `set_attr` while waiting for a pending call to complete. + """ + self._assert_is_running() + if not isinstance(values, (list, tuple)): + values = [values for _ in range(self.num_envs)] + if len(values) != self.num_envs: + raise ValueError( + "Values must be a list or tuple with length equal to the " + f"number of environments. Got `{len(values)}` values for " + f"{self.num_envs} environments." + ) + + if self._state != AsyncState.DEFAULT: + raise AlreadyPendingCallError( + "Calling `set_attr` while waiting " + f"for a pending call to `{self._state.value}` to complete.", + self._state.value, + ) + + for pipe, value in zip(self.parent_pipes, values): + pipe.send(("_setattr", (name, value))) + _, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) + self._raise_if_errors(successes) + + def close_extras( + self, timeout: Optional[Union[int, float]] = None, terminate: bool = False + ): + """Close the environments & clean up the extra resources (processes and pipes). + + Args: + timeout: Number of seconds before the call to :meth:`close` times out. If ``None``, + the call to :meth:`close` never times out. If the call to :meth:`close` + times out, then all processes are terminated. + terminate: If ``True``, then the :meth:`close` operation is forced and all processes are terminated. + + Raises: + TimeoutError: If :meth:`close` timed out. + """ + timeout = 0 if terminate else timeout + try: + if self._state != AsyncState.DEFAULT: + logger.warn( + f"Calling `close` while waiting for a pending call to `{self._state.value}` to complete." 
+ ) + function = getattr(self, f"{self._state.value}_wait") + function(timeout) + except mp.TimeoutError: + terminate = True + + if terminate: + for process in self.processes: + if process.is_alive(): + process.terminate() + else: + for pipe in self.parent_pipes: + if (pipe is not None) and (not pipe.closed): + pipe.send(("close", None)) + for pipe in self.parent_pipes: + if (pipe is not None) and (not pipe.closed): + pipe.recv() + + for pipe in self.parent_pipes: + if pipe is not None: + pipe.close() + for process in self.processes: + process.join() + + def _poll(self, timeout=None): + self._assert_is_running() + if timeout is None: + return True + end_time = time.perf_counter() + timeout + delta = None + for pipe in self.parent_pipes: + delta = max(end_time - time.perf_counter(), 0) + if pipe is None: + return False + if pipe.closed or (not pipe.poll(delta)): + return False + return True + + def _check_spaces(self): + self._assert_is_running() + spaces = (self.single_observation_space, self.single_action_space) + for pipe in self.parent_pipes: + pipe.send(("_check_spaces", spaces)) + results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) + self._raise_if_errors(successes) + same_observation_spaces, same_action_spaces = zip(*results) + if not all(same_observation_spaces): + raise RuntimeError( + "Some environments have an observation space different from " + f"`{self.single_observation_space}`. In order to batch observations, " + "the observation spaces from all environments must be equal." + ) + if not all(same_action_spaces): + raise RuntimeError( + "Some environments have an action space different from " + f"`{self.single_action_space}`. In order to batch actions, the " + "action spaces from all environments must be equal." + ) + + def _assert_is_running(self): + if self.closed: + raise ClosedEnvironmentError( + f"Trying to operate on `{type(self).__name__}`, after a call to `close()`." 
+ ) + + def _raise_if_errors(self, successes): + if all(successes): + return + + num_errors = self.num_envs - sum(successes) + assert num_errors > 0 + for i in range(num_errors): + index, exctype, value = self.error_queue.get() + logger.error( + f"Received the following error from Worker-{index}: {exctype.__name__}: {value}" + ) + logger.error(f"Shutting down Worker-{index}.") + self.parent_pipes[index].close() + self.parent_pipes[index] = None + + if i == num_errors - 1: + logger.error("Raising the last exception back to the main process.") + raise exctype(value) + + def __del__(self): + """On deleting the object, checks that the vector environment is closed.""" + if not getattr(self, "closed", True) and hasattr(self, "_state"): + self.close(terminate=True) + + +def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + assert shared_memory is None + env = env_fn() + parent_pipe.close() + try: + while True: + command, data = pipe.recv() + if command == "reset": + observation, info = env.reset(**data) + pipe.send(((observation, info), True)) + + elif command == "step": + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if terminated or truncated: + old_observation, old_info = observation, info + observation, info = env.reset() + info["final_observation"] = old_observation + info["final_info"] = old_info + pipe.send(((observation, reward, terminated, truncated, info), True)) + elif command == "seed": + env.seed(data) + pipe.send((None, True)) + elif command == "close": + pipe.send((None, True)) + break + elif command == "_call": + name, args, kwargs = data + if name in ["reset", "step", "seed", "close"]: + raise ValueError( + f"Trying to call function `{name}` with " + f"`_call`. Use `{name}` directly instead." + ) + function = getattr(env, name) + if callable(function): + pipe.send((function(*args, **kwargs), True)) + else: + pipe.send((function, True)) + elif command == "_setattr": + name, value = data + setattr(env, name, value) + pipe.send((None, True)) + elif command == "_check_spaces": + pipe.send( + ( + (data[0] == env.observation_space, data[1] == env.action_space), + True, + ) + ) + else: + raise RuntimeError( + f"Received unknown command `{command}`. Must " + "be one of {`reset`, `step`, `seed`, `close`, `_call`, " + "`_setattr`, `_check_spaces`}." 
+ ) + except (KeyboardInterrupt, Exception): + error_queue.put((index,) + sys.exc_info()[:2]) + pipe.send((None, False)) + finally: + env.close() + + +def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue): + assert shared_memory is not None + env = env_fn() + observation_space = env.observation_space + parent_pipe.close() + try: + while True: + command, data = pipe.recv() + if command == "reset": + observation, info = env.reset(**data) + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, info), True)) + + elif command == "step": + ( + observation, + reward, + terminated, + truncated, + info, + ) = env.step(data) + if terminated or truncated: + old_observation, old_info = observation, info + observation, info = env.reset() + info["final_observation"] = old_observation + info["final_info"] = old_info + write_to_shared_memory( + observation_space, index, observation, shared_memory + ) + pipe.send(((None, reward, terminated, truncated, info), True)) + elif command == "seed": + env.seed(data) + pipe.send((None, True)) + elif command == "close": + pipe.send((None, True)) + break + elif command == "_call": + name, args, kwargs = data + if name in ["reset", "step", "seed", "close"]: + raise ValueError( + f"Trying to call function `{name}` with " + f"`_call`. Use `{name}` directly instead." + ) + function = getattr(env, name) + if callable(function): + pipe.send((function(*args, **kwargs), True)) + else: + pipe.send((function, True)) + elif command == "_setattr": + name, value = data + setattr(env, name, value) + pipe.send((None, True)) + elif command == "_check_spaces": + pipe.send( + ((data[0] == observation_space, data[1] == env.action_space), True) + ) + else: + raise RuntimeError( + f"Received unknown command `{command}`. Must " + "be one of {`reset`, `step`, `seed`, `close`, `_call`, " + "`_setattr`, `_check_spaces`}." + ) + except (KeyboardInterrupt, Exception): + error_queue.put((index,) + sys.exc_info()[:2]) + pipe.send((None, False)) + finally: + env.close() diff --git a/MLPY/Lib/site-packages/gym/vector/sync_vector_env.py b/MLPY/Lib/site-packages/gym/vector/sync_vector_env.py new file mode 100644 index 0000000000000000000000000000000000000000..f34693663a50fa9f9ffb1f57f8f01605c2c93e0c --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/sync_vector_env.py @@ -0,0 +1,236 @@ +"""A synchronous vector environment.""" +from copy import deepcopy +from typing import Any, Callable, Iterator, List, Optional, Sequence, Union + +import numpy as np + +from gym import Env +from gym.spaces import Space +from gym.vector.utils import concatenate, create_empty_array, iterate +from gym.vector.vector_env import VectorEnv + +__all__ = ["SyncVectorEnv"] + + +class SyncVectorEnv(VectorEnv): + """Vectorized environment that serially runs multiple environments. + + Example:: + + >>> import gym + >>> env = gym.vector.SyncVectorEnv([ + ... lambda: gym.make("Pendulum-v0", g=9.81), + ... lambda: gym.make("Pendulum-v0", g=1.62) + ... ]) + >>> env.reset() + array([[-0.8286432 , 0.5597771 , 0.90249056], + [-0.85009176, 0.5266346 , 0.60007906]], dtype=float32) + """ + + def __init__( + self, + env_fns: Iterator[Callable[[], Env]], + observation_space: Space = None, + action_space: Space = None, + copy: bool = True, + ): + """Vectorized environment that serially runs multiple environments. + + Args: + env_fns: iterable of callable functions that create the environments. + observation_space: Observation space of a single environment. 
If ``None``, + then the observation space of the first environment is taken. + action_space: Action space of a single environment. If ``None``, + then the action space of the first environment is taken. + copy: If ``True``, then the :meth:`reset` and :meth:`step` methods return a copy of the observations. + + Raises: + RuntimeError: If the observation space of some sub-environment does not match observation_space + (or, by default, the observation space of the first sub-environment). + """ + self.env_fns = env_fns + self.envs = [env_fn() for env_fn in env_fns] + self.copy = copy + self.metadata = self.envs[0].metadata + + if (observation_space is None) or (action_space is None): + observation_space = observation_space or self.envs[0].observation_space + action_space = action_space or self.envs[0].action_space + super().__init__( + num_envs=len(self.envs), + observation_space=observation_space, + action_space=action_space, + ) + + self._check_spaces() + self.observations = create_empty_array( + self.single_observation_space, n=self.num_envs, fn=np.zeros + ) + self._rewards = np.zeros((self.num_envs,), dtype=np.float64) + self._terminateds = np.zeros((self.num_envs,), dtype=np.bool_) + self._truncateds = np.zeros((self.num_envs,), dtype=np.bool_) + self._actions = None + + def seed(self, seed: Optional[Union[int, Sequence[int]]] = None): + """Sets the seed in all sub-environments. + + Args: + seed: The seed + """ + super().seed(seed=seed) + if seed is None: + seed = [None for _ in range(self.num_envs)] + if isinstance(seed, int): + seed = [seed + i for i in range(self.num_envs)] + assert len(seed) == self.num_envs + + for env, single_seed in zip(self.envs, seed): + env.seed(single_seed) + + def reset_wait( + self, + seed: Optional[Union[int, List[int]]] = None, + options: Optional[dict] = None, + ): + """Waits for the calls triggered by :meth:`reset_async` to finish and returns the results. + + Args: + seed: The reset environment seed + options: Option information for the environment reset + + Returns: + The reset observation of the environment and reset information + """ + if seed is None: + seed = [None for _ in range(self.num_envs)] + if isinstance(seed, int): + seed = [seed + i for i in range(self.num_envs)] + assert len(seed) == self.num_envs + + self._terminateds[:] = False + self._truncateds[:] = False + observations = [] + infos = {} + for i, (env, single_seed) in enumerate(zip(self.envs, seed)): + + kwargs = {} + if single_seed is not None: + kwargs["seed"] = single_seed + if options is not None: + kwargs["options"] = options + + observation, info = env.reset(**kwargs) + observations.append(observation) + infos = self._add_info(infos, info, i) + + self.observations = concatenate( + self.single_observation_space, observations, self.observations + ) + return (deepcopy(self.observations) if self.copy else self.observations), infos + + def step_async(self, actions): + """Sets :attr:`_actions` for use by the :meth:`step_wait` by converting the ``actions`` to an iterable version.""" + self._actions = iterate(self.action_space, actions) + + def step_wait(self): + """Steps through each of the environments returning the batched results. 
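+
+        In normal use this is reached through :meth:`step`, which calls
+        :meth:`step_async` followed by :meth:`step_wait` (sketch, assuming ``envs``
+        is a constructed :class:`SyncVectorEnv`)::
+
+            >>> observations, rewards, terminateds, truncateds, infos = envs.step(envs.action_space.sample())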
+ + Returns: + The batched environment step results + """ + observations, infos = [], {} + for i, (env, action) in enumerate(zip(self.envs, self._actions)): + + ( + observation, + self._rewards[i], + self._terminateds[i], + self._truncateds[i], + info, + ) = env.step(action) + + if self._terminateds[i] or self._truncateds[i]: + old_observation, old_info = observation, info + observation, info = env.reset() + info["final_observation"] = old_observation + info["final_info"] = old_info + observations.append(observation) + infos = self._add_info(infos, info, i) + self.observations = concatenate( + self.single_observation_space, observations, self.observations + ) + + return ( + deepcopy(self.observations) if self.copy else self.observations, + np.copy(self._rewards), + np.copy(self._terminateds), + np.copy(self._truncateds), + infos, + ) + + def call(self, name, *args, **kwargs) -> tuple: + """Calls the method with name and applies args and kwargs. + + Args: + name: The method name + *args: The method args + **kwargs: The method kwargs + + Returns: + Tuple of results + """ + results = [] + for env in self.envs: + function = getattr(env, name) + if callable(function): + results.append(function(*args, **kwargs)) + else: + results.append(function) + + return tuple(results) + + def set_attr(self, name: str, values: Union[list, tuple, Any]): + """Sets an attribute of the sub-environments. + + Args: + name: The property name to change + values: Values of the property to be set to. If ``values`` is a list or + tuple, then it corresponds to the values for each individual + environment, otherwise, a single value is set for all environments. + + Raises: + ValueError: Values must be a list or tuple with length equal to the number of environments. + """ + if not isinstance(values, (list, tuple)): + values = [values for _ in range(self.num_envs)] + if len(values) != self.num_envs: + raise ValueError( + "Values must be a list or tuple with length equal to the " + f"number of environments. Got `{len(values)}` values for " + f"{self.num_envs} environments." + ) + + for env, value in zip(self.envs, values): + setattr(env, name, value) + + def close_extras(self, **kwargs): + """Close the environments.""" + [env.close() for env in self.envs] + + def _check_spaces(self) -> bool: + for env in self.envs: + if not (env.observation_space == self.single_observation_space): + raise RuntimeError( + "Some environments have an observation space different from " + f"`{self.single_observation_space}`. In order to batch observations, " + "the observation spaces from all environments must be equal." + ) + + if not (env.action_space == self.single_action_space): + raise RuntimeError( + "Some environments have an action space different from " + f"`{self.single_action_space}`. In order to batch actions, the " + "action spaces from all environments must be equal." 
+ ) + + return True diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__init__.py b/MLPY/Lib/site-packages/gym/vector/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c03f85335d1d3a4e4a19c7ba94b4350fd3b033ac --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/utils/__init__.py @@ -0,0 +1,23 @@ +"""Module for gym vector utils.""" +from gym.vector.utils.misc import CloudpickleWrapper, clear_mpi_env_vars +from gym.vector.utils.numpy_utils import concatenate, create_empty_array +from gym.vector.utils.shared_memory import ( + create_shared_memory, + read_from_shared_memory, + write_to_shared_memory, +) +from gym.vector.utils.spaces import _BaseGymSpaces # pyright: reportPrivateUsage=false +from gym.vector.utils.spaces import BaseGymSpaces, batch_space, iterate + +__all__ = [ + "CloudpickleWrapper", + "clear_mpi_env_vars", + "concatenate", + "create_empty_array", + "create_shared_memory", + "read_from_shared_memory", + "write_to_shared_memory", + "BaseGymSpaces", + "batch_space", + "iterate", +] diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e4923bd888bb72a07cd5ceb7eb585906f8b7c0e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/misc.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94e01b044b56aea16631727a3b85bf8f345ff043 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/misc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/numpy_utils.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/numpy_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fa9efc14dba99794c31b6f318deb12c944e7645 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/numpy_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/shared_memory.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/shared_memory.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbcf8de3f630e690d6d36f7012d6ab3eda0d8631 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/shared_memory.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/spaces.cpython-39.pyc b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/spaces.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1be4197a7fbfa84e9d5c55c5487e9451debe507c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/vector/utils/__pycache__/spaces.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/vector/utils/misc.py b/MLPY/Lib/site-packages/gym/vector/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..38edbd94be99f9fe054f64bca74ea1a51b4d14b4 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/utils/misc.py @@ -0,0 +1,55 @@ +"""Miscellaneous utilities.""" +import contextlib +import os + +__all__ = ["CloudpickleWrapper", "clear_mpi_env_vars"] + + +class CloudpickleWrapper: + """Wrapper that uses cloudpickle to pickle and unpickle the result.""" + + def __init__(self, 
fn: callable): + """Cloudpickle wrapper for a function.""" + self.fn = fn + + def __getstate__(self): + """Get the state using `cloudpickle.dumps(self.fn)`.""" + import cloudpickle + + return cloudpickle.dumps(self.fn) + + def __setstate__(self, ob): + """Sets the state with obs.""" + import pickle + + self.fn = pickle.loads(ob) + + def __call__(self): + """Calls the function `self.fn` with no arguments.""" + return self.fn() + + +@contextlib.contextmanager +def clear_mpi_env_vars(): + """Clears the MPI of environment variables. + + `from mpi4py import MPI` will call `MPI_Init` by default. + If the child process has MPI environment variables, MPI will think that the child process + is an MPI process just like the parent and do bad things such as hang. + + This context manager is a hacky way to clear those environment variables + temporarily such as when we are starting multiprocessing Processes. + + Yields: + Yields for the context manager + """ + removed_environment = {} + for k, v in list(os.environ.items()): + for prefix in ["OMPI_", "PMI_"]: + if k.startswith(prefix): + removed_environment[k] = v + del os.environ[k] + try: + yield + finally: + os.environ.update(removed_environment) diff --git a/MLPY/Lib/site-packages/gym/vector/utils/numpy_utils.py b/MLPY/Lib/site-packages/gym/vector/utils/numpy_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d596cde864590b5ae835fb8aee8618d6fb580204 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/utils/numpy_utils.py @@ -0,0 +1,136 @@ +"""Numpy utility functions: concatenate space samples and create empty array.""" +from collections import OrderedDict +from functools import singledispatch +from typing import Iterable, Union + +import numpy as np + +from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Space, Tuple + +__all__ = ["concatenate", "create_empty_array"] + + +@singledispatch +def concatenate( + space: Space, items: Iterable, out: Union[tuple, dict, np.ndarray] +) -> Union[tuple, dict, np.ndarray]: + """Concatenate multiple samples from space into a single object. + + Example:: + + >>> from gym.spaces import Box + >>> space = Box(low=0, high=1, shape=(3,), dtype=np.float32) + >>> out = np.zeros((2, 3), dtype=np.float32) + >>> items = [space.sample() for _ in range(2)] + >>> concatenate(space, items, out) + array([[0.6348213 , 0.28607962, 0.60760117], + [0.87383074, 0.192658 , 0.2148103 ]], dtype=float32) + + Args: + space: Observation space of a single environment in the vectorized environment. + items: Samples to be concatenated. + out: The output object. This object is a (possibly nested) numpy array. + + Returns: + The output object. This object is a (possibly nested) numpy array. + + Raises: + ValueError: Space is not a valid :class:`gym.Space` instance + """ + raise ValueError( + f"Space of type `{type(space)}` is not a valid `gym.Space` instance." 
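Since the `Box` example in the docstring above only covers the flat case, here is a hedged sketch of how `concatenate` handles a nested Dict space together with `create_empty_array` (defined later in this module); names and shapes are illustrative:

import numpy as np
from gym.spaces import Box, Dict
from gym.vector.utils import concatenate, create_empty_array

space = Dict({
    "position": Box(low=0, high=1, shape=(3,), dtype=np.float32),
    "velocity": Box(low=0, high=1, shape=(2,), dtype=np.float32),
})
out = create_empty_array(space, n=4)         # nested OrderedDict of zeroed arrays
items = [space.sample() for _ in range(4)]   # four un-batched samples
batched = concatenate(space, items, out)     # fills `out` in place and returns it
assert batched["position"].shape == (4, 3)
assert batched["velocity"].shape == (4, 2)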
+ ) + + +@concatenate.register(Box) +@concatenate.register(Discrete) +@concatenate.register(MultiDiscrete) +@concatenate.register(MultiBinary) +def _concatenate_base(space, items, out): + return np.stack(items, axis=0, out=out) + + +@concatenate.register(Tuple) +def _concatenate_tuple(space, items, out): + return tuple( + concatenate(subspace, [item[i] for item in items], out[i]) + for (i, subspace) in enumerate(space.spaces) + ) + + +@concatenate.register(Dict) +def _concatenate_dict(space, items, out): + return OrderedDict( + [ + (key, concatenate(subspace, [item[key] for item in items], out[key])) + for (key, subspace) in space.spaces.items() + ] + ) + + +@concatenate.register(Space) +def _concatenate_custom(space, items, out): + return tuple(items) + + +@singledispatch +def create_empty_array( + space: Space, n: int = 1, fn: callable = np.zeros +) -> Union[tuple, dict, np.ndarray]: + """Create an empty (possibly nested) numpy array. + + Example:: + + >>> from gym.spaces import Box, Dict + >>> space = Dict({ + ... 'position': Box(low=0, high=1, shape=(3,), dtype=np.float32), + ... 'velocity': Box(low=0, high=1, shape=(2,), dtype=np.float32)}) + >>> create_empty_array(space, n=2, fn=np.zeros) + OrderedDict([('position', array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32)), + ('velocity', array([[0., 0.], + [0., 0.]], dtype=float32))]) + + Args: + space: Observation space of a single environment in the vectorized environment. + n: Number of environments in the vectorized environment. If `None`, creates an empty sample from `space`. + fn: Function to apply when creating the empty numpy array. Examples of such functions are `np.empty` or `np.zeros`. + + Returns: + The output object. This object is a (possibly nested) numpy array. + + Raises: + ValueError: Space is not a valid :class:`gym.Space` instance + """ + raise ValueError( + f"Space of type `{type(space)}` is not a valid `gym.Space` instance." 
+ ) + + +@create_empty_array.register(Box) +@create_empty_array.register(Discrete) +@create_empty_array.register(MultiDiscrete) +@create_empty_array.register(MultiBinary) +def _create_empty_array_base(space, n=1, fn=np.zeros): + shape = space.shape if (n is None) else (n,) + space.shape + return fn(shape, dtype=space.dtype) + + +@create_empty_array.register(Tuple) +def _create_empty_array_tuple(space, n=1, fn=np.zeros): + return tuple(create_empty_array(subspace, n=n, fn=fn) for subspace in space.spaces) + + +@create_empty_array.register(Dict) +def _create_empty_array_dict(space, n=1, fn=np.zeros): + return OrderedDict( + [ + (key, create_empty_array(subspace, n=n, fn=fn)) + for (key, subspace) in space.spaces.items() + ] + ) + + +@create_empty_array.register(Space) +def _create_empty_array_custom(space, n=1, fn=np.zeros): + return None diff --git a/MLPY/Lib/site-packages/gym/vector/utils/shared_memory.py b/MLPY/Lib/site-packages/gym/vector/utils/shared_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..6d664c47ea6f8c048198847b677a511c341f5637 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/utils/shared_memory.py @@ -0,0 +1,182 @@ +"""Utility functions for vector environments to share memory between processes.""" +import multiprocessing as mp +from collections import OrderedDict +from ctypes import c_bool +from functools import singledispatch +from typing import Union + +import numpy as np + +from gym.error import CustomSpaceError +from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Space, Tuple + +__all__ = ["create_shared_memory", "read_from_shared_memory", "write_to_shared_memory"] + + +@singledispatch +def create_shared_memory( + space: Space, n: int = 1, ctx=mp +) -> Union[dict, tuple, mp.Array]: + """Create a shared memory object, to be shared across processes. + + This eventually contains the observations from the vectorized environment. + + Args: + space: Observation space of a single environment in the vectorized environment. + n: Number of environments in the vectorized environment (i.e. the number of processes). + ctx: The multiprocess module + + Returns: + shared_memory for the shared object across processes. + + Raises: + CustomSpaceError: Space is not a valid :class:`gym.Space` instance + """ + raise CustomSpaceError( + "Cannot create a shared memory for space with " + f"type `{type(space)}`. Shared memory only supports " + "default Gym spaces (e.g. `Box`, `Tuple`, " + "`Dict`, etc...), and does not support custom " + "Gym spaces." 
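The three shared-memory helpers in this module are meant to be used together: the parent allocates one flat buffer per (sub)space, each worker writes only its own slot, and the parent reads the whole batch as a zero-copy view. A single-process sketch with an assumed `Box` space (in `AsyncVectorEnv` the writes happen in the worker processes):

import multiprocessing as mp
import numpy as np
from gym.spaces import Box
from gym.vector.utils import (
    create_shared_memory,
    read_from_shared_memory,
    write_to_shared_memory,
)

space = Box(low=0, high=1, shape=(3,), dtype=np.float32)
n_envs = 4

shm = create_shared_memory(space, n=n_envs, ctx=mp)    # one mp.Array of 4 * 3 floats
batch = read_from_shared_memory(space, shm, n=n_envs)  # (4, 3) numpy view, no copy

for index in range(n_envs):                            # each worker writes its own slot
    write_to_shared_memory(space, index, space.sample(), shm)

print(batch)  # reflects the writes above, because `batch` aliases `shm`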
+ ) + + +@create_shared_memory.register(Box) +@create_shared_memory.register(Discrete) +@create_shared_memory.register(MultiDiscrete) +@create_shared_memory.register(MultiBinary) +def _create_base_shared_memory(space, n: int = 1, ctx=mp): + dtype = space.dtype.char + if dtype in "?": + dtype = c_bool + return ctx.Array(dtype, n * int(np.prod(space.shape))) + + +@create_shared_memory.register(Tuple) +def _create_tuple_shared_memory(space, n: int = 1, ctx=mp): + return tuple( + create_shared_memory(subspace, n=n, ctx=ctx) for subspace in space.spaces + ) + + +@create_shared_memory.register(Dict) +def _create_dict_shared_memory(space, n=1, ctx=mp): + return OrderedDict( + [ + (key, create_shared_memory(subspace, n=n, ctx=ctx)) + for (key, subspace) in space.spaces.items() + ] + ) + + +@singledispatch +def read_from_shared_memory( + space: Space, shared_memory: Union[dict, tuple, mp.Array], n: int = 1 +) -> Union[dict, tuple, np.ndarray]: + """Read the batch of observations from shared memory as a numpy array. + + ..notes:: + The numpy array objects returned by `read_from_shared_memory` shares the + memory of `shared_memory`. Any changes to `shared_memory` are forwarded + to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`. + + Args: + space: Observation space of a single environment in the vectorized environment. + shared_memory: Shared object across processes. This contains the observations from the vectorized environment. + This object is created with `create_shared_memory`. + n: Number of environments in the vectorized environment (i.e. the number of processes). + + Returns: + Batch of observations as a (possibly nested) numpy array. + + Raises: + CustomSpaceError: Space is not a valid :class:`gym.Space` instance + """ + raise CustomSpaceError( + "Cannot read from a shared memory for space with " + f"type `{type(space)}`. Shared memory only supports " + "default Gym spaces (e.g. `Box`, `Tuple`, " + "`Dict`, etc...), and does not support custom " + "Gym spaces." + ) + + +@read_from_shared_memory.register(Box) +@read_from_shared_memory.register(Discrete) +@read_from_shared_memory.register(MultiDiscrete) +@read_from_shared_memory.register(MultiBinary) +def _read_base_from_shared_memory(space, shared_memory, n: int = 1): + return np.frombuffer(shared_memory.get_obj(), dtype=space.dtype).reshape( + (n,) + space.shape + ) + + +@read_from_shared_memory.register(Tuple) +def _read_tuple_from_shared_memory(space, shared_memory, n: int = 1): + return tuple( + read_from_shared_memory(subspace, memory, n=n) + for (memory, subspace) in zip(shared_memory, space.spaces) + ) + + +@read_from_shared_memory.register(Dict) +def _read_dict_from_shared_memory(space, shared_memory, n: int = 1): + return OrderedDict( + [ + (key, read_from_shared_memory(subspace, shared_memory[key], n=n)) + for (key, subspace) in space.spaces.items() + ] + ) + + +@singledispatch +def write_to_shared_memory( + space: Space, + index: int, + value: np.ndarray, + shared_memory: Union[dict, tuple, mp.Array], +): + """Write the observation of a single environment into shared memory. + + Args: + space: Observation space of a single environment in the vectorized environment. + index: Index of the environment (must be in `[0, num_envs)`). + value: Observation of the single environment to write to shared memory. + shared_memory: Shared object across processes. This contains the observations from the vectorized environment. + This object is created with `create_shared_memory`. 
+ + Raises: + CustomSpaceError: Space is not a valid :class:`gym.Space` instance + """ + raise CustomSpaceError( + "Cannot write to a shared memory for space with " + f"type `{type(space)}`. Shared memory only supports " + "default Gym spaces (e.g. `Box`, `Tuple`, " + "`Dict`, etc...), and does not support custom " + "Gym spaces." + ) + + +@write_to_shared_memory.register(Box) +@write_to_shared_memory.register(Discrete) +@write_to_shared_memory.register(MultiDiscrete) +@write_to_shared_memory.register(MultiBinary) +def _write_base_to_shared_memory(space, index, value, shared_memory): + size = int(np.prod(space.shape)) + destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype) + np.copyto( + destination[index * size : (index + 1) * size], + np.asarray(value, dtype=space.dtype).flatten(), + ) + + +@write_to_shared_memory.register(Tuple) +def _write_tuple_to_shared_memory(space, index, values, shared_memory): + for value, memory, subspace in zip(values, shared_memory, space.spaces): + write_to_shared_memory(subspace, index, value, memory) + + +@write_to_shared_memory.register(Dict) +def _write_dict_to_shared_memory(space, index, values, shared_memory): + for key, subspace in space.spaces.items(): + write_to_shared_memory(subspace, index, values[key], shared_memory[key]) diff --git a/MLPY/Lib/site-packages/gym/vector/utils/spaces.py b/MLPY/Lib/site-packages/gym/vector/utils/spaces.py new file mode 100644 index 0000000000000000000000000000000000000000..a914423f098debf6ed074e3de5417a61d7949cb9 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/utils/spaces.py @@ -0,0 +1,211 @@ +"""Utility functions for gym spaces: batch space and iterator.""" +from collections import OrderedDict +from copy import deepcopy +from functools import singledispatch +from typing import Iterator + +import numpy as np + +from gym.error import CustomSpaceError +from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Space, Tuple + +BaseGymSpaces = (Box, Discrete, MultiDiscrete, MultiBinary) +_BaseGymSpaces = BaseGymSpaces +__all__ = ["BaseGymSpaces", "_BaseGymSpaces", "batch_space", "iterate"] + + +@singledispatch +def batch_space(space: Space, n: int = 1) -> Space: + """Create a (batched) space, containing multiple copies of a single space. + + Example:: + + >>> from gym.spaces import Box, Dict + >>> space = Dict({ + ... 'position': Box(low=0, high=1, shape=(3,), dtype=np.float32), + ... 'velocity': Box(low=0, high=1, shape=(2,), dtype=np.float32) + ... }) + >>> batch_space(space, n=5) + Dict(position:Box(5, 3), velocity:Box(5, 2)) + + Args: + space: Space (e.g. the observation space) for a single environment in the vectorized environment. + n: Number of environments in the vectorized environment. + + Returns: + Space (e.g. the observation space) for a batch of environments in the vectorized environment. + + Raises: + ValueError: Cannot batch space that is not a valid :class:`gym.Space` instance + """ + raise ValueError( + f"Cannot batch space with type `{type(space)}`. The space must be a valid `gym.Space` instance." 
+ ) + + +@batch_space.register(Box) +def _batch_space_box(space, n=1): + repeats = tuple([n] + [1] * space.low.ndim) + low, high = np.tile(space.low, repeats), np.tile(space.high, repeats) + return Box(low=low, high=high, dtype=space.dtype, seed=deepcopy(space.np_random)) + + +@batch_space.register(Discrete) +def _batch_space_discrete(space, n=1): + if space.start == 0: + return MultiDiscrete( + np.full((n,), space.n, dtype=space.dtype), + dtype=space.dtype, + seed=deepcopy(space.np_random), + ) + else: + return Box( + low=space.start, + high=space.start + space.n - 1, + shape=(n,), + dtype=space.dtype, + seed=deepcopy(space.np_random), + ) + + +@batch_space.register(MultiDiscrete) +def _batch_space_multidiscrete(space, n=1): + repeats = tuple([n] + [1] * space.nvec.ndim) + high = np.tile(space.nvec, repeats) - 1 + return Box( + low=np.zeros_like(high), + high=high, + dtype=space.dtype, + seed=deepcopy(space.np_random), + ) + + +@batch_space.register(MultiBinary) +def _batch_space_multibinary(space, n=1): + return Box( + low=0, + high=1, + shape=(n,) + space.shape, + dtype=space.dtype, + seed=deepcopy(space.np_random), + ) + + +@batch_space.register(Tuple) +def _batch_space_tuple(space, n=1): + return Tuple( + tuple(batch_space(subspace, n=n) for subspace in space.spaces), + seed=deepcopy(space.np_random), + ) + + +@batch_space.register(Dict) +def _batch_space_dict(space, n=1): + return Dict( + OrderedDict( + [ + (key, batch_space(subspace, n=n)) + for (key, subspace) in space.spaces.items() + ] + ), + seed=deepcopy(space.np_random), + ) + + +@batch_space.register(Space) +def _batch_space_custom(space, n=1): + # Without deepcopy, then the space.np_random is batched_space.spaces[0].np_random + # Which is an issue if you are sampling actions of both the original space and the batched space + batched_space = Tuple( + tuple(deepcopy(space) for _ in range(n)), seed=deepcopy(space.np_random) + ) + new_seeds = list(map(int, batched_space.np_random.integers(0, 1e8, n))) + batched_space.seed(new_seeds) + return batched_space + + +@singledispatch +def iterate(space: Space, items) -> Iterator: + """Iterate over the elements of a (batched) space. + + Example:: + + >>> from gym.spaces import Box, Dict + >>> space = Dict({ + ... 'position': Box(low=0, high=1, shape=(2, 3), dtype=np.float32), + ... 'velocity': Box(low=0, high=1, shape=(2, 2), dtype=np.float32)}) + >>> items = space.sample() + >>> it = iterate(space, items) + >>> next(it) + {'position': array([-0.99644893, -0.08304597, -0.7238421 ], dtype=float32), + 'velocity': array([0.35848552, 0.1533453 ], dtype=float32)} + >>> next(it) + {'position': array([-0.67958736, -0.49076623, 0.38661423], dtype=float32), + 'velocity': array([0.7975036 , 0.93317133], dtype=float32)} + >>> next(it) + StopIteration + + Args: + space: Space to which `items` belong to. + items: Items to be iterated over. + + Returns: + Iterator over the elements in `items`. + + Raises: + ValueError: Space is not an instance of :class:`gym.Space` + """ + raise ValueError( + f"Space of type `{type(space)}` is not a valid `gym.Space` instance." 
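`batch_space` and the `iterate` helper just introduced are roughly inverse operations: one builds the batched space, the other walks a batched sample back into per-environment elements. A small sketch; note that, per the registration above, a `Discrete(n)` with `start == 0` batches into a `MultiDiscrete`:

import numpy as np
from gym.spaces import Box, Dict, Discrete
from gym.vector.utils import batch_space, iterate

single = Dict({
    "mode": Discrete(4),
    "obs": Box(low=0, high=1, shape=(3,), dtype=np.float32),
})
batched = batch_space(single, n=2)   # mode -> MultiDiscrete([4 4]), obs -> Box(2, 3)

sample = batched.sample()            # one batched sample covering both environments
for env_sample in iterate(batched, sample):
    # each element looks like a sample of the *single* space again
    assert env_sample["obs"].shape == (3,)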
+ ) + + +@iterate.register(Discrete) +def _iterate_discrete(space, items): + raise TypeError("Unable to iterate over a space of type `Discrete`.") + + +@iterate.register(Box) +@iterate.register(MultiDiscrete) +@iterate.register(MultiBinary) +def _iterate_base(space, items): + try: + return iter(items) + except TypeError: + raise TypeError(f"Unable to iterate over the following elements: {items}") + + +@iterate.register(Tuple) +def _iterate_tuple(space, items): + # If this is a tuple of custom subspaces only, then simply iterate over items + if all( + isinstance(subspace, Space) + and (not isinstance(subspace, BaseGymSpaces + (Tuple, Dict))) + for subspace in space.spaces + ): + return iter(items) + + return zip( + *[iterate(subspace, items[i]) for i, subspace in enumerate(space.spaces)] + ) + + +@iterate.register(Dict) +def _iterate_dict(space, items): + keys, values = zip( + *[ + (key, iterate(subspace, items[key])) + for key, subspace in space.spaces.items() + ] + ) + for item in zip(*values): + yield OrderedDict([(key, value) for (key, value) in zip(keys, item)]) + + +@iterate.register(Space) +def _iterate_custom(space, items): + raise CustomSpaceError( + f"Unable to iterate over {items}, since {space} " + "is a custom `gym.Space` instance (i.e. not one of " + "`Box`, `Dict`, etc...)." + ) diff --git a/MLPY/Lib/site-packages/gym/vector/vector_env.py b/MLPY/Lib/site-packages/gym/vector/vector_env.py new file mode 100644 index 0000000000000000000000000000000000000000..450fa77de433011f3f7a68d2a544a0c5bfe26ea3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/vector/vector_env.py @@ -0,0 +1,332 @@ +"""Base class for vectorized environments.""" +from typing import Any, List, Optional, Tuple, Union + +import numpy as np + +import gym +from gym.vector.utils.spaces import batch_space + +__all__ = ["VectorEnv"] + + +class VectorEnv(gym.Env): + """Base class for vectorized environments. Runs multiple independent copies of the same environment in parallel. + + This is not the same as 1 environment that has multiple subcomponents, but it is many copies of the same base env. + + Each observation returned from vectorized environment is a batch of observations for each parallel environment. + And :meth:`step` is also expected to receive a batch of actions for each parallel environment. + + Notes: + All parallel environments should share the identical observation and action spaces. + In other words, a vector of multiple different environments is not supported. + """ + + def __init__( + self, + num_envs: int, + observation_space: gym.Space, + action_space: gym.Space, + ): + """Base class for vectorized environments. + + Args: + num_envs: Number of environments in the vectorized environment. + observation_space: Observation space of a single environment. + action_space: Action space of a single environment. + """ + self.num_envs = num_envs + self.is_vector_env = True + self.observation_space = batch_space(observation_space, n=num_envs) + self.action_space = batch_space(action_space, n=num_envs) + + self.closed = False + self.viewer = None + + # The observation and action spaces of a single environment are + # kept in separate properties + self.single_observation_space = observation_space + self.single_action_space = action_space + + def reset_async( + self, + seed: Optional[Union[int, List[int]]] = None, + options: Optional[dict] = None, + ): + """Reset the sub-environments asynchronously. + + This method will return ``None``. 
A call to :meth:`reset_async` should be followed + by a call to :meth:`reset_wait` to retrieve the results. + + Args: + seed: The reset seed + options: Reset options + """ + pass + + def reset_wait( + self, + seed: Optional[Union[int, List[int]]] = None, + options: Optional[dict] = None, + ): + """Retrieves the results of a :meth:`reset_async` call. + + A call to this method must always be preceded by a call to :meth:`reset_async`. + + Args: + seed: The reset seed + options: Reset options + + Returns: + The results from :meth:`reset_async` + + Raises: + NotImplementedError: VectorEnv does not implement function + """ + raise NotImplementedError("VectorEnv does not implement function") + + def reset( + self, + *, + seed: Optional[Union[int, List[int]]] = None, + options: Optional[dict] = None, + ): + """Reset all parallel environments and return a batch of initial observations. + + Args: + seed: The environment reset seeds + options: If to return the options + + Returns: + A batch of observations from the vectorized environment. + """ + self.reset_async(seed=seed, options=options) + return self.reset_wait(seed=seed, options=options) + + def step_async(self, actions): + """Asynchronously performs steps in the sub-environments. + + The results can be retrieved via a call to :meth:`step_wait`. + + Args: + actions: The actions to take asynchronously + """ + + def step_wait(self, **kwargs): + """Retrieves the results of a :meth:`step_async` call. + + A call to this method must always be preceded by a call to :meth:`step_async`. + + Args: + **kwargs: Additional keywords for vector implementation + + Returns: + The results from the :meth:`step_async` call + """ + + def step(self, actions): + """Take an action for each parallel environment. + + Args: + actions: element of :attr:`action_space` Batch of actions. + + Returns: + Batch of (observations, rewards, terminated, truncated, infos) or (observations, rewards, dones, infos) + """ + self.step_async(actions) + return self.step_wait() + + def call_async(self, name, *args, **kwargs): + """Calls a method name for each parallel environment asynchronously.""" + + def call_wait(self, **kwargs) -> List[Any]: # type: ignore + """After calling a method in :meth:`call_async`, this function collects the results.""" + + def call(self, name: str, *args, **kwargs) -> List[Any]: + """Call a method, or get a property, from each parallel environment. + + Args: + name (str): Name of the method or property to call. + *args: Arguments to apply to the method call. + **kwargs: Keyword arguments to apply to the method call. + + Returns: + List of the results of the individual calls to the method or property for each environment. + """ + self.call_async(name, *args, **kwargs) + return self.call_wait() + + def get_attr(self, name: str): + """Get a property from each parallel environment. + + Args: + name (str): Name of the property to be get from each individual environment. + + Returns: + The property with name + """ + return self.call(name) + + def set_attr(self, name: str, values: Union[list, tuple, object]): + """Set a property in each sub-environment. + + Args: + name (str): Name of the property to be set in each individual environment. + values (list, tuple, or object): Values of the property to be set to. If `values` is a list or + tuple, then it corresponds to the values for each individual environment, otherwise a single value + is set for all environments. + """ + + def close_extras(self, **kwargs): + """Clean up the extra resources e.g. 
beyond what's in this base class.""" + pass + + def close(self, **kwargs): + """Close all parallel environments and release resources. + + It also closes all the existing image viewers, then calls :meth:`close_extras` and set + :attr:`closed` as ``True``. + + Warnings: + This function itself does not close the environments, it should be handled + in :meth:`close_extras`. This is generic for both synchronous and asynchronous + vectorized environments. + + Notes: + This will be automatically called when garbage collected or program exited. + + Args: + **kwargs: Keyword arguments passed to :meth:`close_extras` + """ + if self.closed: + return + if self.viewer is not None: + self.viewer.close() + self.close_extras(**kwargs) + self.closed = True + + def _add_info(self, infos: dict, info: dict, env_num: int) -> dict: + """Add env info to the info dictionary of the vectorized environment. + + Given the `info` of a single environment add it to the `infos` dictionary + which represents all the infos of the vectorized environment. + Every `key` of `info` is paired with a boolean mask `_key` representing + whether or not the i-indexed environment has this `info`. + + Args: + infos (dict): the infos of the vectorized environment + info (dict): the info coming from the single environment + env_num (int): the index of the single environment + + Returns: + infos (dict): the (updated) infos of the vectorized environment + + """ + for k in info.keys(): + if k not in infos: + info_array, array_mask = self._init_info_arrays(type(info[k])) + else: + info_array, array_mask = infos[k], infos[f"_{k}"] + + info_array[env_num], array_mask[env_num] = info[k], True + infos[k], infos[f"_{k}"] = info_array, array_mask + return infos + + def _init_info_arrays(self, dtype: type) -> Tuple[np.ndarray, np.ndarray]: + """Initialize the info array. + + Initialize the info array. If the dtype is numeric + the info array will have the same dtype, otherwise + will be an array of `None`. Also, a boolean array + of the same length is returned. It will be used for + assessing which environment has info data. + + Args: + dtype (type): data type of the info coming from the env. + + Returns: + array (np.ndarray): the initialized info array. + array_mask (np.ndarray): the initialized boolean array. + + """ + if dtype in [int, float, bool] or issubclass(dtype, np.number): + array = np.zeros(self.num_envs, dtype=dtype) + else: + array = np.zeros(self.num_envs, dtype=object) + array[:] = None + array_mask = np.zeros(self.num_envs, dtype=bool) + return array, array_mask + + def __del__(self): + """Closes the vector environment.""" + if not getattr(self, "closed", True): + self.close() + + def __repr__(self) -> str: + """Returns a string representation of the vector environment. + + Returns: + A string containing the class name, number of environments and environment spec id + """ + if self.spec is None: + return f"{self.__class__.__name__}({self.num_envs})" + else: + return f"{self.__class__.__name__}({self.spec.id}, {self.num_envs})" + + +class VectorEnvWrapper(VectorEnv): + """Wraps the vectorized environment to allow a modular transformation. + + This class is the base class for all wrappers for vectorized environments. The subclass + could override some methods to change the behavior of the original vectorized environment + without touching the original code. + + Notes: + Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`. 
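The layout that `_add_info` produces is easiest to see on a concrete (hypothetical) case: three sub-environments where only env 2 reported an "episode" entry. The values below are illustrative, not taken from a real run:

import numpy as np

# Shape of `infos` after _add_info has folded in env 2's {"episode": {"r": 10.0}}.
infos = {
    "episode": np.array([None, None, {"r": 10.0}], dtype=object),
    "_episode": np.array([False, False, True]),  # mask: which envs set the key
}

# Typical consumption: select values through the boolean mask.
for env_idx in np.flatnonzero(infos["_episode"]):
    print(env_idx, infos["episode"][env_idx])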
+ """ + + def __init__(self, env: VectorEnv): + assert isinstance(env, VectorEnv) + self.env = env + + # explicitly forward the methods defined in VectorEnv + # to self.env (instead of the base class) + def reset_async(self, **kwargs): + return self.env.reset_async(**kwargs) + + def reset_wait(self, **kwargs): + return self.env.reset_wait(**kwargs) + + def step_async(self, actions): + return self.env.step_async(actions) + + def step_wait(self): + return self.env.step_wait() + + def close(self, **kwargs): + return self.env.close(**kwargs) + + def close_extras(self, **kwargs): + return self.env.close_extras(**kwargs) + + def call(self, name, *args, **kwargs): + return self.env.call(name, *args, **kwargs) + + def set_attr(self, name, values): + return self.env.set_attr(name, values) + + # implicitly forward all other methods and attributes to self.env + def __getattr__(self, name): + if name.startswith("_"): + raise AttributeError(f"attempted to get missing private attribute '{name}'") + return getattr(self.env, name) + + @property + def unwrapped(self): + return self.env.unwrapped + + def __repr__(self): + return f"<{self.__class__.__name__}, {self.env}>" + + def __del__(self): + self.env.__del__() diff --git a/MLPY/Lib/site-packages/gym/version.py b/MLPY/Lib/site-packages/gym/version.py new file mode 100644 index 0000000000000000000000000000000000000000..6d91b3944476bbd1da42f8801fbeec295f430d1e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/version.py @@ -0,0 +1 @@ +VERSION = "0.26.2" diff --git a/MLPY/Lib/site-packages/gym/wrappers/__init__.py b/MLPY/Lib/site-packages/gym/wrappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf2de5307786777b3ce639753eec39038e6879f --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/__init__.py @@ -0,0 +1,23 @@ +"""Module of wrapper classes.""" +from gym import error +from gym.wrappers.atari_preprocessing import AtariPreprocessing +from gym.wrappers.autoreset import AutoResetWrapper +from gym.wrappers.clip_action import ClipAction +from gym.wrappers.filter_observation import FilterObservation +from gym.wrappers.flatten_observation import FlattenObservation +from gym.wrappers.frame_stack import FrameStack, LazyFrames +from gym.wrappers.gray_scale_observation import GrayScaleObservation +from gym.wrappers.human_rendering import HumanRendering +from gym.wrappers.normalize import NormalizeObservation, NormalizeReward +from gym.wrappers.order_enforcing import OrderEnforcing +from gym.wrappers.record_episode_statistics import RecordEpisodeStatistics +from gym.wrappers.record_video import RecordVideo, capped_cubic_video_schedule +from gym.wrappers.render_collection import RenderCollection +from gym.wrappers.rescale_action import RescaleAction +from gym.wrappers.resize_observation import ResizeObservation +from gym.wrappers.step_api_compatibility import StepAPICompatibility +from gym.wrappers.time_aware_observation import TimeAwareObservation +from gym.wrappers.time_limit import TimeLimit +from gym.wrappers.transform_observation import TransformObservation +from gym.wrappers.transform_reward import TransformReward +from gym.wrappers.vector_list_info import VectorListInfo diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2822db5acbbf685ff2944e14fa0bc2e0c3f9bd7d Binary files /dev/null and 
b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/atari_preprocessing.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/atari_preprocessing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caa2826f82991a27d39d6502c19a7be69485dc04 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/atari_preprocessing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/autoreset.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/autoreset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77865e63a7c838d729f82b09e6e5088c45d81fc0 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/autoreset.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/clip_action.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/clip_action.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7c50af7647f53466bdaf39d46a6a16cba833404 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/clip_action.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/compatibility.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/compatibility.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90bf2298eeaa91f7d1ce95db6786aef4dc1fd294 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/compatibility.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/env_checker.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/env_checker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..126000e87e4a32876e7fd0f51482da2e860352be Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/env_checker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/filter_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/filter_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e55ec4774950acbea45cee981a1b68f89ae5cf16 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/filter_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/flatten_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/flatten_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a5cd35924d68b7320b9723a1a6a0f33997101d9 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/flatten_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/frame_stack.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/frame_stack.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83aec4967ec7e45db1b8beb688436bd5196828dc Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/frame_stack.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/gray_scale_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/gray_scale_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..096eceb2cde93d8d5bd83c6cf42d49fc7c24ed30 Binary files 
/dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/gray_scale_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/human_rendering.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/human_rendering.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ed6629d85cddee464ff2526c7357628fd068c39 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/human_rendering.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/normalize.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/normalize.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b45417003ceebc3d957e929527e1bd5ad2a65b04 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/normalize.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/order_enforcing.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/order_enforcing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa2329f5a63e8942c57c4cfcf1acbcc1f7bf6db Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/order_enforcing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/pixel_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/pixel_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7ccb2d403ddd3b493924d3667901056c93179cd Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/pixel_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_episode_statistics.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_episode_statistics.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c13789ee6648561356aa449aeec84cbe6d53af Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_episode_statistics.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_video.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_video.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27a2ba03f675e8220b1c2d77c3a46aff268cc5f0 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/record_video.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/render_collection.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/render_collection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a25b90ad4a6b1b58f770a4c8c0eff25c05df39e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/render_collection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/rescale_action.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/rescale_action.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d546adc5473b08624e31e639b623c7981725a4e Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/rescale_action.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/resize_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/resize_observation.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8bc1739791089e61acd088b2135f12402dcec44b Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/resize_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/step_api_compatibility.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/step_api_compatibility.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa1ad80ddb1705001f69e7f6512c54be042736cd Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/step_api_compatibility.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_aware_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_aware_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6e87911d5e19d01daf2535b563041ff914945e3 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_aware_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_limit.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_limit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4695596b843bea2942aefb6966a870bbc9697b2 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/time_limit.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_observation.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_observation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f54ca513285b4e407069c92222e99897f92e8635 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_observation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_reward.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_reward.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f13c294d5913eabf13ed42075dfcabc41cb7ecd6 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/transform_reward.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/__pycache__/vector_list_info.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/vector_list_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc68d338b62206934f9aa33b3c5549386f03a58c Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/__pycache__/vector_list_info.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/atari_preprocessing.py b/MLPY/Lib/site-packages/gym/wrappers/atari_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..ce445dd26978a4ef29dc521e2fa52cc8a8e53773 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/atari_preprocessing.py @@ -0,0 +1,190 @@ +"""Implementation of Atari 2600 Preprocessing following the guidelines of Machado et al., 2018.""" +import numpy as np + +import gym +from gym.spaces import Box + +try: + import cv2 +except ImportError: + cv2 = None + + +class AtariPreprocessing(gym.Wrapper): + """Atari 2600 preprocessing wrapper. + + This class follows the guidelines in Machado et al. (2018), + "Revisiting the Arcade Learning Environment: Evaluation Protocols and Open Problems for General Agents". 
+ + Specifically, the following preprocess stages applies to the atari environment: + - Noop Reset: Obtains the initial state by taking a random number of no-ops on reset, default max 30 no-ops. + - Frame skipping: The number of frames skipped between steps, 4 by default + - Max-pooling: Pools over the most recent two observations from the frame skips + - Termination signal when a life is lost: When the agent losses a life during the environment, then the environment is terminated. + Turned off by default. Not recommended by Machado et al. (2018). + - Resize to a square image: Resizes the atari environment original observation shape from 210x180 to 84x84 by default + - Grayscale observation: If the observation is colour or greyscale, by default, greyscale. + - Scale observation: If to scale the observation between [0, 1) or [0, 255), by default, not scaled. + """ + + def __init__( + self, + env: gym.Env, + noop_max: int = 30, + frame_skip: int = 4, + screen_size: int = 84, + terminal_on_life_loss: bool = False, + grayscale_obs: bool = True, + grayscale_newaxis: bool = False, + scale_obs: bool = False, + ): + """Wrapper for Atari 2600 preprocessing. + + Args: + env (Env): The environment to apply the preprocessing + noop_max (int): For No-op reset, the max number no-ops actions are taken at reset, to turn off, set to 0. + frame_skip (int): The number of frames between new observation the agents observations effecting the frequency at which the agent experiences the game. + screen_size (int): resize Atari frame + terminal_on_life_loss (bool): `if True`, then :meth:`step()` returns `terminated=True` whenever a + life is lost. + grayscale_obs (bool): if True, then gray scale observation is returned, otherwise, RGB observation + is returned. + grayscale_newaxis (bool): `if True and grayscale_obs=True`, then a channel axis is added to + grayscale observations to make them 3-dimensional. + scale_obs (bool): if True, then observation normalized in range [0,1) is returned. It also limits memory + optimization benefits of FrameStack Wrapper. + + Raises: + DependencyNotInstalled: opencv-python package not installed + ValueError: Disable frame-skipping in the original env + """ + super().__init__(env) + if cv2 is None: + raise gym.error.DependencyNotInstalled( + "opencv-python package not installed, run `pip install gym[other]` to get dependencies for atari" + ) + assert frame_skip > 0 + assert screen_size > 0 + assert noop_max >= 0 + if frame_skip > 1: + if ( + "NoFrameskip" not in env.spec.id + and getattr(env.unwrapped, "_frameskip", None) != 1 + ): + raise ValueError( + "Disable frame-skipping in the original env. 
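A hedged usage sketch of the preprocessing wrapper with its common companion FrameStack; this assumes an ALE environment is installed (e.g. via `pip install gym[atari]` plus ROMs) and uses a NoFrameskip variant so that the wrapper's own frame-skipping is the only one applied:

import gym
from gym.wrappers import AtariPreprocessing, FrameStack

env = gym.make("BreakoutNoFrameskip-v4")
env = AtariPreprocessing(
    env, noop_max=30, frame_skip=4, screen_size=84,
    grayscale_obs=True, scale_obs=False,
)
env = FrameStack(env, num_stack=4)     # stack the last four preprocessed frames

obs, info = env.reset(seed=0)
print(env.observation_space.shape)     # (4, 84, 84) with the settings above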
Otherwise, more than one " + "frame-skip will happen as through this wrapper" + ) + self.noop_max = noop_max + assert env.unwrapped.get_action_meanings()[0] == "NOOP" + + self.frame_skip = frame_skip + self.screen_size = screen_size + self.terminal_on_life_loss = terminal_on_life_loss + self.grayscale_obs = grayscale_obs + self.grayscale_newaxis = grayscale_newaxis + self.scale_obs = scale_obs + + # buffer of most recent two observations for max pooling + assert isinstance(env.observation_space, Box) + if grayscale_obs: + self.obs_buffer = [ + np.empty(env.observation_space.shape[:2], dtype=np.uint8), + np.empty(env.observation_space.shape[:2], dtype=np.uint8), + ] + else: + self.obs_buffer = [ + np.empty(env.observation_space.shape, dtype=np.uint8), + np.empty(env.observation_space.shape, dtype=np.uint8), + ] + + self.lives = 0 + self.game_over = False + + _low, _high, _obs_dtype = ( + (0, 255, np.uint8) if not scale_obs else (0, 1, np.float32) + ) + _shape = (screen_size, screen_size, 1 if grayscale_obs else 3) + if grayscale_obs and not grayscale_newaxis: + _shape = _shape[:-1] # Remove channel axis + self.observation_space = Box( + low=_low, high=_high, shape=_shape, dtype=_obs_dtype + ) + + @property + def ale(self): + """Make ale as a class property to avoid serialization error.""" + return self.env.unwrapped.ale + + def step(self, action): + """Applies the preprocessing for an :meth:`env.step`.""" + total_reward, terminated, truncated, info = 0.0, False, False, {} + + for t in range(self.frame_skip): + _, reward, terminated, truncated, info = self.env.step(action) + total_reward += reward + self.game_over = terminated + + if self.terminal_on_life_loss: + new_lives = self.ale.lives() + terminated = terminated or new_lives < self.lives + self.game_over = terminated + self.lives = new_lives + + if terminated or truncated: + break + if t == self.frame_skip - 2: + if self.grayscale_obs: + self.ale.getScreenGrayscale(self.obs_buffer[1]) + else: + self.ale.getScreenRGB(self.obs_buffer[1]) + elif t == self.frame_skip - 1: + if self.grayscale_obs: + self.ale.getScreenGrayscale(self.obs_buffer[0]) + else: + self.ale.getScreenRGB(self.obs_buffer[0]) + return self._get_obs(), total_reward, terminated, truncated, info + + def reset(self, **kwargs): + """Resets the environment using preprocessing.""" + # NoopReset + _, reset_info = self.env.reset(**kwargs) + + noops = ( + self.env.unwrapped.np_random.integers(1, self.noop_max + 1) + if self.noop_max > 0 + else 0 + ) + for _ in range(noops): + _, _, terminated, truncated, step_info = self.env.step(0) + reset_info.update(step_info) + if terminated or truncated: + _, reset_info = self.env.reset(**kwargs) + + self.lives = self.ale.lives() + if self.grayscale_obs: + self.ale.getScreenGrayscale(self.obs_buffer[0]) + else: + self.ale.getScreenRGB(self.obs_buffer[0]) + self.obs_buffer[1].fill(0) + + return self._get_obs(), reset_info + + def _get_obs(self): + if self.frame_skip > 1: # more efficient in-place pooling + np.maximum(self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0]) + assert cv2 is not None + obs = cv2.resize( + self.obs_buffer[0], + (self.screen_size, self.screen_size), + interpolation=cv2.INTER_AREA, + ) + + if self.scale_obs: + obs = np.asarray(obs, dtype=np.float32) / 255.0 + else: + obs = np.asarray(obs, dtype=np.uint8) + + if self.grayscale_obs and self.grayscale_newaxis: + obs = np.expand_dims(obs, axis=-1) # Add a channel axis + return obs diff --git a/MLPY/Lib/site-packages/gym/wrappers/autoreset.py 
b/MLPY/Lib/site-packages/gym/wrappers/autoreset.py new file mode 100644 index 0000000000000000000000000000000000000000..17646abfea095573553c1905b5cbb874893b5d57 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/autoreset.py @@ -0,0 +1,61 @@ +"""Wrapper that autoreset environments when `terminated=True` or `truncated=True`.""" +import gym + + +class AutoResetWrapper(gym.Wrapper): + """A class for providing an automatic reset functionality for gym environments when calling :meth:`self.step`. + + When calling step causes :meth:`Env.step` to return `terminated=True` or `truncated=True`, :meth:`Env.reset` is called, + and the return format of :meth:`self.step` is as follows: ``(new_obs, final_reward, final_terminated, final_truncated, info)`` + with new step API and ``(new_obs, final_reward, final_done, info)`` with the old step API. + - ``new_obs`` is the first observation after calling :meth:`self.env.reset` + - ``final_reward`` is the reward after calling :meth:`self.env.step`, prior to calling :meth:`self.env.reset`. + - ``final_terminated`` is the terminated value before calling :meth:`self.env.reset`. + - ``final_truncated`` is the truncated value before calling :meth:`self.env.reset`. Both `final_terminated` and `final_truncated` cannot be False. + - ``info`` is a dict containing all the keys from the info dict returned by the call to :meth:`self.env.reset`, + with an additional key "final_observation" containing the observation returned by the last call to :meth:`self.env.step` + and "final_info" containing the info dict returned by the last call to :meth:`self.env.step`. + + Warning: When using this wrapper to collect rollouts, note that when :meth:`Env.step` returns `terminated` or `truncated`, a + new observation from after calling :meth:`Env.reset` is returned by :meth:`Env.step` alongside the + final reward, terminated and truncated state from the previous episode. + If you need the final state from the previous episode, you need to retrieve it via the + "final_observation" key in the info dict. + Make sure you know what you're doing if you use this wrapper! + """ + + def __init__(self, env: gym.Env): + """A class for providing an automatic reset functionality for gym environments when calling :meth:`self.step`. + + Args: + env (gym.Env): The environment to apply the wrapper + """ + super().__init__(env) + + def step(self, action): + """Steps through the environment with action and resets the environment if a terminated or truncated signal is encountered. 
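In practice the wrapper is invisible until an episode ends; at that point the observation returned by `step` already belongs to the next episode, and the terminal observation has to be read back out of `info`. A short sketch assuming CartPole-v1:

import gym
from gym.wrappers import AutoResetWrapper

env = AutoResetWrapper(gym.make("CartPole-v1"))
obs, info = env.reset(seed=0)

for _ in range(500):
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    if terminated or truncated:
        # `obs` is already the first observation of the *next* episode; the
        # finished episode's last observation and info live in the info dict.
        last_obs = info["final_observation"]
        last_info = info["final_info"]
env.close()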
+ + Args: + action: The action to take + + Returns: + The autoreset environment :meth:`step` + """ + obs, reward, terminated, truncated, info = self.env.step(action) + if terminated or truncated: + + new_obs, new_info = self.env.reset() + assert ( + "final_observation" not in new_info + ), 'info dict cannot contain key "final_observation" ' + assert ( + "final_info" not in new_info + ), 'info dict cannot contain key "final_info" ' + + new_info["final_observation"] = obs + new_info["final_info"] = info + + obs = new_obs + info = new_info + + return obs, reward, terminated, truncated, info diff --git a/MLPY/Lib/site-packages/gym/wrappers/clip_action.py b/MLPY/Lib/site-packages/gym/wrappers/clip_action.py new file mode 100644 index 0000000000000000000000000000000000000000..de2363847685e564e088452b842ce462cedbc56e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/clip_action.py @@ -0,0 +1,40 @@ +"""Wrapper for clipping actions within a valid bound.""" +import numpy as np + +import gym +from gym import ActionWrapper +from gym.spaces import Box + + +class ClipAction(ActionWrapper): + """Clip the continuous action within the valid :class:`Box` observation space bound. + + Example: + >>> import gym + >>> env = gym.make('Bipedal-Walker-v3') + >>> env = ClipAction(env) + >>> env.action_space + Box(-1.0, 1.0, (4,), float32) + >>> env.step(np.array([5.0, 2.0, -10.0, 0.0])) + # Executes the action np.array([1.0, 1.0, -1.0, 0]) in the base environment + """ + + def __init__(self, env: gym.Env): + """A wrapper for clipping continuous actions within the valid bound. + + Args: + env: The environment to apply the wrapper + """ + assert isinstance(env.action_space, Box) + super().__init__(env) + + def action(self, action): + """Clips the action within the valid bounds. + + Args: + action: The action to clip + + Returns: + The clipped action + """ + return np.clip(action, self.action_space.low, self.action_space.high) diff --git a/MLPY/Lib/site-packages/gym/wrappers/compatibility.py b/MLPY/Lib/site-packages/gym/wrappers/compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..4c0886139e542fcba77e39e29ddf421e0d67a2e6 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/compatibility.py @@ -0,0 +1,130 @@ +"""A compatibility wrapper converting an old-style environment into a valid environment.""" +import sys +from typing import Any, Dict, Optional, Tuple + +import gym +from gym.core import ObsType +from gym.utils.step_api_compatibility import convert_to_terminated_truncated_step_api + +if sys.version_info >= (3, 8): + from typing import Protocol, runtime_checkable +elif sys.version_info >= (3, 7): + from typing_extensions import Protocol, runtime_checkable +else: + Protocol = object + runtime_checkable = lambda x: x # noqa: E731 + + +@runtime_checkable +class LegacyEnv(Protocol): + """A protocol for environments using the old step API.""" + + observation_space: gym.Space + action_space: gym.Space + + def reset(self) -> Any: + """Reset the environment and return the initial observation.""" + ... + + def step(self, action: Any) -> Tuple[Any, float, bool, Dict]: + """Run one timestep of the environment's dynamics.""" + ... + + def render(self, mode: Optional[str] = "human") -> Any: + """Render the environment.""" + ... + + def close(self): + """Close the environment.""" + ... + + def seed(self, seed: Optional[int] = None): + """Set the seed for this env's random number generator(s).""" + ... 
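A minimal old-API environment satisfying the `LegacyEnv` protocol above, wrapped with the `EnvCompatibility` class defined just below. The environment itself is hypothetical and only meant to show the API conversion:

from gym.spaces import Discrete
from gym.wrappers.compatibility import EnvCompatibility

class OldStyleEnv:
    """Hypothetical env written against the old 4-tuple step API."""

    observation_space = Discrete(2)
    action_space = Discrete(2)

    def reset(self):
        return 0                                # old API: observation only

    def step(self, action):
        return 0, 1.0, True, {}                 # (obs, reward, done, info)

    def render(self, mode="human"):
        pass

    def close(self):
        pass

    def seed(self, seed=None):
        pass

env = EnvCompatibility(OldStyleEnv())
obs, info = env.reset()                                   # new API: (obs, info)
obs, reward, terminated, truncated, info = env.step(0)    # new 5-tuple step API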
+ + +class EnvCompatibility(gym.Env): + r"""A wrapper which can transform an environment from the old API to the new API. + + Old step API refers to step() method returning (observation, reward, done, info), and reset() only retuning the observation. + New step API refers to step() method returning (observation, reward, terminated, truncated, info) and reset() returning (observation, info). + (Refer to docs for details on the API change) + + Known limitations: + - Environments that use `self.np_random` might not work as expected. + """ + + def __init__(self, old_env: LegacyEnv, render_mode: Optional[str] = None): + """A wrapper which converts old-style envs to valid modern envs. + + Some information may be lost in the conversion, so we recommend updating your environment. + + Args: + old_env (LegacyEnv): the env to wrap, implemented with the old API + render_mode (str): the render mode to use when rendering the environment, passed automatically to env.render + """ + self.metadata = getattr(old_env, "metadata", {"render_modes": []}) + self.render_mode = render_mode + self.reward_range = getattr(old_env, "reward_range", None) + self.spec = getattr(old_env, "spec", None) + self.env = old_env + + self.observation_space = old_env.observation_space + self.action_space = old_env.action_space + + def reset( + self, seed: Optional[int] = None, options: Optional[dict] = None + ) -> Tuple[ObsType, dict]: + """Resets the environment. + + Args: + seed: the seed to reset the environment with + options: the options to reset the environment with + + Returns: + (observation, info) + """ + if seed is not None: + self.env.seed(seed) + # Options are ignored + + if self.render_mode == "human": + self.render() + + return self.env.reset(), {} + + def step(self, action: Any) -> Tuple[Any, float, bool, bool, Dict]: + """Steps through the environment. + + Args: + action: action to step through the environment with + + Returns: + (observation, reward, terminated, truncated, info) + """ + obs, reward, done, info = self.env.step(action) + + if self.render_mode == "human": + self.render() + + return convert_to_terminated_truncated_step_api((obs, reward, done, info)) + + def render(self) -> Any: + """Renders the environment. 
+ + Returns: + The rendering of the environment, depending on the render mode + """ + return self.env.render(mode=self.render_mode) + + def close(self): + """Closes the environment.""" + self.env.close() + + def __str__(self): + """Returns the wrapper name and the unwrapped environment string.""" + return f"<{type(self).__name__}{self.env}>" + + def __repr__(self): + """Returns the string representation of the wrapper.""" + return str(self) diff --git a/MLPY/Lib/site-packages/gym/wrappers/env_checker.py b/MLPY/Lib/site-packages/gym/wrappers/env_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..4c310403b7df987d13cbc3a622f86a7f9d4efcf3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/env_checker.py @@ -0,0 +1,55 @@ +"""A passive environment checker wrapper for an environment's observation and action space along with the reset, step and render functions.""" +import gym +from gym.core import ActType +from gym.utils.passive_env_checker import ( + check_action_space, + check_observation_space, + env_render_passive_checker, + env_reset_passive_checker, + env_step_passive_checker, +) + + +class PassiveEnvChecker(gym.Wrapper): + """A passive environment checker wrapper that surrounds the step, reset and render functions to check they follow the gym API.""" + + def __init__(self, env): + """Initialises the wrapper with the environments, run the observation and action space tests.""" + super().__init__(env) + + assert hasattr( + env, "action_space" + ), "The environment must specify an action space. https://www.gymlibrary.dev/content/environment_creation/" + check_action_space(env.action_space) + assert hasattr( + env, "observation_space" + ), "The environment must specify an observation space. https://www.gymlibrary.dev/content/environment_creation/" + check_observation_space(env.observation_space) + + self.checked_reset = False + self.checked_step = False + self.checked_render = False + + def step(self, action: ActType): + """Steps through the environment that on the first call will run the `passive_env_step_check`.""" + if self.checked_step is False: + self.checked_step = True + return env_step_passive_checker(self.env, action) + else: + return self.env.step(action) + + def reset(self, **kwargs): + """Resets the environment that on the first call will run the `passive_env_reset_check`.""" + if self.checked_reset is False: + self.checked_reset = True + return env_reset_passive_checker(self.env, **kwargs) + else: + return self.env.reset(**kwargs) + + def render(self, *args, **kwargs): + """Renders the environment that on the first call will run the `passive_env_render_check`.""" + if self.checked_render is False: + self.checked_render = True + return env_render_passive_checker(self.env, *args, **kwargs) + else: + return self.env.render(*args, **kwargs) diff --git a/MLPY/Lib/site-packages/gym/wrappers/filter_observation.py b/MLPY/Lib/site-packages/gym/wrappers/filter_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..922c8288038fe555b7041a7ad222a94822e78e59 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/filter_observation.py @@ -0,0 +1,91 @@ +"""A wrapper for filtering dictionary observations by their keys.""" +import copy +from typing import Sequence + +import gym +from gym import spaces + + +class FilterObservation(gym.ObservationWrapper): + """Filter Dict observation space by the keys. + + Example: + >>> import gym + >>> env = gym.wrappers.TransformObservation( + ... 
gym.make('CartPole-v1'), lambda obs: {'obs': obs, 'time': 0} + ... ) + >>> env.observation_space = gym.spaces.Dict(obs=env.observation_space, time=gym.spaces.Discrete(1)) + >>> env.reset() + {'obs': array([-0.00067088, -0.01860439, 0.04772898, -0.01911527], dtype=float32), 'time': 0} + >>> env = FilterObservation(env, filter_keys=['time']) + >>> env.reset() + {'obs': array([ 0.04560107, 0.04466959, -0.0328232 , -0.02367178], dtype=float32)} + >>> env.step(0) + ({'obs': array([ 0.04649447, -0.14996664, -0.03329664, 0.25847703], dtype=float32)}, 1.0, False, {}) + """ + + def __init__(self, env: gym.Env, filter_keys: Sequence[str] = None): + """A wrapper that filters dictionary observations by their keys. + + Args: + env: The environment to apply the wrapper + filter_keys: List of keys to be included in the observations. If ``None``, observations will not be filtered and this wrapper has no effect + + Raises: + ValueError: If the environment's observation space is not :class:`spaces.Dict` + ValueError: If any of the `filter_keys` are not included in the original `env`'s observation space + """ + super().__init__(env) + + wrapped_observation_space = env.observation_space + if not isinstance(wrapped_observation_space, spaces.Dict): + raise ValueError( + f"FilterObservationWrapper is only usable with dict observations, " + f"environment observation space is {type(wrapped_observation_space)}" + ) + + observation_keys = wrapped_observation_space.spaces.keys() + if filter_keys is None: + filter_keys = tuple(observation_keys) + + missing_keys = {key for key in filter_keys if key not in observation_keys} + if missing_keys: + raise ValueError( + "All the filter_keys must be included in the original observation space.\n" + f"Filter keys: {filter_keys}\n" + f"Observation keys: {observation_keys}\n" + f"Missing keys: {missing_keys}" + ) + + self.observation_space = type(wrapped_observation_space)( + [ + (name, copy.deepcopy(space)) + for name, space in wrapped_observation_space.spaces.items() + if name in filter_keys + ] + ) + + self._env = env + self._filter_keys = tuple(filter_keys) + + def observation(self, observation): + """Filters the observations. + + Args: + observation: The observation to filter + + Returns: + The filtered observations + """ + filter_observation = self._filter_observation(observation) + return filter_observation + + def _filter_observation(self, observation): + observation = type(observation)( + [ + (name, value) + for name, value in observation.items() + if name in self._filter_keys + ] + ) + return observation diff --git a/MLPY/Lib/site-packages/gym/wrappers/flatten_observation.py b/MLPY/Lib/site-packages/gym/wrappers/flatten_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6518b875b75c7dfcced6ed7c7340cb27f7d07e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/flatten_observation.py @@ -0,0 +1,40 @@ +"""Wrapper for flattening observations of an environment.""" +import gym +import gym.spaces as spaces + + +class FlattenObservation(gym.ObservationWrapper): + """Observation wrapper that flattens the observation. + + Example: + >>> import gym + >>> env = gym.make('CarRacing-v1') + >>> env.observation_space.shape + (96, 96, 3) + >>> env = FlattenObservation(env) + >>> env.observation_space.shape + (27648,) + >>> obs = env.reset() + >>> obs.shape + (27648,) + """ + + def __init__(self, env: gym.Env): + """Flattens the observations of an environment. 
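+        For context, a small illustrative sketch of the flatten utilities this wrapper builds on
+        (the shapes simply follow from the sizes of the Dict entries):
+
+        >>> from gym import spaces
+        >>> space = spaces.Dict({"pos": spaces.Box(-1, 1, (2,)), "vel": spaces.Box(-1, 1, (3,))})
+        >>> spaces.flatten_space(space).shape
+        (5,)
+        >>> spaces.flatten(space, space.sample()).shape
+        (5,)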
+ + Args: + env: The environment to apply the wrapper + """ + super().__init__(env) + self.observation_space = spaces.flatten_space(env.observation_space) + + def observation(self, observation): + """Flattens an observation. + + Args: + observation: The observation to flatten + + Returns: + The flattened observation + """ + return spaces.flatten(self.env.observation_space, observation) diff --git a/MLPY/Lib/site-packages/gym/wrappers/frame_stack.py b/MLPY/Lib/site-packages/gym/wrappers/frame_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..d55ae998c7bed46b64e6d212aa9cc409bef1f59e --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/frame_stack.py @@ -0,0 +1,190 @@ +"""Wrapper that stacks frames.""" +from collections import deque +from typing import Union + +import numpy as np + +import gym +from gym.error import DependencyNotInstalled +from gym.spaces import Box + + +class LazyFrames: + """Ensures common frames are only stored once to optimize memory use. + + To further reduce the memory use, it is optionally to turn on lz4 to compress the observations. + + Note: + This object should only be converted to numpy array just before forward pass. + """ + + __slots__ = ("frame_shape", "dtype", "shape", "lz4_compress", "_frames") + + def __init__(self, frames: list, lz4_compress: bool = False): + """Lazyframe for a set of frames and if to apply lz4. + + Args: + frames (list): The frames to convert to lazy frames + lz4_compress (bool): Use lz4 to compress the frames internally + + Raises: + DependencyNotInstalled: lz4 is not installed + """ + self.frame_shape = tuple(frames[0].shape) + self.shape = (len(frames),) + self.frame_shape + self.dtype = frames[0].dtype + if lz4_compress: + try: + from lz4.block import compress + except ImportError: + raise DependencyNotInstalled( + "lz4 is not installed, run `pip install gym[other]`" + ) + + frames = [compress(frame) for frame in frames] + self._frames = frames + self.lz4_compress = lz4_compress + + def __array__(self, dtype=None): + """Gets a numpy array of stacked frames with specific dtype. + + Args: + dtype: The dtype of the stacked frames + + Returns: + The array of stacked frames with dtype + """ + arr = self[:] + if dtype is not None: + return arr.astype(dtype) + return arr + + def __len__(self): + """Returns the number of frame stacks. + + Returns: + The number of frame stacks + """ + return self.shape[0] + + def __getitem__(self, int_or_slice: Union[int, slice]): + """Gets the stacked frames for a particular index or slice. + + Args: + int_or_slice: Index or slice to get items for + + Returns: + np.stacked frames for the int or slice + + """ + if isinstance(int_or_slice, int): + return self._check_decompress(self._frames[int_or_slice]) # single frame + return np.stack( + [self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0 + ) + + def __eq__(self, other): + """Checks that the current frames are equal to the other object.""" + return self.__array__() == other + + def _check_decompress(self, frame): + if self.lz4_compress: + from lz4.block import decompress + + return np.frombuffer(decompress(frame), dtype=self.dtype).reshape( + self.frame_shape + ) + return frame + + +class FrameStack(gym.ObservationWrapper): + """Observation wrapper that stacks the observations in a rolling manner. + + For example, if the number of stacks is 4, then the returned observation contains + the most recent 4 observations. 
For environment 'Pendulum-v1', the original observation + is an array with shape [3], so if we stack 4 observations, the processed observation + has shape [4, 3]. + + Note: + - To be memory efficient, the stacked observations are wrapped by :class:`LazyFrame`. + - The observation space must be :class:`Box` type. If one uses :class:`Dict` + as observation space, it should apply :class:`FlattenObservation` wrapper first. + - After :meth:`reset` is called, the frame buffer will be filled with the initial observation. I.e. the observation returned by :meth:`reset` will consist of ``num_stack`-many identical frames, + + Example: + >>> import gym + >>> env = gym.make('CarRacing-v1') + >>> env = FrameStack(env, 4) + >>> env.observation_space + Box(4, 96, 96, 3) + >>> obs = env.reset() + >>> obs.shape + (4, 96, 96, 3) + """ + + def __init__( + self, + env: gym.Env, + num_stack: int, + lz4_compress: bool = False, + ): + """Observation wrapper that stacks the observations in a rolling manner. + + Args: + env (Env): The environment to apply the wrapper + num_stack (int): The number of frames to stack + lz4_compress (bool): Use lz4 to compress the frames internally + """ + super().__init__(env) + self.num_stack = num_stack + self.lz4_compress = lz4_compress + + self.frames = deque(maxlen=num_stack) + + low = np.repeat(self.observation_space.low[np.newaxis, ...], num_stack, axis=0) + high = np.repeat( + self.observation_space.high[np.newaxis, ...], num_stack, axis=0 + ) + self.observation_space = Box( + low=low, high=high, dtype=self.observation_space.dtype + ) + + def observation(self, observation): + """Converts the wrappers current frames to lazy frames. + + Args: + observation: Ignored + + Returns: + :class:`LazyFrames` object for the wrapper's frame buffer, :attr:`self.frames` + """ + assert len(self.frames) == self.num_stack, (len(self.frames), self.num_stack) + return LazyFrames(list(self.frames), self.lz4_compress) + + def step(self, action): + """Steps through the environment, appending the observation to the frame buffer. + + Args: + action: The action to step through the environment with + + Returns: + Stacked observations, reward, terminated, truncated, and information from the environment + """ + observation, reward, terminated, truncated, info = self.env.step(action) + self.frames.append(observation) + return self.observation(None), reward, terminated, truncated, info + + def reset(self, **kwargs): + """Reset the environment with kwargs. + + Args: + **kwargs: The kwargs for the environment reset + + Returns: + The stacked observations + """ + obs, info = self.env.reset(**kwargs) + + [self.frames.append(obs) for _ in range(self.num_stack)] + + return self.observation(None), info diff --git a/MLPY/Lib/site-packages/gym/wrappers/gray_scale_observation.py b/MLPY/Lib/site-packages/gym/wrappers/gray_scale_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..1c626f41f4fb55b90c8683c9d788b57c3bad7674 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/gray_scale_observation.py @@ -0,0 +1,64 @@ +"""Wrapper that converts a color observation to grayscale.""" +import numpy as np + +import gym +from gym.spaces import Box + + +class GrayScaleObservation(gym.ObservationWrapper): + """Convert the image observation from RGB to gray scale. 
+ + Example: + >>> env = gym.make('CarRacing-v1') + >>> env.observation_space + Box(0, 255, (96, 96, 3), uint8) + >>> env = GrayScaleObservation(gym.make('CarRacing-v1')) + >>> env.observation_space + Box(0, 255, (96, 96), uint8) + >>> env = GrayScaleObservation(gym.make('CarRacing-v1'), keep_dim=True) + >>> env.observation_space + Box(0, 255, (96, 96, 1), uint8) + """ + + def __init__(self, env: gym.Env, keep_dim: bool = False): + """Convert the image observation from RGB to gray scale. + + Args: + env (Env): The environment to apply the wrapper + keep_dim (bool): If `True`, a singleton dimension will be added, i.e. observations are of the shape AxBx1. + Otherwise, they are of shape AxB. + """ + super().__init__(env) + self.keep_dim = keep_dim + + assert ( + isinstance(self.observation_space, Box) + and len(self.observation_space.shape) == 3 + and self.observation_space.shape[-1] == 3 + ) + + obs_shape = self.observation_space.shape[:2] + if self.keep_dim: + self.observation_space = Box( + low=0, high=255, shape=(obs_shape[0], obs_shape[1], 1), dtype=np.uint8 + ) + else: + self.observation_space = Box( + low=0, high=255, shape=obs_shape, dtype=np.uint8 + ) + + def observation(self, observation): + """Converts the colour observation to greyscale. + + Args: + observation: Color observations + + Returns: + Grayscale observations + """ + import cv2 + + observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY) + if self.keep_dim: + observation = np.expand_dims(observation, -1) + return observation diff --git a/MLPY/Lib/site-packages/gym/wrappers/human_rendering.py b/MLPY/Lib/site-packages/gym/wrappers/human_rendering.py new file mode 100644 index 0000000000000000000000000000000000000000..f488e9d1b80ac87068223d2e9983021b47308200 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/human_rendering.py @@ -0,0 +1,132 @@ +"""A wrapper that adds human-renering functionality to an environment.""" +import numpy as np + +import gym +from gym.error import DependencyNotInstalled + + +class HumanRendering(gym.Wrapper): + """Performs human rendering for an environment that only supports "rgb_array"rendering. + + This wrapper is particularly useful when you have implemented an environment that can produce + RGB images but haven't implemented any code to render the images to the screen. + If you want to use this wrapper with your environments, remember to specify ``"render_fps"`` + in the metadata of your environment. + + The ``render_mode`` of the wrapped environment must be either ``'rgb_array'`` or ``'rgb_array_list'``. + + Example: + >>> env = gym.make("LunarLander-v2", render_mode="rgb_array") + >>> wrapped = HumanRendering(env) + >>> wrapped.reset() # This will start rendering to the screen + + The wrapper can also be applied directly when the environment is instantiated, simply by passing + ``render_mode="human"`` to ``make``. The wrapper will only be applied if the environment does not + implement human-rendering natively (i.e. ``render_mode`` does not contain ``"human"``). + + Example: + >>> env = gym.make("NoNativeRendering-v2", render_mode="human") # NoNativeRendering-v0 doesn't implement human-rendering natively + >>> env.reset() # This will start rendering to the screen + + Warning: If the base environment uses ``render_mode="rgb_array_list"``, its (i.e. 
the *base environment's*) render method + will always return an empty list: + + >>> env = gym.make("LunarLander-v2", render_mode="rgb_array_list") + >>> wrapped = HumanRendering(env) + >>> wrapped.reset() + >>> env.render() + [] # env.render() will always return an empty list! + + """ + + def __init__(self, env): + """Initialize a :class:`HumanRendering` instance. + + Args: + env: The environment that is being wrapped + """ + super().__init__(env) + assert env.render_mode in [ + "rgb_array", + "rgb_array_list", + ], f"Expected env.render_mode to be one of 'rgb_array' or 'rgb_array_list' but got '{env.render_mode}'" + assert ( + "render_fps" in env.metadata + ), "The base environment must specify 'render_fps' to be used with the HumanRendering wrapper" + + self.screen_size = None + self.window = None + self.clock = None + + @property + def render_mode(self): + """Always returns ``'human'``.""" + return "human" + + def step(self, *args, **kwargs): + """Perform a step in the base environment and render a frame to the screen.""" + result = self.env.step(*args, **kwargs) + self._render_frame() + return result + + def reset(self, *args, **kwargs): + """Reset the base environment and render a frame to the screen.""" + result = self.env.reset(*args, **kwargs) + self._render_frame() + return result + + def render(self): + """This method doesn't do much, actual rendering is performed in :meth:`step` and :meth:`reset`.""" + return None + + def _render_frame(self): + """Fetch the last frame from the base environment and render it to the screen.""" + try: + import pygame + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[box2d]`" + ) + if self.env.render_mode == "rgb_array_list": + last_rgb_array = self.env.render() + assert isinstance(last_rgb_array, list) + last_rgb_array = last_rgb_array[-1] + elif self.env.render_mode == "rgb_array": + last_rgb_array = self.env.render() + else: + raise Exception( + f"Wrapped environment must have mode 'rgb_array' or 'rgb_array_list', actual render mode: {self.env.render_mode}" + ) + assert isinstance(last_rgb_array, np.ndarray) + + rgb_array = np.transpose(last_rgb_array, axes=(1, 0, 2)) + + if self.screen_size is None: + self.screen_size = rgb_array.shape[:2] + + assert ( + self.screen_size == rgb_array.shape[:2] + ), f"The shape of the rgb array has changed from {self.screen_size} to {rgb_array.shape[:2]}" + + if self.window is None: + pygame.init() + pygame.display.init() + self.window = pygame.display.set_mode(self.screen_size) + + if self.clock is None: + self.clock = pygame.time.Clock() + + surf = pygame.surfarray.make_surface(rgb_array) + self.window.blit(surf, (0, 0)) + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + def close(self): + """Close the rendering window.""" + super().close() + if self.window is not None: + import pygame + + pygame.display.quit() + pygame.quit() diff --git a/MLPY/Lib/site-packages/gym/wrappers/monitoring/__init__.py b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..589c74c7e7fe4ea1467b82e4692dd4828b5f08c4 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__init__.py @@ -0,0 +1 @@ +"""Module for monitoring.video_recorder.""" diff --git a/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0a57276db09aac69ced63581fe36317bdc21f9f8 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/video_recorder.cpython-39.pyc b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/video_recorder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ffba663f9f9564303b5b93ecf018232c1e47511 Binary files /dev/null and b/MLPY/Lib/site-packages/gym/wrappers/monitoring/__pycache__/video_recorder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/gym/wrappers/monitoring/video_recorder.py b/MLPY/Lib/site-packages/gym/wrappers/monitoring/video_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..9e34102de771aec1660062fd86a85b7a723c20d3 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/monitoring/video_recorder.py @@ -0,0 +1,178 @@ +"""A wrapper for video recording environments by rolling it out, frame by frame.""" +import json +import os +import os.path +import tempfile +from typing import List, Optional + +from gym import error, logger + + +class VideoRecorder: + """VideoRecorder renders a nice movie of a rollout, frame by frame. + + It comes with an ``enabled`` option, so you can still use the same code on episodes where you don't want to record video. + + Note: + You are responsible for calling :meth:`close` on a created VideoRecorder, or else you may leak an encoder process. + """ + + def __init__( + self, + env, + path: Optional[str] = None, + metadata: Optional[dict] = None, + enabled: bool = True, + base_path: Optional[str] = None, + ): + """Video recorder renders a nice movie of a rollout, frame by frame. + + Args: + env (Env): Environment to take video of. + path (Optional[str]): Path to the video file; will be randomly chosen if omitted. + metadata (Optional[dict]): Contents to save to the metadata file. + enabled (bool): Whether to actually record video, or just no-op (for convenience) + base_path (Optional[str]): Alternatively, path to the video file without extension, which will be added. 
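+            A minimal usage sketch (illustrative only; assumes a registered id such as "CartPole-v1"
+            and that moviepy is installed):
+
+            >>> import gym
+            >>> from gym.wrappers.monitoring.video_recorder import VideoRecorder
+            >>> env = gym.make("CartPole-v1", render_mode="rgb_array")
+            >>> recorder = VideoRecorder(env, base_path="cartpole-demo")
+            >>> env.reset()
+            >>> recorder.capture_frame()
+            >>> recorder.close()  # also closes the env and writes cartpole-demo.mp4 plus the metadata json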
+ + Raises: + Error: You can pass at most one of `path` or `base_path` + Error: Invalid path given that must have a particular file extension + """ + try: + # check that moviepy is now installed + import moviepy # noqa: F401 + except ImportError: + raise error.DependencyNotInstalled( + "MoviePy is not installed, run `pip install moviepy`" + ) + + self._async = env.metadata.get("semantics.async") + self.enabled = enabled + self._closed = False + + self.render_history = [] + self.env = env + + self.render_mode = env.render_mode + + if "rgb_array_list" != self.render_mode and "rgb_array" != self.render_mode: + logger.warn( + f"Disabling video recorder because environment {env} was not initialized with any compatible video " + "mode between `rgb_array` and `rgb_array_list`" + ) + # Disable since the environment has not been initialized with a compatible `render_mode` + self.enabled = False + + # Don't bother setting anything else if not enabled + if not self.enabled: + return + + if path is not None and base_path is not None: + raise error.Error("You can pass at most one of `path` or `base_path`.") + + required_ext = ".mp4" + if path is None: + if base_path is not None: + # Base path given, append ext + path = base_path + required_ext + else: + # Otherwise, just generate a unique filename + with tempfile.NamedTemporaryFile(suffix=required_ext) as f: + path = f.name + self.path = path + + path_base, actual_ext = os.path.splitext(self.path) + + if actual_ext != required_ext: + raise error.Error( + f"Invalid path given: {self.path} -- must have file extension {required_ext}." + ) + + self.frames_per_sec = env.metadata.get("render_fps", 30) + + self.broken = False + + # Dump metadata + self.metadata = metadata or {} + self.metadata["content_type"] = "video/mp4" + self.metadata_path = f"{path_base}.meta.json" + self.write_metadata() + + logger.info(f"Starting new video recorder writing to {self.path}") + self.recorded_frames = [] + + @property + def functional(self): + """Returns if the video recorder is functional, is enabled and not broken.""" + return self.enabled and not self.broken + + def capture_frame(self): + """Render the given `env` and add the resulting frame to the video.""" + frame = self.env.render() + if isinstance(frame, List): + self.render_history += frame + frame = frame[-1] + + if not self.functional: + return + if self._closed: + logger.warn( + "The video recorder has been closed and no frames will be captured anymore." + ) + return + logger.debug("Capturing video frame: path=%s", self.path) + + if frame is None: + if self._async: + return + else: + # Indicates a bug in the environment: don't want to raise + # an error here. + logger.warn( + "Env returned None on `render()`. 
Disabling further rendering for video recorder by marking as " + f"disabled: path={self.path} metadata_path={self.metadata_path}" + ) + self.broken = True + else: + self.recorded_frames.append(frame) + + def close(self): + """Flush all data to disk and close any open frame encoders.""" + if not self.enabled or self._closed: + return + + # First close the environment + self.env.close() + + # Close the encoder + if len(self.recorded_frames) > 0: + try: + from moviepy.video.io.ImageSequenceClip import ImageSequenceClip + except ImportError: + raise error.DependencyNotInstalled( + "MoviePy is not installed, run `pip install moviepy`" + ) + + logger.debug(f"Closing video encoder: path={self.path}") + clip = ImageSequenceClip(self.recorded_frames, fps=self.frames_per_sec) + clip.write_videofile(self.path) + else: + # No frames captured. Set metadata. + if self.metadata is None: + self.metadata = {} + self.metadata["empty"] = True + + self.write_metadata() + + # Stop tracking this for autoclose + self._closed = True + + def write_metadata(self): + """Writes metadata to metadata path.""" + with open(self.metadata_path, "w") as f: + json.dump(self.metadata, f) + + def __del__(self): + """Closes the environment correctly when the recorder is deleted.""" + # Make sure we've closed up shop when garbage collecting + self.close() diff --git a/MLPY/Lib/site-packages/gym/wrappers/normalize.py b/MLPY/Lib/site-packages/gym/wrappers/normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..e8b51675c02e79198b3825e3cf3eb3e5a8be7988 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/normalize.py @@ -0,0 +1,144 @@ +"""Set of wrappers for normalizing actions and observations.""" +import numpy as np + +import gym + + +# taken from https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_normalize.py +class RunningMeanStd: + """Tracks the mean, variance and count of values.""" + + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm + def __init__(self, epsilon=1e-4, shape=()): + """Tracks the mean, variance and count of values.""" + self.mean = np.zeros(shape, "float64") + self.var = np.ones(shape, "float64") + self.count = epsilon + + def update(self, x): + """Updates the mean, var and count from a batch of samples.""" + batch_mean = np.mean(x, axis=0) + batch_var = np.var(x, axis=0) + batch_count = x.shape[0] + self.update_from_moments(batch_mean, batch_var, batch_count) + + def update_from_moments(self, batch_mean, batch_var, batch_count): + """Updates from batch mean, variance and count moments.""" + self.mean, self.var, self.count = update_mean_var_count_from_moments( + self.mean, self.var, self.count, batch_mean, batch_var, batch_count + ) + + +def update_mean_var_count_from_moments( + mean, var, count, batch_mean, batch_var, batch_count +): + """Updates the mean, var and count using the previous mean, var, count and batch values.""" + delta = batch_mean - mean + tot_count = count + batch_count + + new_mean = mean + delta * batch_count / tot_count + m_a = var * count + m_b = batch_var * batch_count + M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count + new_var = M2 / tot_count + new_count = tot_count + + return new_mean, new_var, new_count + + +class NormalizeObservation(gym.core.Wrapper): + """This wrapper will normalize observations s.t. each coordinate is centered with unit variance. 
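+    A minimal usage sketch (illustrative; assumes the standard "CartPole-v1" registration):
+
+    >>> import gym
+    >>> from gym.wrappers import NormalizeObservation
+    >>> env = NormalizeObservation(gym.make("CartPole-v1"))
+    >>> obs, info = env.reset(seed=0)
+    >>> obs, reward, terminated, truncated, info = env.step(env.action_space.sample())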
+ + Note: + The normalization depends on past trajectories and observations will not be normalized correctly if the wrapper was + newly instantiated or the policy was changed recently. + """ + + def __init__(self, env: gym.Env, epsilon: float = 1e-8): + """This wrapper will normalize observations s.t. each coordinate is centered with unit variance. + + Args: + env (Env): The environment to apply the wrapper + epsilon: A stability parameter that is used when scaling the observations. + """ + super().__init__(env) + self.num_envs = getattr(env, "num_envs", 1) + self.is_vector_env = getattr(env, "is_vector_env", False) + if self.is_vector_env: + self.obs_rms = RunningMeanStd(shape=self.single_observation_space.shape) + else: + self.obs_rms = RunningMeanStd(shape=self.observation_space.shape) + self.epsilon = epsilon + + def step(self, action): + """Steps through the environment and normalizes the observation.""" + obs, rews, terminateds, truncateds, infos = self.env.step(action) + if self.is_vector_env: + obs = self.normalize(obs) + else: + obs = self.normalize(np.array([obs]))[0] + return obs, rews, terminateds, truncateds, infos + + def reset(self, **kwargs): + """Resets the environment and normalizes the observation.""" + obs, info = self.env.reset(**kwargs) + + if self.is_vector_env: + return self.normalize(obs), info + else: + return self.normalize(np.array([obs]))[0], info + + def normalize(self, obs): + """Normalises the observation using the running mean and variance of the observations.""" + self.obs_rms.update(obs) + return (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.epsilon) + + +class NormalizeReward(gym.core.Wrapper): + r"""This wrapper will normalize immediate rewards s.t. their exponential moving average has a fixed variance. + + The exponential moving average will have variance :math:`(1 - \gamma)^2`. + + Note: + The scaling depends on past trajectories and rewards will not be scaled correctly if the wrapper was newly + instantiated or the policy was changed recently. + """ + + def __init__( + self, + env: gym.Env, + gamma: float = 0.99, + epsilon: float = 1e-8, + ): + """This wrapper will normalize immediate rewards s.t. their exponential moving average has a fixed variance. + + Args: + env (env): The environment to apply the wrapper + epsilon (float): A stability parameter + gamma (float): The discount factor that is used in the exponential moving average. 
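+            A short illustrative sketch (assumes "CartPole-v1"; the running statistics only become
+            meaningful after many steps):
+
+            >>> import gym
+            >>> from gym.wrappers import NormalizeReward
+            >>> env = NormalizeReward(gym.make("CartPole-v1"), gamma=0.99)
+            >>> obs, info = env.reset(seed=0)
+            >>> obs, scaled_reward, terminated, truncated, info = env.step(env.action_space.sample())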
+ """ + super().__init__(env) + self.num_envs = getattr(env, "num_envs", 1) + self.is_vector_env = getattr(env, "is_vector_env", False) + self.return_rms = RunningMeanStd(shape=()) + self.returns = np.zeros(self.num_envs) + self.gamma = gamma + self.epsilon = epsilon + + def step(self, action): + """Steps through the environment, normalizing the rewards returned.""" + obs, rews, terminateds, truncateds, infos = self.env.step(action) + if not self.is_vector_env: + rews = np.array([rews]) + self.returns = self.returns * self.gamma + rews + rews = self.normalize(rews) + dones = np.logical_or(terminateds, truncateds) + self.returns[dones] = 0.0 + if not self.is_vector_env: + rews = rews[0] + return obs, rews, terminateds, truncateds, infos + + def normalize(self, rews): + """Normalizes the rewards with the running mean rewards and their variance.""" + self.return_rms.update(self.returns) + return rews / np.sqrt(self.return_rms.var + self.epsilon) diff --git a/MLPY/Lib/site-packages/gym/wrappers/order_enforcing.py b/MLPY/Lib/site-packages/gym/wrappers/order_enforcing.py new file mode 100644 index 0000000000000000000000000000000000000000..d9f853e72bc6247b7e6cf1fa25b3bf6924b87a5b --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/order_enforcing.py @@ -0,0 +1,56 @@ +"""Wrapper to enforce the proper ordering of environment operations.""" +import gym +from gym.error import ResetNeeded + + +class OrderEnforcing(gym.Wrapper): + """A wrapper that will produce an error if :meth:`step` is called before an initial :meth:`reset`. + + Example: + >>> from gym.envs.classic_control import CartPoleEnv + >>> env = CartPoleEnv() + >>> env = OrderEnforcing(env) + >>> env.step(0) + ResetNeeded: Cannot call env.step() before calling env.reset() + >>> env.render() + ResetNeeded: Cannot call env.render() before calling env.reset() + >>> env.reset() + >>> env.render() + >>> env.step(0) + """ + + def __init__(self, env: gym.Env, disable_render_order_enforcing: bool = False): + """A wrapper that will produce an error if :meth:`step` is called before an initial :meth:`reset`. + + Args: + env: The environment to wrap + disable_render_order_enforcing: If to disable render order enforcing + """ + super().__init__(env) + self._has_reset: bool = False + self._disable_render_order_enforcing: bool = disable_render_order_enforcing + + def step(self, action): + """Steps through the environment with `kwargs`.""" + if not self._has_reset: + raise ResetNeeded("Cannot call env.step() before calling env.reset()") + return self.env.step(action) + + def reset(self, **kwargs): + """Resets the environment with `kwargs`.""" + self._has_reset = True + return self.env.reset(**kwargs) + + def render(self, *args, **kwargs): + """Renders the environment with `kwargs`.""" + if not self._disable_render_order_enforcing and not self._has_reset: + raise ResetNeeded( + "Cannot call `env.render()` before calling `env.reset()`, if this is a intended action, " + "set `disable_render_order_enforcing=True` on the OrderEnforcer wrapper." 
+ ) + return self.env.render(*args, **kwargs) + + @property + def has_reset(self): + """Returns if the environment has been reset before.""" + return self._has_reset diff --git a/MLPY/Lib/site-packages/gym/wrappers/pixel_observation.py b/MLPY/Lib/site-packages/gym/wrappers/pixel_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..9a863c4b1f05dcff15b55ffda6065d5c98e823e0 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/pixel_observation.py @@ -0,0 +1,207 @@ +"""Wrapper for augmenting observations by pixel values.""" +import collections +import copy +from collections.abc import MutableMapping +from typing import Any, Dict, List, Optional, Tuple + +import numpy as np + +import gym +from gym import spaces + +STATE_KEY = "state" + + +class PixelObservationWrapper(gym.ObservationWrapper): + """Augment observations by pixel values. + + Observations of this wrapper will be dictionaries of images. + You can also choose to add the observation of the base environment to this dictionary. + In that case, if the base environment has an observation space of type :class:`Dict`, the dictionary + of rendered images will be updated with the base environment's observation. If, however, the observation + space is of type :class:`Box`, the base environment's observation (which will be an element of the :class:`Box` + space) will be added to the dictionary under the key "state". + + Example: + >>> import gym + >>> env = PixelObservationWrapper(gym.make('CarRacing-v1', render_mode="rgb_array")) + >>> obs = env.reset() + >>> obs.keys() + odict_keys(['pixels']) + >>> obs['pixels'].shape + (400, 600, 3) + >>> env = PixelObservationWrapper(gym.make('CarRacing-v1', render_mode="rgb_array"), pixels_only=False) + >>> obs = env.reset() + >>> obs.keys() + odict_keys(['state', 'pixels']) + >>> obs['state'].shape + (96, 96, 3) + >>> obs['pixels'].shape + (400, 600, 3) + >>> env = PixelObservationWrapper(gym.make('CarRacing-v1', render_mode="rgb_array"), pixel_keys=('obs',)) + >>> obs = env.reset() + >>> obs.keys() + odict_keys(['obs']) + >>> obs['obs'].shape + (400, 600, 3) + """ + + def __init__( + self, + env: gym.Env, + pixels_only: bool = True, + render_kwargs: Optional[Dict[str, Dict[str, Any]]] = None, + pixel_keys: Tuple[str, ...] = ("pixels",), + ): + """Initializes a new pixel Wrapper. + + Args: + env: The environment to wrap. + pixels_only (bool): If ``True`` (default), the original observation returned + by the wrapped environment will be discarded, and a dictionary + observation will only include pixels. If ``False``, the + observation dictionary will contain both the original + observations and the pixel observations. + render_kwargs (dict): Optional dictionary containing that maps elements of ``pixel_keys``to + keyword arguments passed to the :meth:`self.render` method. + pixel_keys: Optional custom string specifying the pixel + observation's key in the ``OrderedDict`` of observations. + Defaults to ``(pixels,)``. + + Raises: + AssertionError: If any of the keys in ``render_kwargs``do not show up in ``pixel_keys``. + ValueError: If ``env``'s observation space is not compatible with the + wrapper. Supported formats are a single array, or a dict of + arrays. + ValueError: If ``env``'s observation already contains any of the + specified ``pixel_keys``. 
+ TypeError: When an unexpected pixel type is used + """ + super().__init__(env) + + # Avoid side-effects that occur when render_kwargs is manipulated + render_kwargs = copy.deepcopy(render_kwargs) + self.render_history = [] + + if render_kwargs is None: + render_kwargs = {} + + for key in render_kwargs: + assert key in pixel_keys, ( + "The argument render_kwargs should map elements of " + "pixel_keys to dictionaries of keyword arguments. " + f"Found key '{key}' in render_kwargs but not in pixel_keys." + ) + + default_render_kwargs = {} + if not env.render_mode: + raise AttributeError( + "env.render_mode must be specified to use PixelObservationWrapper:" + "`gym.make(env_name, render_mode='rgb_array')`." + ) + + for key in pixel_keys: + render_kwargs.setdefault(key, default_render_kwargs) + + wrapped_observation_space = env.observation_space + + if isinstance(wrapped_observation_space, spaces.Box): + self._observation_is_dict = False + invalid_keys = {STATE_KEY} + elif isinstance(wrapped_observation_space, (spaces.Dict, MutableMapping)): + self._observation_is_dict = True + invalid_keys = set(wrapped_observation_space.spaces.keys()) + else: + raise ValueError("Unsupported observation space structure.") + + if not pixels_only: + # Make sure that now keys in the `pixel_keys` overlap with + # `observation_keys` + overlapping_keys = set(pixel_keys) & set(invalid_keys) + if overlapping_keys: + raise ValueError( + f"Duplicate or reserved pixel keys {overlapping_keys!r}." + ) + + if pixels_only: + self.observation_space = spaces.Dict() + elif self._observation_is_dict: + self.observation_space = copy.deepcopy(wrapped_observation_space) + else: + self.observation_space = spaces.Dict({STATE_KEY: wrapped_observation_space}) + + # Extend observation space with pixels. + + self.env.reset() + pixels_spaces = {} + for pixel_key in pixel_keys: + pixels = self._render(**render_kwargs[pixel_key]) + pixels: np.ndarray = pixels[-1] if isinstance(pixels, List) else pixels + + if not hasattr(pixels, "dtype") or not hasattr(pixels, "shape"): + raise TypeError( + f"Render method returns a {pixels.__class__.__name__}, but an array with dtype and shape is expected." + "Be sure to specify the correct render_mode." + ) + + if np.issubdtype(pixels.dtype, np.integer): + low, high = (0, 255) + elif np.issubdtype(pixels.dtype, np.float): + low, high = (-float("inf"), float("inf")) + else: + raise TypeError(pixels.dtype) + + pixels_space = spaces.Box( + shape=pixels.shape, low=low, high=high, dtype=pixels.dtype + ) + pixels_spaces[pixel_key] = pixels_space + + self.observation_space.spaces.update(pixels_spaces) + + self._pixels_only = pixels_only + self._render_kwargs = render_kwargs + self._pixel_keys = pixel_keys + + def observation(self, observation): + """Updates the observations with the pixel observations. 
+ + Args: + observation: The observation to add pixel observations for + + Returns: + The updated pixel observations + """ + pixel_observation = self._add_pixel_observation(observation) + return pixel_observation + + def _add_pixel_observation(self, wrapped_observation): + if self._pixels_only: + observation = collections.OrderedDict() + elif self._observation_is_dict: + observation = type(wrapped_observation)(wrapped_observation) + else: + observation = collections.OrderedDict() + observation[STATE_KEY] = wrapped_observation + + pixel_observations = { + pixel_key: self._render(**self._render_kwargs[pixel_key]) + for pixel_key in self._pixel_keys + } + + observation.update(pixel_observations) + + return observation + + def render(self, *args, **kwargs): + """Renders the environment.""" + render = self.env.render(*args, **kwargs) + if isinstance(render, list): + render = self.render_history + render + self.render_history = [] + return render + + def _render(self, *args, **kwargs): + render = self.env.render(*args, **kwargs) + if isinstance(render, list): + self.render_history += render + return render diff --git a/MLPY/Lib/site-packages/gym/wrappers/record_episode_statistics.py b/MLPY/Lib/site-packages/gym/wrappers/record_episode_statistics.py new file mode 100644 index 0000000000000000000000000000000000000000..0a822cea4ea895b2aa89317126f039cd9e24f330 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/record_episode_statistics.py @@ -0,0 +1,151 @@ +"""Wrapper that tracks the cumulative rewards and episode lengths.""" +import time +from collections import deque +from typing import Optional + +import numpy as np + +import gym + + +def add_vector_episode_statistics( + info: dict, episode_info: dict, num_envs: int, env_num: int +): + """Add episode statistics. + + Add statistics coming from the vectorized environment. + + Args: + info (dict): info dict of the environment. + episode_info (dict): episode statistics data. + num_envs (int): number of environments. + env_num (int): env number of the vectorized environments. + + Returns: + info (dict): the input info dict with the episode statistics. + """ + info["episode"] = info.get("episode", {}) + + info["_episode"] = info.get("_episode", np.zeros(num_envs, dtype=bool)) + info["_episode"][env_num] = True + + for k in episode_info.keys(): + info_array = info["episode"].get(k, np.zeros(num_envs)) + info_array[env_num] = episode_info[k] + info["episode"][k] = info_array + + return info + + +class RecordEpisodeStatistics(gym.Wrapper): + """This wrapper will keep track of cumulative rewards and episode lengths. + + At the end of an episode, the statistics of the episode will be added to ``info`` + using the key ``episode``. If using a vectorized environment also the key + ``_episode`` is used which indicates whether the env at the respective index has + the episode statistics. + + After the completion of an episode, ``info`` will look like this:: + + >>> info = { + ... ... + ... "episode": { + ... "r": "", + ... "l": "", + ... "t": "" + ... }, + ... } + + For a vectorized environments the output will be in the form of:: + + >>> infos = { + ... ... + ... "episode": { + ... "r": "", + ... "l": "", + ... "t": "" + ... }, + ... "_episode": "" + ... } + + Moreover, the most recent rewards and episode lengths are stored in buffers that can be accessed via + :attr:`wrapped_env.return_queue` and :attr:`wrapped_env.length_queue` respectively. 
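+    For illustration, a minimal sketch of reading the recorded statistics (assumes the standard
+    "CartPole-v1" registration):
+
+    >>> import gym
+    >>> from gym.wrappers import RecordEpisodeStatistics
+    >>> env = RecordEpisodeStatistics(gym.make("CartPole-v1"))
+    >>> obs, info = env.reset(seed=0)
+    >>> terminated = truncated = False
+    >>> while not (terminated or truncated):
+    ...     obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
+    >>> info["episode"]["r"]  # cumulative reward of the finished episode
+    >>> list(env.return_queue)  # returns of the most recent episodes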
+ + Attributes: + return_queue: The cumulative rewards of the last ``deque_size``-many episodes + length_queue: The lengths of the last ``deque_size``-many episodes + """ + + def __init__(self, env: gym.Env, deque_size: int = 100): + """This wrapper will keep track of cumulative rewards and episode lengths. + + Args: + env (Env): The environment to apply the wrapper + deque_size: The size of the buffers :attr:`return_queue` and :attr:`length_queue` + """ + super().__init__(env) + self.num_envs = getattr(env, "num_envs", 1) + self.t0 = time.perf_counter() + self.episode_count = 0 + self.episode_returns: Optional[np.ndarray] = None + self.episode_lengths: Optional[np.ndarray] = None + self.return_queue = deque(maxlen=deque_size) + self.length_queue = deque(maxlen=deque_size) + self.is_vector_env = getattr(env, "is_vector_env", False) + + def reset(self, **kwargs): + """Resets the environment using kwargs and resets the episode returns and lengths.""" + observations = super().reset(**kwargs) + self.episode_returns = np.zeros(self.num_envs, dtype=np.float32) + self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32) + return observations + + def step(self, action): + """Steps through the environment, recording the episode statistics.""" + ( + observations, + rewards, + terminateds, + truncateds, + infos, + ) = self.env.step(action) + assert isinstance( + infos, dict + ), f"`info` dtype is {type(infos)} while supported dtype is `dict`. This may be due to usage of other wrappers in the wrong order." + self.episode_returns += rewards + self.episode_lengths += 1 + if not self.is_vector_env: + terminateds = [terminateds] + truncateds = [truncateds] + terminateds = list(terminateds) + truncateds = list(truncateds) + + for i in range(len(terminateds)): + if terminateds[i] or truncateds[i]: + episode_return = self.episode_returns[i] + episode_length = self.episode_lengths[i] + episode_info = { + "episode": { + "r": episode_return, + "l": episode_length, + "t": round(time.perf_counter() - self.t0, 6), + } + } + if self.is_vector_env: + infos = add_vector_episode_statistics( + infos, episode_info["episode"], self.num_envs, i + ) + else: + infos = {**infos, **episode_info} + self.return_queue.append(episode_return) + self.length_queue.append(episode_length) + self.episode_count += 1 + self.episode_returns[i] = 0 + self.episode_lengths[i] = 0 + return ( + observations, + rewards, + terminateds if self.is_vector_env else terminateds[0], + truncateds if self.is_vector_env else truncateds[0], + infos, + ) diff --git a/MLPY/Lib/site-packages/gym/wrappers/record_video.py b/MLPY/Lib/site-packages/gym/wrappers/record_video.py new file mode 100644 index 0000000000000000000000000000000000000000..dea254cfbf635b8fd79daf8eeecd19a29d8f4b4c --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/record_video.py @@ -0,0 +1,211 @@ +"""Wrapper for recording videos.""" +import os +from typing import Callable, Optional + +import gym +from gym import logger +from gym.wrappers.monitoring import video_recorder + + +def capped_cubic_video_schedule(episode_id: int) -> bool: + """The default episode trigger. + + This function will trigger recordings at the episode indices 0, 1, 4, 8, 27, ..., :math:`k^3`, ..., 729, 1000, 2000, 3000, ... 
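+    Concretely, under this rule (a quick illustrative check against the implementation below):
+
+    >>> [episode for episode in range(30) if capped_cubic_video_schedule(episode)]
+    [0, 1, 8, 27]
+    >>> capped_cubic_video_schedule(2000)
+    True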
+ + Args: + episode_id: The episode number + + Returns: + If to apply a video schedule number + """ + if episode_id < 1000: + return int(round(episode_id ** (1.0 / 3))) ** 3 == episode_id + else: + return episode_id % 1000 == 0 + + +class RecordVideo(gym.Wrapper): + """This wrapper records videos of rollouts. + + Usually, you only want to record episodes intermittently, say every hundredth episode. + To do this, you can specify **either** ``episode_trigger`` **or** ``step_trigger`` (not both). + They should be functions returning a boolean that indicates whether a recording should be started at the + current episode or step, respectively. + If neither :attr:`episode_trigger` nor ``step_trigger`` is passed, a default ``episode_trigger`` will be employed. + By default, the recording will be stopped once a `terminated` or `truncated` signal has been emitted by the environment. However, you can + also create recordings of fixed length (possibly spanning several episodes) by passing a strictly positive value for + ``video_length``. + """ + + def __init__( + self, + env: gym.Env, + video_folder: str, + episode_trigger: Callable[[int], bool] = None, + step_trigger: Callable[[int], bool] = None, + video_length: int = 0, + name_prefix: str = "rl-video", + ): + """Wrapper records videos of rollouts. + + Args: + env: The environment that will be wrapped + video_folder (str): The folder where the recordings will be stored + episode_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this episode + step_trigger: Function that accepts an integer and returns ``True`` iff a recording should be started at this step + video_length (int): The length of recorded episodes. If 0, entire episodes are recorded. + Otherwise, snippets of the specified length are captured + name_prefix (str): Will be prepended to the filename of the recordings + """ + super().__init__(env) + + if episode_trigger is None and step_trigger is None: + episode_trigger = capped_cubic_video_schedule + + trigger_count = sum(x is not None for x in [episode_trigger, step_trigger]) + assert trigger_count == 1, "Must specify exactly one trigger" + + self.episode_trigger = episode_trigger + self.step_trigger = step_trigger + self.video_recorder: Optional[video_recorder.VideoRecorder] = None + + self.video_folder = os.path.abspath(video_folder) + # Create output folder if needed + if os.path.isdir(self.video_folder): + logger.warn( + f"Overwriting existing videos at {self.video_folder} folder " + f"(try specifying a different `video_folder` for the `RecordVideo` wrapper if this is not desired)" + ) + os.makedirs(self.video_folder, exist_ok=True) + + self.name_prefix = name_prefix + self.step_id = 0 + self.video_length = video_length + + self.recording = False + self.terminated = False + self.truncated = False + self.recorded_frames = 0 + self.is_vector_env = getattr(env, "is_vector_env", False) + self.episode_id = 0 + + def reset(self, **kwargs): + """Reset the environment using kwargs and then starts recording if video enabled.""" + observations = super().reset(**kwargs) + self.terminated = False + self.truncated = False + if self.recording: + assert self.video_recorder is not None + self.video_recorder.frames = [] + self.video_recorder.capture_frame() + self.recorded_frames += 1 + if self.video_length > 0: + if self.recorded_frames > self.video_length: + self.close_video_recorder() + elif self._video_enabled(): + self.start_video_recorder() + return observations + + def start_video_recorder(self): 
+ """Starts video recorder using :class:`video_recorder.VideoRecorder`.""" + self.close_video_recorder() + + video_name = f"{self.name_prefix}-step-{self.step_id}" + if self.episode_trigger: + video_name = f"{self.name_prefix}-episode-{self.episode_id}" + + base_path = os.path.join(self.video_folder, video_name) + self.video_recorder = video_recorder.VideoRecorder( + env=self.env, + base_path=base_path, + metadata={"step_id": self.step_id, "episode_id": self.episode_id}, + ) + + self.video_recorder.capture_frame() + self.recorded_frames = 1 + self.recording = True + + def _video_enabled(self): + if self.step_trigger: + return self.step_trigger(self.step_id) + else: + return self.episode_trigger(self.episode_id) + + def step(self, action): + """Steps through the environment using action, recording observations if :attr:`self.recording`.""" + ( + observations, + rewards, + terminateds, + truncateds, + infos, + ) = self.env.step(action) + + if not (self.terminated or self.truncated): + # increment steps and episodes + self.step_id += 1 + if not self.is_vector_env: + if terminateds or truncateds: + self.episode_id += 1 + self.terminated = terminateds + self.truncated = truncateds + elif terminateds[0] or truncateds[0]: + self.episode_id += 1 + self.terminated = terminateds[0] + self.truncated = truncateds[0] + + if self.recording: + assert self.video_recorder is not None + self.video_recorder.capture_frame() + self.recorded_frames += 1 + if self.video_length > 0: + if self.recorded_frames > self.video_length: + self.close_video_recorder() + else: + if not self.is_vector_env: + if terminateds or truncateds: + self.close_video_recorder() + elif terminateds[0] or truncateds[0]: + self.close_video_recorder() + + elif self._video_enabled(): + self.start_video_recorder() + + return observations, rewards, terminateds, truncateds, infos + + def close_video_recorder(self): + """Closes the video recorder if currently recording.""" + if self.recording: + assert self.video_recorder is not None + self.video_recorder.close() + self.recording = False + self.recorded_frames = 1 + + def render(self, *args, **kwargs): + """Compute the render frames as specified by render_mode attribute during initialization of the environment or as specified in kwargs.""" + if self.video_recorder is None or not self.video_recorder.enabled: + return super().render(*args, **kwargs) + + if len(self.video_recorder.render_history) > 0: + recorded_frames = [ + self.video_recorder.render_history.pop() + for _ in range(len(self.video_recorder.render_history)) + ] + if self.recording: + return recorded_frames + else: + return recorded_frames + super().render(*args, **kwargs) + else: + if self.recording: + return self.video_recorder.last_frame + else: + return super().render(*args, **kwargs) + + def close(self): + """Closes the wrapper then the video recorder.""" + super().close() + self.close_video_recorder() + + def __del__(self): + """Closes the video recorder.""" + self.close_video_recorder() diff --git a/MLPY/Lib/site-packages/gym/wrappers/render_collection.py b/MLPY/Lib/site-packages/gym/wrappers/render_collection.py new file mode 100644 index 0000000000000000000000000000000000000000..c79df9b0d2df946e9d65e03895a398ffc26543d7 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/render_collection.py @@ -0,0 +1,52 @@ +"""A wrapper that adds render collection mode to an environment.""" +import gym + + +class RenderCollection(gym.Wrapper): + """Save collection of render frames.""" + + def __init__(self, env: gym.Env, pop_frames: bool 
= True, reset_clean: bool = True): + """Initialize a :class:`RenderCollection` instance. + + Args: + env: The environment that is being wrapped + pop_frames (bool): If true, clear the collection frames after .render() is called. + Default value is True. + reset_clean (bool): If true, clear the collection frames when .reset() is called. + Default value is True. + """ + super().__init__(env) + assert env.render_mode is not None + assert not env.render_mode.endswith("_list") + self.frame_list = [] + self.reset_clean = reset_clean + self.pop_frames = pop_frames + + @property + def render_mode(self): + """Returns the collection render_mode name.""" + return f"{self.env.render_mode}_list" + + def step(self, *args, **kwargs): + """Perform a step in the base environment and collect a frame.""" + output = self.env.step(*args, **kwargs) + self.frame_list.append(self.env.render()) + return output + + def reset(self, *args, **kwargs): + """Reset the base environment, eventually clear the frame_list, and collect a frame.""" + result = self.env.reset(*args, **kwargs) + + if self.reset_clean: + self.frame_list = [] + self.frame_list.append(self.env.render()) + + return result + + def render(self): + """Returns the collection of frames and, if pop_frames = True, clears it.""" + frames = self.frame_list + if self.pop_frames: + self.frame_list = [] + + return frames diff --git a/MLPY/Lib/site-packages/gym/wrappers/rescale_action.py b/MLPY/Lib/site-packages/gym/wrappers/rescale_action.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3cf6cd157f35d65167695b46fd3843b4f66a65 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/rescale_action.py @@ -0,0 +1,82 @@ +"""Wrapper for rescaling actions to within a max and min action.""" +from typing import Union + +import numpy as np + +import gym +from gym import spaces + + +class RescaleAction(gym.ActionWrapper): + """Affinely rescales the continuous action space of the environment to the range [min_action, max_action]. + + The base environment :attr:`env` must have an action space of type :class:`spaces.Box`. If :attr:`min_action` + or :attr:`max_action` are numpy arrays, the shape must match the shape of the environment's action space. + + Example: + >>> import gym + >>> env = gym.make('BipedalWalker-v3') + >>> env.action_space + Box(-1.0, 1.0, (4,), float32) + >>> min_action = -0.5 + >>> max_action = np.array([0.0, 0.5, 1.0, 0.75]) + >>> env = RescaleAction(env, min_action=min_action, max_action=max_action) + >>> env.action_space + Box(-0.5, [0. 0.5 1. 0.75], (4,), float32) + >>> RescaleAction(env, min_action, max_action).action_space == gym.spaces.Box(min_action, max_action) + True + """ + + def __init__( + self, + env: gym.Env, + min_action: Union[float, int, np.ndarray], + max_action: Union[float, int, np.ndarray], + ): + """Initializes the :class:`RescaleAction` wrapper. + + Args: + env (Env): The environment to apply the wrapper + min_action (float, int or np.ndarray): The min values for each action. This may be a numpy array or a scalar. + max_action (float, int or np.ndarray): The max values for each action. This may be a numpy array or a scalar. 
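+            As a purely illustrative calculation: with ``min_action=-1``, ``max_action=1`` and a base
+            action space of ``Box(0, 10)``, an input action of ``0.5`` is mapped affinely to
+            ``0 + (10 - 0) * ((0.5 - (-1)) / (1 - (-1))) = 7.5``, then clipped to the base bounds.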
+ """ + assert isinstance( + env.action_space, spaces.Box + ), f"expected Box action space, got {type(env.action_space)}" + assert np.less_equal(min_action, max_action).all(), (min_action, max_action) + + super().__init__(env) + self.min_action = ( + np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + min_action + ) + self.max_action = ( + np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + max_action + ) + self.action_space = spaces.Box( + low=min_action, + high=max_action, + shape=env.action_space.shape, + dtype=env.action_space.dtype, + ) + + def action(self, action): + """Rescales the action affinely from [:attr:`min_action`, :attr:`max_action`] to the action space of the base environment, :attr:`env`. + + Args: + action: The action to rescale + + Returns: + The rescaled action + """ + assert np.all(np.greater_equal(action, self.min_action)), ( + action, + self.min_action, + ) + assert np.all(np.less_equal(action, self.max_action)), (action, self.max_action) + low = self.env.action_space.low + high = self.env.action_space.high + action = low + (high - low) * ( + (action - self.min_action) / (self.max_action - self.min_action) + ) + action = np.clip(action, low, high) + return action diff --git a/MLPY/Lib/site-packages/gym/wrappers/resize_observation.py b/MLPY/Lib/site-packages/gym/wrappers/resize_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..4f486a97bdf74acfe7480fc43e20baae23151ead --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/resize_observation.py @@ -0,0 +1,72 @@ +"""Wrapper for resizing observations.""" +from typing import Union + +import numpy as np + +import gym +from gym.error import DependencyNotInstalled +from gym.spaces import Box + + +class ResizeObservation(gym.ObservationWrapper): + """Resize the image observation. + + This wrapper works on environments with image observations (or more generally observations of shape AxBxC) and resizes + the observation to the shape given by the 2-tuple :attr:`shape`. The argument :attr:`shape` may also be an integer. + In that case, the observation is scaled to a square of side-length :attr:`shape`. + + Example: + >>> import gym + >>> env = gym.make('CarRacing-v1') + >>> env.observation_space.shape + (96, 96, 3) + >>> env = ResizeObservation(env, 64) + >>> env.observation_space.shape + (64, 64, 3) + """ + + def __init__(self, env: gym.Env, shape: Union[tuple, int]): + """Resizes image observations to shape given by :attr:`shape`. + + Args: + env: The environment to apply the wrapper + shape: The shape of the resized observations + """ + super().__init__(env) + if isinstance(shape, int): + shape = (shape, shape) + assert all(x > 0 for x in shape), shape + + self.shape = tuple(shape) + + assert isinstance( + env.observation_space, Box + ), f"Expected the observation space to be Box, actual type: {type(env.observation_space)}" + obs_shape = self.shape + env.observation_space.shape[2:] + self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8) + + def observation(self, observation): + """Updates the observations by resizing the observation to shape given by :attr:`shape`. 
+
+        Args:
+            observation: The observation to reshape
+
+        Returns:
+            The reshaped observations
+
+        Raises:
+            DependencyNotInstalled: opencv-python is not installed
+        """
+        try:
+            import cv2
+        except ImportError:
+            raise DependencyNotInstalled(
+                "opencv (cv2) is not installed, run `pip install gym[other]`"
+            )
+
+        observation = cv2.resize(
+            observation, self.shape[::-1], interpolation=cv2.INTER_AREA
+        )
+        if observation.ndim == 2:
+            observation = np.expand_dims(observation, -1)
+        return observation
diff --git a/MLPY/Lib/site-packages/gym/wrappers/step_api_compatibility.py b/MLPY/Lib/site-packages/gym/wrappers/step_api_compatibility.py
new file mode 100644
index 0000000000000000000000000000000000000000..358851cb382fde98490e4a7c75f088fc8ec7190e
--- /dev/null
+++ b/MLPY/Lib/site-packages/gym/wrappers/step_api_compatibility.py
@@ -0,0 +1,58 @@
+"""Implementation of StepAPICompatibility wrapper class for transforming envs between new and old step API."""
+import gym
+from gym.logger import deprecation
+from gym.utils.step_api_compatibility import (
+    convert_to_done_step_api,
+    convert_to_terminated_truncated_step_api,
+)
+
+
+class StepAPICompatibility(gym.Wrapper):
+    r"""A wrapper which can transform an environment from the new step API to the old one and vice versa.
+
+    The old step API refers to the step() method returning (observation, reward, done, info).
+    The new step API refers to the step() method returning (observation, reward, terminated, truncated, info).
+    (Refer to the docs for details on the API change.)
+
+    Args:
+        env (gym.Env): the env to wrap. Can be in old or new API
+        apply_step_compatibility (bool): Apply to convert environment to use new step API that returns two bools. (False by default)
+
+    Examples:
+        >>> env = gym.make("CartPole-v1")
+        >>> env  # wrapper not applied by default, set to new API
+        >>> env = gym.make("CartPole-v1", apply_api_compatibility=True)  # set to old API
+        >>> env = StepAPICompatibility(CustomEnv(), apply_step_compatibility=False)  # manually using wrapper on unregistered envs
+
+    """
+
+    def __init__(self, env: gym.Env, output_truncation_bool: bool = True):
+        """A wrapper which can transform an environment from the new step API to the old one and vice versa.
+
+        Args:
+            env (gym.Env): the env to wrap. Can be in old or new API
+            output_truncation_bool (bool): Whether the wrapper's step method outputs two booleans (new API) or one boolean (old API)
+        """
+        super().__init__(env)
+        self.output_truncation_bool = output_truncation_bool
+        if not self.output_truncation_bool:
+            deprecation(
+                "Initializing environment in old step API which returns one bool instead of two."
+            )
+
+    def step(self, action):
+        """Steps through the environment, returning 5 or 4 items depending on `output_truncation_bool`.
+ + Args: + action: action to step through the environment with + + Returns: + (observation, reward, terminated, truncated, info) or (observation, reward, done, info) + """ + step_returns = self.env.step(action) + if self.output_truncation_bool: + return convert_to_terminated_truncated_step_api(step_returns) + else: + return convert_to_done_step_api(step_returns) diff --git a/MLPY/Lib/site-packages/gym/wrappers/time_aware_observation.py b/MLPY/Lib/site-packages/gym/wrappers/time_aware_observation.py new file mode 100644 index 0000000000000000000000000000000000000000..781a77c2533d9602a048757979d5aa4f437aeaf0 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/time_aware_observation.py @@ -0,0 +1,71 @@ +"""Wrapper for adding time aware observations to environment observation.""" +import numpy as np + +import gym +from gym.spaces import Box + + +class TimeAwareObservation(gym.ObservationWrapper): + """Augment the observation with the current time step in the episode. + + The observation space of the wrapped environment is assumed to be a flat :class:`Box`. + In particular, pixel observations are not supported. This wrapper will append the current timestep within the current episode to the observation. + + Example: + >>> import gym + >>> env = gym.make('CartPole-v1') + >>> env = TimeAwareObservation(env) + >>> env.reset() + array([ 0.03810719, 0.03522411, 0.02231044, -0.01088205, 0. ]) + >>> env.step(env.action_space.sample())[0] + array([ 0.03881167, -0.16021058, 0.0220928 , 0.28875574, 1. ]) + """ + + def __init__(self, env: gym.Env): + """Initialize :class:`TimeAwareObservation` that requires an environment with a flat :class:`Box` observation space. + + Args: + env: The environment to apply the wrapper + """ + super().__init__(env) + assert isinstance(env.observation_space, Box) + assert env.observation_space.dtype == np.float32 + low = np.append(self.observation_space.low, 0.0) + high = np.append(self.observation_space.high, np.inf) + self.observation_space = Box(low, high, dtype=np.float32) + self.is_vector_env = getattr(env, "is_vector_env", False) + + def observation(self, observation): + """Adds to the observation with the current time step. + + Args: + observation: The observation to add the time step to + + Returns: + The observation with the time step appended to + """ + return np.append(observation, self.t) + + def step(self, action): + """Steps through the environment, incrementing the time step. + + Args: + action: The action to take + + Returns: + The environment's step using the action. + """ + self.t += 1 + return super().step(action) + + def reset(self, **kwargs): + """Reset the environment setting the time to zero. + + Args: + **kwargs: Kwargs to apply to env.reset() + + Returns: + The reset environment + """ + self.t = 0 + return super().reset(**kwargs) diff --git a/MLPY/Lib/site-packages/gym/wrappers/time_limit.py b/MLPY/Lib/site-packages/gym/wrappers/time_limit.py new file mode 100644 index 0000000000000000000000000000000000000000..854876e3a905eede2ae27fb04dc10e495a000170 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/time_limit.py @@ -0,0 +1,68 @@ +"""Wrapper for limiting the time steps of an environment.""" +from typing import Optional + +import gym + + +class TimeLimit(gym.Wrapper): + """This wrapper will issue a `truncated` signal if a maximum number of timesteps is exceeded. + + If a truncation is not defined inside the environment itself, this is the only place that the truncation signal is issued. 
+    Critically, this is different from the `terminated` signal that originates from the underlying environment as part of the MDP.
+
+    Example:
+        >>> from gym.envs.classic_control import CartPoleEnv
+        >>> from gym.wrappers import TimeLimit
+        >>> env = CartPoleEnv()
+        >>> env = TimeLimit(env, max_episode_steps=1000)
+    """
+
+    def __init__(
+        self,
+        env: gym.Env,
+        max_episode_steps: Optional[int] = None,
+    ):
+        """Initializes the :class:`TimeLimit` wrapper with an environment and the number of steps after which truncation will occur.
+
+        Args:
+            env: The environment to apply the wrapper
+            max_episode_steps: An optional max episode steps (if ``None``, ``env.spec.max_episode_steps`` is used)
+        """
+        super().__init__(env)
+        if max_episode_steps is None and self.env.spec is not None:
+            max_episode_steps = env.spec.max_episode_steps
+        if self.env.spec is not None:
+            self.env.spec.max_episode_steps = max_episode_steps
+        self._max_episode_steps = max_episode_steps
+        self._elapsed_steps = None
+
+    def step(self, action):
+        """Steps through the environment and if the number of steps elapsed exceeds ``max_episode_steps`` then truncate.
+
+        Args:
+            action: The environment step action
+
+        Returns:
+            The environment step ``(observation, reward, terminated, truncated, info)`` with `truncated=True`
+            if the number of steps elapsed >= max episode steps
+
+        """
+        observation, reward, terminated, truncated, info = self.env.step(action)
+        self._elapsed_steps += 1
+
+        if self._elapsed_steps >= self._max_episode_steps:
+            truncated = True
+
+        return observation, reward, terminated, truncated, info
+
+    def reset(self, **kwargs):
+        """Resets the environment with :param:`**kwargs` and sets the number of steps elapsed to zero.
+
+        Args:
+            **kwargs: The kwargs to reset the environment with
+
+        Returns:
+            The reset environment
+        """
+        self._elapsed_steps = 0
+        return self.env.reset(**kwargs)
diff --git a/MLPY/Lib/site-packages/gym/wrappers/transform_observation.py b/MLPY/Lib/site-packages/gym/wrappers/transform_observation.py
new file mode 100644
index 0000000000000000000000000000000000000000..2af2e9afb40c61047b46ff526c3ccea8ceef10b8
--- /dev/null
+++ b/MLPY/Lib/site-packages/gym/wrappers/transform_observation.py
@@ -0,0 +1,43 @@
+"""Wrapper for transforming observations."""
+from typing import Any, Callable
+
+import gym
+
+
+class TransformObservation(gym.ObservationWrapper):
+    """Transform the observation via an arbitrary function :attr:`f`.
+
+    The function :attr:`f` should be defined on the observation space of the base environment, ``env``, and should, ideally, return values in the same space.
+
+    If the transformation you wish to apply to observations returns values in a *different* space, you should subclass :class:`ObservationWrapper`, implement the transformation, and set the new observation space accordingly. If you were to use this wrapper instead, the observation space would be set incorrectly.
+
+    Example:
+        >>> import gym
+        >>> import numpy as np
+        >>> env = gym.make('CartPole-v1')
+        >>> env = TransformObservation(env, lambda obs: obs + 0.1*np.random.randn(*obs.shape))
+        >>> env.reset()
+        array([-0.08319338,  0.04635121, -0.07394746,  0.20877492])
+    """
+
+    def __init__(self, env: gym.Env, f: Callable[[Any], Any]):
+        """Initialize the :class:`TransformObservation` wrapper with an environment and a transform function :param:`f`.
+ + Args: + env: The environment to apply the wrapper + f: A function that transforms the observation + """ + super().__init__(env) + assert callable(f) + self.f = f + + def observation(self, observation): + """Transforms the observations with callable :attr:`f`. + + Args: + observation: The observation to transform + + Returns: + The transformed observation + """ + return self.f(observation) diff --git a/MLPY/Lib/site-packages/gym/wrappers/transform_reward.py b/MLPY/Lib/site-packages/gym/wrappers/transform_reward.py new file mode 100644 index 0000000000000000000000000000000000000000..a17a8ef1bc08afa80e9d8b77a9b65c5c184fbfa5 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/transform_reward.py @@ -0,0 +1,44 @@ +"""Wrapper for transforming the reward.""" +from typing import Callable + +import gym +from gym import RewardWrapper + + +class TransformReward(RewardWrapper): + """Transform the reward via an arbitrary function. + + Warning: + If the base environment specifies a reward range which is not invariant under :attr:`f`, the :attr:`reward_range` of the wrapped environment will be incorrect. + + Example: + >>> import gym + >>> env = gym.make('CartPole-v1') + >>> env = TransformReward(env, lambda r: 0.01*r) + >>> env.reset() + >>> observation, reward, terminated, truncated, info = env.step(env.action_space.sample()) + >>> reward + 0.01 + """ + + def __init__(self, env: gym.Env, f: Callable[[float], float]): + """Initialize the :class:`TransformReward` wrapper with an environment and reward transform function :param:`f`. + + Args: + env: The environment to apply the wrapper + f: A function that transforms the reward + """ + super().__init__(env) + assert callable(f) + self.f = f + + def reward(self, reward): + """Transforms the reward using callable :attr:`f`. + + Args: + reward: The reward to transform + + Returns: + The transformed reward + """ + return self.f(reward) diff --git a/MLPY/Lib/site-packages/gym/wrappers/vector_list_info.py b/MLPY/Lib/site-packages/gym/wrappers/vector_list_info.py new file mode 100644 index 0000000000000000000000000000000000000000..8ab933e957750edc97b7f11793f2286295a02e20 --- /dev/null +++ b/MLPY/Lib/site-packages/gym/wrappers/vector_list_info.py @@ -0,0 +1,111 @@ +"""Wrapper that converts the info format for vec envs into the list format.""" + +from typing import List + +import gym + + +class VectorListInfo(gym.Wrapper): + """Converts infos of vectorized environments from dict to List[dict]. + + This wrapper converts the info format of a + vector environment from a dictionary to a list of dictionaries. + This wrapper is intended to be used around vectorized + environments. If using other wrappers that perform + operation on info like `RecordEpisodeStatistics` this + need to be the outermost wrapper. + + i.e. VectorListInfo(RecordEpisodeStatistics(envs)) + + Example:: + + >>> # actual + >>> { + ... "k": np.array[0., 0., 0.5, 0.3], + ... "_k": np.array[False, False, True, True] + ... } + >>> # classic + >>> [{}, {}, {k: 0.5}, {k: 0.3}] + + """ + + def __init__(self, env): + """This wrapper will convert the info into the list format. + + Args: + env (Env): The environment to apply the wrapper + """ + assert getattr( + env, "is_vector_env", False + ), "This wrapper can only be used in vectorized environments." 
+        super().__init__(env)
+
+    def step(self, action):
+        """Steps through the environment, converting the dict info to a list."""
+        observation, reward, terminated, truncated, infos = self.env.step(action)
+        list_info = self._convert_info_to_list(infos)
+
+        return observation, reward, terminated, truncated, list_info
+
+    def reset(self, **kwargs):
+        """Resets the environment using kwargs."""
+        obs, infos = self.env.reset(**kwargs)
+        list_info = self._convert_info_to_list(infos)
+        return obs, list_info
+
+    def _convert_info_to_list(self, infos: dict) -> List[dict]:
+        """Convert the dict info to a list.
+
+        Convert the dict info of the vectorized environment
+        into a list of dictionaries where the i-th dictionary
+        has the info of the i-th environment.
+
+        Args:
+            infos (dict): info dict coming from the env.
+
+        Returns:
+            list_info (list): converted info.
+
+        """
+        list_info = [{} for _ in range(self.num_envs)]
+        list_info = self._process_episode_statistics(infos, list_info)
+        for k in infos:
+            if k.startswith("_"):
+                continue
+            for i, has_info in enumerate(infos[f"_{k}"]):
+                if has_info:
+                    list_info[i][k] = infos[k][i]
+        return list_info
+
+    def _process_episode_statistics(self, infos: dict, list_info: list) -> List[dict]:
+        """Process episode statistics.
+
+        The `RecordEpisodeStatistics` wrapper adds extra
+        information to the info. This information comes in
+        the form of a dict of dicts. This method processes that
+        information and adds it to the per-environment info.
+        `RecordEpisodeStatistics` info contains the keys
+        "r", "l", "t", which represent "cumulative reward",
+        "episode length", and "elapsed time since instantiation of wrapper".
+
+        Args:
+            infos (dict): infos coming from `RecordEpisodeStatistics`.
+            list_info (list): info of the current vectorized environment.
+
+        Returns:
+            list_info (list): updated info.
+
+        """
+        episode_statistics = infos.pop("episode", False)
+        if not episode_statistics:
+            return list_info
+
+        episode_statistics_mask = infos.pop("_episode")
+        for i, has_info in enumerate(episode_statistics_mask):
+            if has_info:
+                list_info[i]["episode"] = {}
+                list_info[i]["episode"]["r"] = episode_statistics["r"][i]
+                list_info[i]["episode"]["l"] = episode_statistics["l"][i]
+                list_info[i]["episode"]["t"] = episode_statistics["t"][i]
+
+        return list_info
diff --git a/MLPY/Lib/site-packages/gym_notices/__init__.py b/MLPY/Lib/site-packages/gym_notices/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/MLPY/Lib/site-packages/gym_notices/__init__.py
@@ -0,0 +1 @@
+
diff --git a/MLPY/Lib/site-packages/gym_notices/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/gym_notices/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39e95b1218af955901cce310c58450db1529a962
Binary files /dev/null and b/MLPY/Lib/site-packages/gym_notices/__pycache__/__init__.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/gym_notices/__pycache__/notices.cpython-39.pyc b/MLPY/Lib/site-packages/gym_notices/__pycache__/notices.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..914345e18085eebe517b54a212b1bb579cbbf0c8
Binary files /dev/null and b/MLPY/Lib/site-packages/gym_notices/__pycache__/notices.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/gym_notices/notices.py b/MLPY/Lib/site-packages/gym_notices/notices.py
new file mode 100644
index 0000000000000000000000000000000000000000..18547b815f0a764d37ad59e4106caa4d1948c53b
--- /dev/null
+++ b/MLPY/Lib/site-packages/gym_notices/notices.py
@@ -0,0 +1,9 @@
+notices = {
+    "0.22.0": "",
+    "0.23.0": "",
+    "0.23.1": "",
+    "0.24.0": "Warning: Gym version v0.24.0 has a number of critical issues with `gym.make` such that the `reset` and `step` functions are called before returning the environment. It is recommended to downgrade to v0.23.1 or upgrade to v0.25.1",
+    "0.24.1": "Warning: Gym version v0.24.1 has a number of critical issues with `gym.make` such that environment observation and action spaces are incorrectly evaluated, raising incorrect errors and warnings. It is recommended to downgrade to v0.23.1 or upgrade to v0.25.1",
+    "0.25.0": "",
+    "0.25.1": "",
+}
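For orientation, the short sketch below (not part of the vendored files above) shows how several of the wrappers added in this diff compose around a registered environment. It is a minimal illustration, assuming the gym 0.26-style reset/step API used throughout these sources, that `CartPole-v1` is registered, and that these wrapper classes are importable from `gym.wrappers`; the specific parameter values are arbitrary.

# Illustrative sketch only -- not one of the vendored files; assumes the gym 0.26-style API.
import numpy as np
import gym
from gym.wrappers import (
    RenderCollection,
    TimeAwareObservation,
    TimeLimit,
    TransformObservation,
    TransformReward,
)

# Build a wrapper stack from the classes added in this diff.
env = gym.make("CartPole-v1", render_mode="rgb_array")
env = TimeLimit(env, max_episode_steps=200)      # issue `truncated=True` after 200 steps
env = TimeAwareObservation(env)                  # append the episode timestep to the observation
env = TransformObservation(env, lambda obs: obs.astype(np.float32))  # arbitrary observation transform
env = TransformReward(env, lambda r: 0.01 * r)   # rescale rewards
env = RenderCollection(env)                      # collect rgb_array frames on every step/reset

obs, info = env.reset(seed=0)
terminated = truncated = False
while not (terminated or truncated):
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())

frames = env.render()  # list of frames gathered since the last render()/reset()

The ordering matters: wrappers that change the observation (such as TimeAwareObservation) should sit inside any wrapper that consumes observations, and frame-collecting or info-reshaping wrappers such as RenderCollection or VectorListInfo are intended to be applied outermost.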