file_path (stringlengths 22-162) | content (stringlengths 19-501k) | size (int64 19-501k) | lang (stringclasses 1 value) | avg_line_length (float64 6.33-100) | max_line_length (int64 18-935) | alphanum_fraction (float64 0.34-0.93)
---|---|---|---|---|---|---|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_musllinux.py | """PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Optional
from ._elffile import ELFFile
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
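# Illustrative example (not part of the vendored module): given loader output of
# the shape documented in _get_musl_version below, the parser returns the
# major/minor pair, or None for non-musl output.
#   _parse_musl_version("musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader")
#   -> _MuslVersion(major=1, minor=2)
#   _parse_musl_version("glibc 2.35")
#   -> None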
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr)
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| 2,524 | Python | 30.172839 | 80 | 0.650555 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/tags.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import logging
import platform
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from . import _manylinux, _musllinux
logger = logging.getLogger(__name__)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
}
_32_BIT_INTERPRETER = sys.maxsize <= 2**32
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
# that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
@property
def interpreter(self) -> str:
return self._interpreter
@property
def abi(self) -> str:
return self._abi
@property
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
def parse_tag(tag: str) -> FrozenSet[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
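# Illustrative example (not part of the vendored module): a compressed tag set
# expands into one Tag per interpreter/abi/platform combination.
#   parse_tag("cp38.cp39-abi3-manylinux1_x86_64")
#   -> frozenset({Tag("cp38", "abi3", "manylinux1_x86_64"),
#                 Tag("cp39", "abi3", "manylinux1_x86_64")})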
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
value: Union[int, str, None] = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_").replace(" ", "_")
def _abi3_applies(python_version: PythonVersion) -> bool:
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append(f"cp{version}")
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
If python_version only specifies a major version then user-provided ABIs and
the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
if not python_version:
python_version = sys.version_info[:2]
interpreter = f"cp{_version_nodot(python_version[:2])}"
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
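# Illustrative example (not part of the vendored module), with every argument
# pinned so the result does not depend on the running interpreter; the str()
# forms of the yielded tags are:
#   list(cpython_tags((3, 10), abis=["cp310"], platforms=["manylinux2014_x86_64"]))
#   -> cp310-cp310-manylinux2014_x86_64, cp310-abi3-manylinux2014_x86_64,
#      cp310-none-manylinux2014_x86_64, cp39-abi3-manylinux2014_x86_64,
#      ..., cp32-abi3-manylinux2014_x86_64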
def _generic_abi() -> List[str]:
"""
Return the ABI tag based on EXT_SUFFIX.
"""
# The following are examples of `EXT_SUFFIX`.
# We want to keep the parts which are related to the ABI and remove the
# parts which are related to the platform:
# - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310
# - mac: '.cpython-310-darwin.so' => cp310
# - win: '.cp310-win_amd64.pyd' => cp310
# - win: '.pyd' => cp37 (uses _cpython_abis())
# - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
# - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
# => graalpy_38_native
ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
parts = ext_suffix.split(".")
if len(parts) < 3:
# CPython 3.7 and earlier use ".pyd" on Windows.
return _cpython_abis(sys.version_info[:2])
soabi = parts[1]
if soabi.startswith("cpython"):
# non-windows
abi = "cp" + soabi.split("-")[1]
elif soabi.startswith("cp"):
# windows
abi = soabi.split("-")[0]
elif soabi.startswith("pypy"):
abi = "-".join(soabi.split("-")[:2])
elif soabi.startswith("graalpy"):
abi = "-".join(soabi.split("-")[:3])
elif soabi:
# pyston, ironpython, others?
abi = soabi
else:
return []
return [_normalize_string(abi)]
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
else:
abis = list(abis)
platforms = list(platforms or platform_tags())
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield f"py{_version_nodot(py_version[:2])}"
yield f"py{py_version[0]}"
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield f"py{_version_nodot((py_version[0], minor))}"
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
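# Illustrative example (not part of the vendored module), with explicit
# arguments so the result is deterministic; the str() forms of the tags are:
#   list(compatible_tags((3, 9), "cp39", ["manylinux2014_x86_64"]))
#   -> py39-none-manylinux2014_x86_64, py3-none-manylinux2014_x86_64,
#      py38-none-manylinux2014_x86_64, ..., py30-none-manylinux2014_x86_64,
#      cp39-none-any, py39-none-any, py3-none-any, ..., py30-none-any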
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
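# Illustrative example (not part of the vendored module):
#   _mac_binary_formats((10, 15), "x86_64")
#   -> ["x86_64", "intel", "fat64", "fat32", "universal2", "universal"]
#   _mac_binary_formats((12, 0), "arm64")
#   -> ["arm64", "universal2"]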
def mac_platforms(
version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver()
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
if version == (10, 16):
# When built against an older macOS SDK, Python will report macOS 10.16
# instead of the real version.
version_str = subprocess.run(
[
sys.executable,
"-sS",
"-c",
"import platform; print(platform.mac_ver()[0])",
],
check=True,
env={"SYSTEM_VERSION_COMPAT": "0"},
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
else:
version = version
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
if (10, 0) <= version and version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
)
if version >= (11, 0):
# Starting with Mac OS 11, each yearly release bumps the major version
# number. The minor versions are now the midyear updates.
for major_version in range(version[0], 10, -1):
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
)
if version >= (11, 0):
# Mac OS 11 on x86_64 is compatible with binaries from previous releases.
# Arm64 support was introduced in 11.0, so no Arm binaries from previous
# releases exist.
#
# However, the "universal2" binary format can have a
# macOS version earlier than 11.0 when the x86_64 part of the binary supports
# that version of macOS.
if arch == "x86_64":
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
else:
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_format = "universal2"
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
linux = _normalize_string(sysconfig.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
_, arch = linux.split("_", 1)
yield from _manylinux.platform_tags(linux, arch)
yield from _musllinux.platform_tags(arch)
yield linux
def _generic_platforms() -> Iterator[str]:
yield _normalize_string(sysconfig.get_platform())
def platform_tags() -> Iterator[str]:
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name() -> str:
"""
Returns the name of the running interpreter.
Some implementations have a reserved, two-letter abbreviation which will
be returned when appropriate.
"""
name = sys.implementation.name
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
interp_name = interpreter_name()
if interp_name == "cp":
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
if interp_name == "pp":
interp = "pp3"
elif interp_name == "cp":
interp = "cp" + interpreter_version(warn=warn)
else:
interp = None
yield from compatible_tags(interpreter=interp)
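# Illustrative example (not part of the vendored module); the exact output
# depends on the running interpreter and platform. On a hypothetical CPython
# 3.10 on glibc 2.35 x86_64 Linux the first and last tags would look roughly
# like:
#   cp310-cp310-manylinux_2_35_x86_64 ... py30-none-any
# i.e. the most specific tag first and the most generic compatible tag last.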
| 18,106 | Python | 32.102377 | 88 | 0.591958 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/metadata.py | import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import sys
import typing
from typing import Dict, List, Optional, Tuple, Union, cast
if sys.version_info >= (3, 8): # pragma: no cover
from typing import TypedDict
else: # pragma: no cover
if typing.TYPE_CHECKING:
from typing_extensions import TypedDict
else:
try:
from typing_extensions import TypedDict
except ImportError:
class TypedDict:
def __init_subclass__(*_args, **_kwargs):
pass
# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
format offers some very basic primitives in *some* way then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
"""A dictionary of raw core metadata.
Each field in core metadata maps to a key of this dictionary (when data is
provided). The key is lower-case and underscores are used instead of dashes
compared to the equivalent core metadata field. Any core metadata field that
can be specified multiple times or can hold multiple values in a single
field has a key with a plural name.
Core metadata fields that can be specified multiple times are stored as a
list or dict depending on which is appropriate for the field. Any fields
which hold multiple values in a single field are stored as a list.
"""
# Metadata 1.0 - PEP 241
metadata_version: str
name: str
version: str
platforms: List[str]
summary: str
description: str
keywords: List[str]
home_page: str
author: str
author_email: str
license: str
# Metadata 1.1 - PEP 314
supported_platforms: List[str]
download_url: str
classifiers: List[str]
requires: List[str]
provides: List[str]
obsoletes: List[str]
# Metadata 1.2 - PEP 345
maintainer: str
maintainer_email: str
requires_dist: List[str]
provides_dist: List[str]
obsoletes_dist: List[str]
requires_python: str
requires_external: List[str]
project_urls: Dict[str, str]
# Metadata 2.0
# PEP 426 attempted to completely revamp the metadata format
# but got stuck without ever being able to build consensus on
# it and ultimately ended up withdrawn.
#
# However, a number of tools had started emitting METADATA with
# `2.0` Metadata-Version, so for historical reasons, this version
# was skipped.
# Metadata 2.1 - PEP 566
description_content_type: str
provides_extra: List[str]
# Metadata 2.2 - PEP 643
dynamic: List[str]
# Metadata 2.3 - PEP 685
# No new fields were added in PEP 685, just some edge cases were
# tightened up to provide better interoperability.
_STRING_FIELDS = {
"author",
"author_email",
"description",
"description_content_type",
"download_url",
"home_page",
"license",
"maintainer",
"maintainer_email",
"metadata_version",
"name",
"requires_python",
"summary",
"version",
}
_LIST_STRING_FIELDS = {
"classifiers",
"dynamic",
"obsoletes",
"obsoletes_dist",
"platforms",
"provides",
"provides_dist",
"provides_extra",
"requires",
"requires_dist",
"requires_external",
"supported_platforms",
}
def _parse_keywords(data: str) -> List[str]:
"""Split a string of comma-separate keyboards into a list of keywords."""
return [k.strip() for k in data.split(",")]
def _parse_project_urls(data: List[str]) -> Dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
# Our logic is slightly tricky here as we want to try and do
# *something* reasonable with malformed data.
#
# The main thing that we have to worry about is data that does
# not have a ',' at all to split the label from the value. There
# isn't a singular right answer here, and we will fail validation
# later on (if the caller is validating) so it doesn't *really*
# matter, but since the missing value has to be an empty str
# and our return value is dict[str, str], if we let the key
# be the missing value, then they'd have multiple '' values that
# overwrite each other in an accumulating dict.
#
# The other potential issue is that it's possible to have the
# same label multiple times in the metadata, with no solid "right"
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
# be case-preserving and case-insensitive, but doing that
# would open up more cases where we might have duplicate
# entries.
label, url = parts
if label in urls:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
urls[label] = url
return urls
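# Illustrative example (not part of the vendored module):
#   _parse_project_urls(["Homepage, https://example.com",
#                        "Tracker, https://example.com/issues"])
#   -> {"Homepage": "https://example.com", "Tracker": "https://example.com/issues"}
# A repeated label raises KeyError, which parse_email() below turns into an
# "unparsed" entry.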
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
if isinstance(source, str):
payload: str = msg.get_payload()
return payload
# If our source is a bytes, then we're managing the encoding and we need
# to deal with it.
else:
bpayload: bytes = msg.get_payload(decode=True)
try:
return bpayload.decode("utf8", "strict")
except UnicodeDecodeError:
raise ValueError("payload in an invalid encoding")
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.
# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
"author": "author",
"author-email": "author_email",
"classifier": "classifiers",
"description": "description",
"description-content-type": "description_content_type",
"download-url": "download_url",
"dynamic": "dynamic",
"home-page": "home_page",
"keywords": "keywords",
"license": "license",
"maintainer": "maintainer",
"maintainer-email": "maintainer_email",
"metadata-version": "metadata_version",
"name": "name",
"obsoletes": "obsoletes",
"obsoletes-dist": "obsoletes_dist",
"platform": "platforms",
"project-url": "project_urls",
"provides": "provides",
"provides-dist": "provides_dist",
"provides-extra": "provides_extra",
"requires": "requires",
"requires-dist": "requires_dist",
"requires-external": "requires_external",
"requires-python": "requires_python",
"summary": "summary",
"supported-platform": "supported_platforms",
"version": "version",
}
def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
"""Parse a distribution's metadata.
This function returns a two-item tuple of dicts. The first dict is of
recognized fields from the core metadata specification. Fields that can be
parsed and translated into Python's built-in types are converted
appropriately. All other fields are left as-is. Fields that are allowed to
appear multiple times are stored as lists.
The second dict contains all other fields from the metadata. This includes
any unrecognized fields. It also includes any fields which are expected to
be parsed into a built-in type but were not formatted appropriately. Finally,
any fields that are expected to appear only once but are repeated are
included in this dict.
"""
raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
unparsed: Dict[str, List[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
else:
parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
# We have to wrap parsed.keys() in a set, because in the case of multiple
# values for a key (a list), the key will appear multiple times in the
# list of keys, but we're avoiding that by using get_all().
for name in frozenset(parsed.keys()):
# Header names in RFC are case insensitive, so we'll normalize to all
# lower case to make comparisons easier.
name = name.lower()
# We use get_all() here, even for fields that aren't multiple use,
# because otherwise someone could have e.g. two Name fields, and we
# would just silently ignore it rather than doing something about it.
headers = parsed.get_all(name)
# The way the email module works when parsing bytes is that it
# unconditionally decodes the bytes as ascii using the surrogateescape
# handler. When you pull that data back out (such as with get_all() ),
# it looks to see if the str has any surrogate escapes, and if it does
# it wraps it in a Header object instead of returning the string.
#
# As such, we'll look for those Header objects, and fix up the encoding.
value = []
# Flag if we have run into any issues processing the headers, thus
# signalling that the data belongs in 'unparsed'.
valid_encoding = True
for h in headers:
# It's unclear if this can return more types than just a Header or
# a str, so we'll just assert here to make sure.
assert isinstance(h, (email.header.Header, str))
# If it's a header object, we need to do our little dance to get
# the real data out of it. In cases where there is invalid data
# we're going to end up with mojibake, but there's no obvious, good
# way around that without reimplementing parts of the Header object
# ourselves.
#
# That should be fine since, if mojibake happens, this key is
# going into the unparsed dict anyways.
if isinstance(h, email.header.Header):
# The Header object stores its data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: List[Tuple[bytes, Optional[str]]] = []
for bin, encoding in email.header.decode_header(h):
try:
bin.decode("utf8", "strict")
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
valid_encoding = False
else:
encoding = "utf8"
chunks.append((bin, encoding))
# Turn our chunks back into a Header object, then let that
# Header object do the right thing to turn them into a
# string for us.
value.append(str(email.header.make_header(chunks)))
# This is already a string, so just add it.
else:
value.append(h)
# We've processed all of our values to get them into a list of str,
# but we may have mojibake data, in which case this is an unparsed
# field.
if not valid_encoding:
unparsed[name] = value
continue
raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
if raw_name is None:
# This is a bit of a weird situation: we've encountered a key whose
# meaning we don't know, so we don't know whether it's meant
# to be a list or not.
#
# Since we can't really tell one way or another, we'll just leave it
# as a list, even though it may be a single item list, because that's
# what makes the most sense for email headers.
unparsed[name] = value
continue
# If this is one of our string fields, then we'll check to see if our
# value is a list of a single item. If it is then we'll assume that
# it was emitted as a single string, and unwrap the str from inside
# the list.
#
# If it's any other kind of data, then we haven't the faintest clue
# what we should parse it as, and we have to just add it to our list
# of unparsed stuff.
if raw_name in _STRING_FIELDS and len(value) == 1:
raw[raw_name] = value[0]
# If this is one of our list of string fields, then we can just assign
# the value, since email *only* has strings, and our get_all() call
# above ensures that this is a list.
elif raw_name in _LIST_STRING_FIELDS:
raw[raw_name] = value
# Special Case: Keywords
# The keywords field is implemented in the metadata spec as a str,
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
# specially-formatted strings that represent a key and a value, which
# is fundamentally a mapping, however the email format doesn't support
# mappings in a sane way, so it was crammed into a list of strings
# instead.
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
unparsed[name] = value
# Nothing that we've done has managed to parse this, so we'll just
# throw it in our unparseable data and move on.
else:
unparsed[name] = value
# We need to support getting the Description from the message payload in
# addition to getting it from the headers. This does mean, though, there
# is the possibility of it being set both ways, in which case we put both
# in 'unparsed' since we don't know which is right.
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes))
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparseable.
if "description" in raw:
description_header = cast(str, raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
)
elif "description" in unparsed:
unparsed["description"].append(payload)
else:
raw["description"] = payload
# We need to cast our `raw` to a RawMetadata, because a TypedDict only supports
# literal key names while we're computing our key names on purpose; however,
# the way this function is implemented ensures our `TypedDict` can only end up
# with valid key names.
return cast(RawMetadata, raw), unparsed
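# Illustrative example (not part of the vendored module), using a minimal
# hypothetical METADATA document:
#   raw, unparsed = parse_email("Metadata-Version: 2.1\nName: example\nVersion: 1.0")
#   raw      -> {"metadata_version": "2.1", "name": "example", "version": "1.0"}
#   unparsed -> {}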
| 16,397 | Python | 39.092909 | 88 | 0.631152 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_elffile.py | """
ELF file parser.
This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
import enum
import os
import struct
from typing import IO, Optional, Tuple
class ELFInvalid(ValueError):
pass
class EIClass(enum.IntEnum):
C32 = 1
C64 = 2
class EIData(enum.IntEnum):
Lsb = 1
Msb = 2
class EMachine(enum.IntEnum):
I386 = 3
S390 = 22
Arm = 40
X8664 = 62
AArc64 = 183
class ELFFile:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error:
raise ELFInvalid("unable to parse identification")
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
# e_fmt: Format of the remaining ELF header fields.
# p_fmt: Format of a program header entry.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
)
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
self._e_phentsize, # Size of section.
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> Optional[str]:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
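# Illustrative usage (not part of the vendored module); the path below is
# hypothetical and the result depends on the platform's libc:
#   with open("/bin/true", "rb") as f:
#       print(ELFFile(f).interpreter)   # e.g. "/lib/ld-musl-x86_64.so.1" on musl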
| 3,266 | Python | 28.972477 | 86 | 0.527863 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_parser.py | """Handwritten parser of dependency specifiers.
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class Node:
def __init__(self, value: str) -> None:
self.value = value
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]
class ParsedRequirement(NamedTuple):
name: str
url: str
extras: List[str]
specifier: str
marker: Optional[MarkerList]
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
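# Illustrative example (not part of the vendored module); the token rules live
# in ._tokenizer, which is not shown here. Roughly:
#   req = parse_requirement('requests[security]>=2.8.1 ; python_version < "3.11"')
#   req.name      -> "requests"
#   req.extras    -> ["security"]
#   req.specifier -> ">=2.8.1"
#   req.marker    -> [(Variable("python_version"), Op("<"), Value("3.11"))]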
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: List[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)*
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
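# Illustrative example (not part of the vendored module): a marker expression
# parses into a flat list alternating atoms and boolean operators.
#   parse_marker('os_name == "nt" and python_version >= "3.8"')
#   -> [(Variable("os_name"), Op("=="), Value("nt")), "and",
#       (Variable("python_version"), Op(">="), Value("3.8"))]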
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if (
env_var == "platform_python_implementation"
or env_var == "python_implementation"
):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of "
"<=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
| 10,194 | Python | 27.799435 | 88 | 0.593388 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_meta.py | from ._compat import Protocol
from typing import Any, Dict, Iterator, List, TypeVar, Union
_T = TypeVar("_T")
class PackageMetadata(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def __iter__(self) -> Iterator[str]:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
@property
def json(self) -> Dict[str, Union[str, List[str]]]:
"""
A JSON-compatible form of the metadata.
"""
class SimplePath(Protocol[_T]):
"""
A minimal subset of pathlib.Path required by PathDistribution.
"""
def joinpath(self) -> _T:
... # pragma: no cover
def __truediv__(self, other: Union[str, _T]) -> _T:
... # pragma: no cover
@property
def parent(self) -> _T:
... # pragma: no cover
def read_text(self) -> str:
... # pragma: no cover
| 1,165 | Python | 22.32 | 76 | 0.535622 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_itertools.py | from itertools import filterfalse
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
# copied from more_itertools 8.8
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
| 2,068 | Python | 26.959459 | 79 | 0.558027 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_collections.py | import collections
# from jaraco.collections 3.3
class FreezableDefaultDict(collections.defaultdict):
"""
Often it is desirable to prevent the mutation of
a default dict after its initial construction, such
as to prevent mutation during iteration.
>>> dd = FreezableDefaultDict(list)
>>> dd[0].append('1')
>>> dd.freeze()
>>> dd[1]
[]
>>> len(dd)
1
"""
def __missing__(self, key):
return getattr(self, '_frozen', super().__missing__)(key)
def freeze(self):
self._frozen = lambda key: self.default_factory()
class Pair(collections.namedtuple('Pair', 'name value')):
@classmethod
def parse(cls, text):
return cls(*map(str.strip, text.split("=", 1)))
| 743 | Python | 22.999999 | 65 | 0.620458 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_compat.py | import sys
import platform
__all__ = ['install', 'NullFinder', 'Protocol']
try:
from typing import Protocol
except ImportError: # pragma: no cover
# Python 3.7 compatibility
from ..typing_extensions import Protocol # type: ignore
def install(cls):
"""
Class decorator for installation on sys.meta_path.
Adds the backport DistributionFinder to sys.meta_path and
attempts to disable the finder functionality of the stdlib
DistributionFinder.
"""
sys.meta_path.append(cls())
disable_stdlib_finder()
return cls
def disable_stdlib_finder():
"""
Give the backport primacy for discovering path-based distributions
by monkey-patching the stdlib O_O.
See #91 for more background for rationale on this sketchy
behavior.
"""
def matches(finder):
return getattr(
finder, '__module__', None
) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
for finder in filter(matches, sys.meta_path): # pragma: nocover
del finder.find_distributions
class NullFinder:
"""
A "Finder" (aka "MetaClassFinder") that never finds any modules,
but may find distributions.
"""
@staticmethod
def find_spec(*args, **kwargs):
return None
# In Python 2, the import system requires finders
# to have a find_module() method, but this usage
# is deprecated in Python 3 in favor of find_spec().
# For the purposes of this finder (i.e. being present
# on sys.meta_path but having no other import
# system functionality), the two methods are identical.
find_module = find_spec
def pypy_partial(val):
"""
Adjust for variable stacklevel on partial under PyPy.
Workaround for #327.
"""
is_pypy = platform.python_implementation() == 'PyPy'
return val + is_pypy
| 1,859 | Python | 24.479452 | 83 | 0.667025 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/__init__.py | import os
import re
import abc
import csv
import sys
from .. import zipp
import email
import pathlib
import operator
import textwrap
import warnings
import functools
import itertools
import posixpath
import collections
from . import _adapters, _meta, _py39compat
from ._collections import FreezableDefaultDict, Pair
from ._compat import (
NullFinder,
install,
pypy_partial,
)
from ._functools import method_cache, pass_none
from ._itertools import always_iterable, unique_everseen
from ._meta import PackageMetadata, SimplePath
from contextlib import suppress
from importlib import import_module
from importlib.abc import MetaPathFinder
from itertools import starmap
from typing import List, Mapping, Optional
__all__ = [
'Distribution',
'DistributionFinder',
'PackageMetadata',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'packages_distributions',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
return f"No package metadata was found for {self.name}"
@property
def name(self):
(name,) = self.args
return name
class Sectioned:
"""
A simple entry point config parser for performance
>>> for item in Sectioned.read(Sectioned._sample):
... print(item)
Pair(name='sec1', value='# comments ignored')
Pair(name='sec1', value='a = 1')
Pair(name='sec1', value='b = 2')
Pair(name='sec2', value='a = 2')
>>> res = Sectioned.section_pairs(Sectioned._sample)
>>> item = next(res)
>>> item.name
'sec1'
>>> item.value
Pair(name='a', value='1')
>>> item = next(res)
>>> item.value
Pair(name='b', value='2')
>>> item = next(res)
>>> item.name
'sec2'
>>> item.value
Pair(name='a', value='2')
>>> list(res)
[]
"""
_sample = textwrap.dedent(
"""
[sec1]
# comments ignored
a = 1
b = 2
[sec2]
a = 2
"""
).lstrip()
@classmethod
def section_pairs(cls, text):
return (
section._replace(value=Pair.parse(section.value))
for section in cls.read(text, filter_=cls.valid)
if section.name is not None
)
@staticmethod
def read(text, filter_=None):
lines = filter(filter_, map(str.strip, text.splitlines()))
name = None
for value in lines:
section_match = value.startswith('[') and value.endswith(']')
if section_match:
name = value.strip('[]')
continue
yield Pair(name, value)
@staticmethod
def valid(line):
return line and not line.startswith('#')
class DeprecatedTuple:
"""
Provide subscript item access for backward compatibility.
>>> recwarn = getfixture('recwarn')
>>> ep = EntryPoint(name='name', value='value', group='group')
>>> ep[:]
('name', 'value', 'group')
>>> ep[0]
'name'
>>> len(recwarn)
1
"""
# Do not remove prior to 2023-05-01 or Python 3.13
_warn = functools.partial(
warnings.warn,
"EntryPoint tuple interface is deprecated. Access members by name.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
def __getitem__(self, item):
self._warn()
return self._key()[item]
class EntryPoint(DeprecatedTuple):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
>>> ep = EntryPoint(
... name=None, group=None, value='package.module:attr [extra1, extra2]')
>>> ep.module
'package.module'
>>> ep.attr
'attr'
>>> ep.extras
['extra1', 'extra2']
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+)\s*)?'
r'((?P<extras>\[.*\])\s*)?$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
name: str
value: str
group: str
dist: Optional['Distribution'] = None
def __init__(self, name, value, group):
vars(self).update(name=name, value=value, group=group)
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
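# Illustrative example (not part of the vendored module): for an entry point
# whose value is "json.tool:main", load() imports the stdlib module json.tool
# and returns its main function; a value of just "json.tool" returns the module
# itself.
#   EntryPoint(name="demo", value="json.tool:main", group="console_scripts").load()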
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return re.findall(r'\w+', match.group('extras') or '')
def _for(self, dist):
vars(self).update(dist=dist)
return self
def matches(self, **params):
"""
EntryPoint matches the given parameters.
>>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
>>> ep.matches(group='foo')
True
>>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
True
>>> ep.matches(group='foo', name='other')
False
>>> ep.matches()
True
>>> ep.matches(extras=['extra1', 'extra2'])
True
>>> ep.matches(module='bing')
True
>>> ep.matches(attr='bong')
True
"""
attrs = (getattr(self, param) for param in params)
return all(map(operator.eq, params.values(), attrs))
def _key(self):
return self.name, self.value, self.group
def __lt__(self, other):
return self._key() < other._key()
def __eq__(self, other):
return self._key() == other._key()
def __setattr__(self, name, value):
raise AttributeError("EntryPoint objects are immutable.")
def __repr__(self):
return (
f'EntryPoint(name={self.name!r}, value={self.value!r}, '
f'group={self.group!r})'
)
def __hash__(self):
return hash(self._key())
class EntryPoints(tuple):
"""
An immutable collection of selectable EntryPoint objects.
"""
__slots__ = ()
def __getitem__(self, name): # -> EntryPoint:
"""
Get the EntryPoint in self matching name.
"""
try:
return next(iter(self.select(name=name)))
except StopIteration:
raise KeyError(name)
def select(self, **params):
"""
Select entry points from self that match the
given parameters (typically group and/or name).
"""
return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params))
@property
def names(self):
"""
Return the set of all names of all entry points.
"""
return {ep.name for ep in self}
@property
def groups(self):
"""
Return the set of all groups of all entry points.
"""
return {ep.group for ep in self}
@classmethod
def _from_text_for(cls, text, dist):
return cls(ep._for(dist) for ep in cls._from_text(text))
@staticmethod
def _from_text(text):
return (
EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
for item in Sectioned.section_pairs(text or '')
)
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return f'<FileHash mode: {self.mode} value: {self.value}>'
class Distribution(metaclass=abc.ABCMeta):
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name: str):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
:raises ValueError: When an invalid value is supplied for name.
"""
if not name:
raise ValueError("A distribution name is required.")
try:
return next(cls.discover(name=name))
except StopIteration:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self) -> _meta.PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return _adapters.Message(email.message_from_string(text))
@property
def name(self):
"""Return the 'Name' metadata for the distribution package."""
return self.metadata['Name']
@property
def _normalized_name(self):
"""Return a normalized version of the name."""
return Prepared.normalize(self.name)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
@pass_none
def make_files(lines):
return list(starmap(make_file, csv.reader(lines)))
return make_files(self._read_files_distinfo() or self._read_files_egginfo())
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return pass_none(self._deps_from_requires_text)(source)
@classmethod
def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and f'extra == "{name}"'
def quoted_marker(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
def url_req_space(req):
"""
PEP 508 requires a space between the url_spec and the quoted_marker.
Ref python/importlib_metadata#357.
"""
# '@' is uniquely indicative of a url_req.
return ' ' * ('@' in req)
for section in sections:
space = url_req_space(section.value)
yield section.value + space + quoted_marker(section.name)
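# Sketch of the conversion performed by ``_deps_from_requires_text`` above,
# using a hypothetical egg-info requires.txt (see also
# _test_deps_from_requires_text):
#
#     [:python_version < "3.8"]
#     importlib-metadata
#
#     [docs]
#     sphinx
#
# becomes, roughly, the PEP 508 style requirements:
#
#     importlib-metadata; python_version < "3.8"
#     sphinx; extra == "docs"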
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
            The sequence of directory paths that a distribution finder
should search.
Typically refers to Python installed package paths such as
"site-packages" directories and defaults to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
>>> FastPath('').children()
['...']
"""
@functools.lru_cache() # type: ignore
def __new__(cls, root):
return super().__new__(cls)
def __init__(self, root):
self.root = root
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '.')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
def search(self, name):
return self.lookup(self.mtime).search(name)
@property
def mtime(self):
with suppress(OSError):
return os.stat(self.root).st_mtime
self.lookup.cache_clear()
@method_cache
def lookup(self, mtime):
return Lookup(self)
class Lookup:
def __init__(self, path: FastPath):
base = os.path.basename(path.root).lower()
base_is_egg = base.endswith(".egg")
self.infos = FreezableDefaultDict(list)
self.eggs = FreezableDefaultDict(list)
for child in path.children():
low = child.lower()
if low.endswith((".dist-info", ".egg-info")):
# rpartition is faster than splitext and suitable for this purpose.
name = low.rpartition(".")[0].partition("-")[0]
normalized = Prepared.normalize(name)
self.infos[normalized].append(path.joinpath(child))
elif base_is_egg and low == "egg-info":
name = base.rpartition(".")[0].partition("-")[0]
legacy_normalized = Prepared.legacy_normalize(name)
self.eggs[legacy_normalized].append(path.joinpath(child))
self.infos.freeze()
self.eggs.freeze()
def search(self, prepared):
infos = (
self.infos[prepared.normalized]
if prepared
else itertools.chain.from_iterable(self.infos.values())
)
eggs = (
self.eggs[prepared.legacy_normalized]
if prepared
else itertools.chain.from_iterable(self.eggs.values())
)
return itertools.chain(infos, eggs)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = None
legacy_normalized = None
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = self.normalize(name)
self.legacy_normalized = self.legacy_normalize(name)
@staticmethod
def normalize(name):
"""
PEP 503 normalization plus dashes as underscores.
"""
return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
@staticmethod
def legacy_normalize(name):
"""
Normalize the package name as found in the convention in
older packaging tools versions and specs.
"""
return name.lower().replace('-', '_')
def __bool__(self):
return bool(self.name)
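# Normalization sketch (hypothetical project name), matching the two static
# methods above:
#
#     Prepared.normalize('Foo.Bar-Baz')        # -> 'foo_bar_baz'
#     Prepared.legacy_normalize('Foo.Bar-Baz') # -> 'foo.bar_baz'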
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
        (or all names if ``None`` is indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
prepared = Prepared(name)
return itertools.chain.from_iterable(
path.search(prepared) for path in map(FastPath, paths)
)
    @classmethod
    def invalidate_caches(cls):
FastPath.__new__.cache_clear()
class PathDistribution(Distribution):
def __init__(self, path: SimplePath):
"""Construct a distribution.
:param path: SimplePath indicating the metadata directory.
"""
self._path = path
def read_text(self, filename):
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
@property
def _normalized_name(self):
"""
Performance optimization: where possible, resolve the
normalized name from the file system path.
"""
stem = os.path.basename(str(self._path))
return (
pass_none(Prepared.normalize)(self._name_from_stem(stem))
or super()._normalized_name
)
@staticmethod
def _name_from_stem(stem):
"""
>>> PathDistribution._name_from_stem('foo-3.0.egg-info')
'foo'
>>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info')
'CherryPy'
>>> PathDistribution._name_from_stem('face.egg-info')
'face'
>>> PathDistribution._name_from_stem('foo.bar')
"""
filename, ext = os.path.splitext(stem)
if ext not in ('.dist-info', '.egg-info'):
return
name, sep, rest = filename.partition('-')
return name
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name) -> _meta.PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
_unique = functools.partial(
unique_everseen,
key=_py39compat.normalized_name,
)
"""
Wrapper for ``distributions`` to return unique distributions by name.
"""
def entry_points(**params) -> EntryPoints:
"""Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
:return: EntryPoints for all installed packages.
"""
eps = itertools.chain.from_iterable(
dist.entry_points for dist in _unique(distributions())
)
return EntryPoints(eps).select(**params)
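# Typical (hypothetical) queries against the function above:
#
#     entry_points(group='console_scripts')              # all console scripts
#     entry_points(group='console_scripts', name='pip')  # narrowed further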
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
def packages_distributions() -> Mapping[str, List[str]]:
"""
Return a mapping of top-level packages to their
distributions.
>>> import collections.abc
>>> pkgs = packages_distributions()
>>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
True
"""
pkg_to_dist = collections.defaultdict(list)
for dist in distributions():
for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
pkg_to_dist[pkg].append(dist.metadata['Name'])
return dict(pkg_to_dist)
def _top_level_declared(dist):
return (dist.read_text('top_level.txt') or '').split()
def _top_level_inferred(dist):
return {
f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
for f in always_iterable(dist.files)
if f.suffix == ".py"
}
| 26,498 | Python | 28.280663 | 88 | 0.606272 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_adapters.py | import functools
import warnings
import re
import textwrap
import email.message
from ._text import FoldedCase
from ._compat import pypy_partial
# Do not remove prior to 2024-01-01 or Python 3.14
_warn = functools.partial(
warnings.warn,
"Implicit None on return values is deprecated and will raise KeyErrors.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
class Message(email.message.Message):
multiple_use_keys = set(
map(
FoldedCase,
[
'Classifier',
'Obsoletes-Dist',
'Platform',
'Project-URL',
'Provides-Dist',
'Provides-Extra',
'Requires-Dist',
'Requires-External',
'Supported-Platform',
'Dynamic',
],
)
)
"""
Keys that may be indicated multiple times per PEP 566.
"""
def __new__(cls, orig: email.message.Message):
res = super().__new__(cls)
vars(res).update(vars(orig))
return res
def __init__(self, *args, **kwargs):
self._headers = self._repair_headers()
# suppress spurious error from mypy
def __iter__(self):
return super().__iter__()
def __getitem__(self, item):
"""
Warn users that a ``KeyError`` can be expected when a
        missing key is supplied. Ref python/importlib_metadata#371.
"""
res = super().__getitem__(item)
if res is None:
_warn()
return res
def _repair_headers(self):
def redent(value):
"Correct for RFC822 indentation"
if not value or '\n' not in value:
return value
return textwrap.dedent(' ' * 8 + value)
headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
if self._payload:
headers.append(('Description', self.get_payload()))
return headers
@property
def json(self):
"""
Convert PackageMetadata to a JSON-compatible format
per PEP 0566.
"""
def transform(key):
value = self.get_all(key) if key in self.multiple_use_keys else self[key]
if key == 'Keywords':
value = re.split(r'\s+', value)
tk = key.lower().replace('-', '_')
return tk, value
return dict(map(transform, map(FoldedCase, self)))
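# Rough shape of the ``json`` property output for a hypothetical distribution:
#
#     {
#         'metadata_version': '2.1',
#         'name': 'example',
#         'version': '1.0',
#         'requires_dist': ['requests', 'tomli; python_version < "3.11"'],
#     }
#
# Multiple-use keys (``multiple_use_keys`` above) map to lists; all other keys
# map to single string values.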
| 2,454 | Python | 25.978022 | 85 | 0.537897 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_functools.py | import types
import functools
# from jaraco.functools 3.3
def method_cache(method, cache_wrapper=None):
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
cache_wrapper = cache_wrapper or functools.lru_cache()
def wrapper(self, *args, **kwargs):
# it's the first call, replace the method with a cached, bound method
bound_method = types.MethodType(method, self)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None
return wrapper
# From jaraco.functools 3.3
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
| 2,895 | Python | 26.580952 | 88 | 0.62867 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_py39compat.py | """
Compatibility layer with Python 3.8/3.9
"""
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING: # pragma: no cover
# Prevent circular imports on runtime.
from . import Distribution, EntryPoint
else:
Distribution = EntryPoint = Any
def normalized_name(dist: Distribution) -> Optional[str]:
"""
Honor name normalization for distributions that don't provide ``_normalized_name``.
"""
try:
return dist._normalized_name
except AttributeError:
from . import Prepared # -> delay to prevent circular imports.
return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
def ep_matches(ep: EntryPoint, **params) -> bool:
"""
Workaround for ``EntryPoint`` objects without the ``matches`` method.
"""
try:
return ep.matches(**params)
except AttributeError:
from . import EntryPoint # -> delay to prevent circular imports.
# Reconstruct the EntryPoint object to make sure it is compatible.
return EntryPoint(ep.name, ep.value, ep.group).matches(**params)
| 1,098 | Python | 29.527777 | 87 | 0.672131 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_text.py | import re
from ._functools import method_cache
# from jaraco.text 3.5
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use in_:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
| 2,166 | Python | 20.67 | 62 | 0.576639 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/abc.py | import abc
import io
import itertools
import pathlib
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
from ._compat import runtime_checkable, Protocol, StrPath
__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
class ResourceReader(metaclass=abc.ABCMeta):
"""Abstract base class for loaders to provide resource reading support."""
@abc.abstractmethod
def open_resource(self, resource: Text) -> BinaryIO:
"""Return an opened, file-like object for binary reading.
The 'resource' argument is expected to represent only a file name.
If the resource cannot be found, FileNotFoundError is raised.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def resource_path(self, resource: Text) -> Text:
"""Return the file system path to the specified resource.
The 'resource' argument is expected to represent only a file name.
If the resource does not exist on the file system, raise
FileNotFoundError.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def is_resource(self, path: Text) -> bool:
"""Return True if the named 'path' is a resource.
Files are resources, directories are not.
"""
raise FileNotFoundError
@abc.abstractmethod
def contents(self) -> Iterable[str]:
"""Return an iterable of entries in `package`."""
raise FileNotFoundError
class TraversalError(Exception):
pass
@runtime_checkable
class Traversable(Protocol):
"""
An object with a subset of pathlib.Path methods suitable for
traversing directories and opening files.
Any exceptions that occur when accessing the backing resource
may propagate unaltered.
"""
@abc.abstractmethod
def iterdir(self) -> Iterator["Traversable"]:
"""
Yield Traversable objects in self
"""
def read_bytes(self) -> bytes:
"""
Read contents of self as bytes
"""
with self.open('rb') as strm:
return strm.read()
def read_text(self, encoding: Optional[str] = None) -> str:
"""
Read contents of self as text
"""
with self.open(encoding=encoding) as strm:
return strm.read()
@abc.abstractmethod
def is_dir(self) -> bool:
"""
Return True if self is a directory
"""
@abc.abstractmethod
def is_file(self) -> bool:
"""
Return True if self is a file
"""
def joinpath(self, *descendants: StrPath) -> "Traversable":
"""
Return Traversable resolved with any descendants applied.
Each descendant should be a path segment relative to self
and each may contain multiple levels separated by
``posixpath.sep`` (``/``).
"""
if not descendants:
return self
names = itertools.chain.from_iterable(
path.parts for path in map(pathlib.PurePosixPath, descendants)
)
target = next(names)
matches = (
traversable for traversable in self.iterdir() if traversable.name == target
)
try:
match = next(matches)
except StopIteration:
raise TraversalError(
"Target not found during traversal.", target, list(names)
)
return match.joinpath(*names)
def __truediv__(self, child: StrPath) -> "Traversable":
"""
Return Traversable child in self
"""
return self.joinpath(child)
@abc.abstractmethod
def open(self, mode='r', *args, **kwargs):
"""
mode may be 'r' or 'rb' to open as text or binary. Return a handle
suitable for reading (same as pathlib.Path.open).
When opening as text, accepts encoding parameters such as those
accepted by io.TextIOWrapper.
"""
@property
@abc.abstractmethod
def name(self) -> str:
"""
The base name of this object without any parent references.
"""
class TraversableResources(ResourceReader):
"""
The required interface for providing traversable
resources.
"""
@abc.abstractmethod
def files(self) -> "Traversable":
"""Return a Traversable object for the loaded package."""
def open_resource(self, resource: StrPath) -> io.BufferedReader:
return self.files().joinpath(resource).open('rb')
def resource_path(self, resource: Any) -> NoReturn:
raise FileNotFoundError(resource)
def is_resource(self, path: StrPath) -> bool:
return self.files().joinpath(path).is_file()
def contents(self) -> Iterator[str]:
return (item.name for item in self.files().iterdir())
| 5,140 | Python | 29.064327 | 87 | 0.628405 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_itertools.py | from itertools import filterfalse
from typing import (
Callable,
Iterable,
Iterator,
Optional,
Set,
TypeVar,
Union,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')
def unique_everseen(
iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen: Set[Union[_T, _U]] = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
| 884 | Python | 23.583333 | 78 | 0.580317 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_compat.py | # flake8: noqa
import abc
import os
import sys
import pathlib
from contextlib import suppress
from typing import Union
if sys.version_info >= (3, 10):
from zipfile import Path as ZipPath # type: ignore
else:
from ..zipp import Path as ZipPath # type: ignore
try:
from typing import runtime_checkable # type: ignore
except ImportError:
def runtime_checkable(cls): # type: ignore
return cls
try:
from typing import Protocol # type: ignore
except ImportError:
Protocol = abc.ABC # type: ignore
class TraversableResourcesLoader:
"""
Adapt loaders to provide TraversableResources and other
compatibility.
Used primarily for Python 3.9 and earlier where the native
loaders do not yet implement TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
@property
def path(self):
return self.spec.origin
def get_resource_reader(self, name):
from . import readers, _adapters
def _zip_reader(spec):
with suppress(AttributeError):
return readers.ZipReader(spec.loader, spec.name)
def _namespace_reader(spec):
with suppress(AttributeError, ValueError):
return readers.NamespaceReader(spec.submodule_search_locations)
def _available_reader(spec):
with suppress(AttributeError):
return spec.loader.get_resource_reader(spec.name)
def _native_reader(spec):
reader = _available_reader(spec)
return reader if hasattr(reader, 'files') else None
def _file_reader(spec):
try:
path = pathlib.Path(self.path)
except TypeError:
return None
if path.exists():
return readers.FileReader(self)
return (
# native reader if it supplies 'files'
_native_reader(self.spec)
or
# local ZipReader if a zip module
_zip_reader(self.spec)
or
# local NamespaceReader if a namespace module
_namespace_reader(self.spec)
or
# local FileReader
_file_reader(self.spec)
# fallback - adapt the spec ResourceReader to TraversableReader
or _adapters.CompatibilityFiles(self.spec)
)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
from above for older Python compatibility (<3.10).
"""
from . import _adapters
return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
if sys.version_info >= (3, 9):
StrPath = Union[str, os.PathLike[str]]
else:
# PathLike is only subscriptable at runtime in 3.9+
StrPath = Union[str, "os.PathLike[str]"]
| 2,925 | Python | 25.844036 | 84 | 0.627009 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/__init__.py | """Read resources contained within a package."""
from ._common import (
as_file,
files,
Package,
)
from ._legacy import (
contents,
open_binary,
read_binary,
open_text,
read_text,
is_resource,
path,
Resource,
)
from .abc import ResourceReader
__all__ = [
'Package',
'Resource',
'ResourceReader',
'as_file',
'contents',
'files',
'is_resource',
'open_binary',
'open_text',
'path',
'read_binary',
'read_text',
]
| 506 | Python | 12.702702 | 48 | 0.55336 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_common.py | import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools
from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable
from ._compat import wrap_spec
Package = Union[types.ModuleType, str]
Anchor = Package
def package_to_anchor(func):
"""
Replace 'package' parameter as 'anchor' and warn about the change.
Other errors should fall through.
>>> files('a', 'b')
Traceback (most recent call last):
TypeError: files() takes from 0 to 1 positional arguments but 2 were given
"""
undefined = object()
@functools.wraps(func)
def wrapper(anchor=undefined, package=undefined):
if package is not undefined:
if anchor is not undefined:
return func(anchor, package)
warnings.warn(
"First parameter to files is renamed to 'anchor'",
DeprecationWarning,
stacklevel=2,
)
return func(package)
elif anchor is undefined:
return func()
return func(anchor)
return wrapper
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
"""
Get a Traversable resource for an anchor.
"""
return from_package(resolve(anchor))
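# A small usage sketch (package and resource names are hypothetical):
#
#     data = files('mypkg').joinpath('data.json').read_text(encoding='utf-8')
#
# or, when a real filesystem path is required (e.g. for a subprocess):
#
#     with as_file(files('mypkg') / 'data.json') as path:
#         subprocess.run(['cat', str(path)])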
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
"""
Return the package's loader if it's a ResourceReader.
"""
# We can't use
    # an issubclass() check here because apparently abc's __subclasscheck__()
# hook wants to create a weak reference to the object, but
# zipimport.zipimporter does not support weak references, resulting in a
# TypeError. That seems terrible.
spec = package.__spec__
reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
if reader is None:
return None
return reader(spec.name) # type: ignore
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
return cast(types.ModuleType, cand)
@resolve.register
def _(cand: str) -> types.ModuleType:
return importlib.import_module(cand)
@resolve.register
def _(cand: None) -> types.ModuleType:
return resolve(_infer_caller().f_globals['__name__'])
def _infer_caller():
"""
Walk the stack and find the frame of the first caller not in this module.
"""
def is_this_file(frame_info):
return frame_info.filename == __file__
def is_wrapper(frame_info):
return frame_info.function == 'wrapper'
not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
# also exclude 'wrapper' due to singledispatch in the call stack
callers = itertools.filterfalse(is_wrapper, not_this_file)
return next(callers).frame
def from_package(package: types.ModuleType):
"""
Return a Traversable object for the given package.
"""
spec = wrap_spec(package)
reader = spec.loader.get_resource_reader(spec.name)
return reader.files()
@contextlib.contextmanager
def _tempfile(
reader,
suffix='',
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
_os_remove=os.remove,
):
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
# blocks due to the need to close the temporary file to work on Windows
# properly.
fd, raw_path = tempfile.mkstemp(suffix=suffix)
try:
try:
os.write(fd, reader())
finally:
os.close(fd)
del reader
yield pathlib.Path(raw_path)
finally:
try:
_os_remove(raw_path)
except FileNotFoundError:
pass
def _temp_file(path):
return _tempfile(path.read_bytes, suffix=path.name)
def _is_present_dir(path: Traversable) -> bool:
"""
Some Traversables implement ``is_dir()`` to raise an
exception (i.e. ``FileNotFoundError``) when the
directory doesn't exist. This function wraps that call
to always return a boolean and only return True
if there's a dir and it exists.
"""
with contextlib.suppress(FileNotFoundError):
return path.is_dir()
return False
@functools.singledispatch
def as_file(path):
"""
Given a Traversable object, return that object as a
path on the local file system in a context manager.
"""
return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
"""
Degenerate behavior for pathlib.Path objects.
"""
yield path
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
"""
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
"""
with dir as result:
yield pathlib.Path(result)
@contextlib.contextmanager
def _temp_dir(path):
"""
Given a traversable dir, recursively replicate the whole tree
to the file system in a context manager.
"""
assert path.is_dir()
with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
yield _write_contents(temp_dir, path)
def _write_contents(target, source):
child = target.joinpath(source.name)
if source.is_dir():
child.mkdir()
for item in source.iterdir():
_write_contents(child, item)
else:
child.write_bytes(source.read_bytes())
return child
| 5,457 | Python | 25.240384 | 79 | 0.658604 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_legacy.py | import functools
import os
import pathlib
import types
import warnings
from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
from . import _common
Package = Union[types.ModuleType, str]
Resource = str
def deprecated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
f"{func.__name__} is deprecated. Use files() instead. "
"Refer to https://importlib-resources.readthedocs.io"
"/en/latest/using.html#migrating-from-legacy for migration advice.",
DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
def normalize_path(path: Any) -> str:
"""Normalize a path by ensuring it is a string.
If the resulting string contains path separators, an exception is raised.
"""
str_path = str(path)
parent, file_name = os.path.split(str_path)
if parent:
raise ValueError(f'{path!r} must be only a file name')
return file_name
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
"""Return a file-like object opened for binary reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open('rb')
@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
"""Return the binary contents of the resource."""
return (_common.files(package) / normalize_path(resource)).read_bytes()
@deprecated
def open_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> TextIO:
"""Return a file-like object opened for text reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open(
'r', encoding=encoding, errors=errors
)
@deprecated
def read_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> str:
"""Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
with open_text(package, resource, encoding, errors) as fp:
return fp.read()
@deprecated
def contents(package: Package) -> Iterable[str]:
"""Return an iterable of entries in `package`.
Note that not all entries are resources. Specifically, directories are
not considered resources. Use `is_resource()` on each entry returned here
to check if it is a resource or not.
"""
return [path.name for path in _common.files(package).iterdir()]
@deprecated
def is_resource(package: Package, name: str) -> bool:
"""True if `name` is a resource inside `package`.
Directories are *not* resources.
"""
resource = normalize_path(name)
return any(
traversable.name == resource and traversable.is_file()
for traversable in _common.files(package).iterdir()
)
@deprecated
def path(
package: Package,
resource: Resource,
) -> ContextManager[pathlib.Path]:
"""A context manager providing a file path object to the resource.
If the resource does not already exist on its own on the file system,
a temporary file will be created. If the file was created, the file
will be deleted upon exiting the context manager (no exception is
raised if the file was deleted prior to the context manager
exiting).
"""
return _common.as_file(_common.files(package) / normalize_path(resource))
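# Migration sketch: each deprecated helper above has a files()-based
# equivalent, e.g. (package/resource names hypothetical):
#
#     read_text('mypkg', 'data.txt')  ->  files('mypkg').joinpath('data.txt').read_text()
#     contents('mypkg')               ->  [p.name for p in files('mypkg').iterdir()]
#     path('mypkg', 'data.txt')       ->  as_file(files('mypkg') / 'data.txt')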
| 3,481 | Python | 27.776859 | 80 | 0.673657 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_adapters.py | from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
"""
Adapt a package spec to adapt the underlying loader.
"""
def __init__(self, spec, adapter=lambda spec: spec.loader):
self.spec = spec
self.loader = adapter(spec)
def __getattr__(self, name):
return getattr(self.spec, name)
class TraversableResourcesLoader:
"""
Adapt a loader to provide TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
def get_resource_reader(self, name):
return CompatibilityFiles(self.spec)._native()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
"""
Adapter for an existing or non-existent resource reader
to provide a compatibility .files().
"""
class SpecPath(abc.Traversable):
"""
Path tied to a module spec.
Can be read and exposes the resource reader children.
"""
def __init__(self, spec, reader):
self._spec = spec
self._reader = reader
def iterdir(self):
if not self._reader:
return iter(())
return iter(
CompatibilityFiles.ChildPath(self._reader, path)
for path in self._reader.contents()
)
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
if not self._reader:
return CompatibilityFiles.OrphanPath(other)
return CompatibilityFiles.ChildPath(self._reader, other)
@property
def name(self):
return self._spec.name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
class ChildPath(abc.Traversable):
"""
Path tied to a resource reader child.
Can be read but doesn't expose any meaningful children.
"""
def __init__(self, reader, name):
self._reader = reader
self._name = name
def iterdir(self):
return iter(())
def is_file(self):
return self._reader.is_resource(self.name)
def is_dir(self):
return not self.is_file()
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(self.name, other)
@property
def name(self):
return self._name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(
self._reader.open_resource(self.name), mode, *args, **kwargs
)
class OrphanPath(abc.Traversable):
"""
Orphan path, not tied to a module spec or resource reader.
Can't be read and doesn't expose any meaningful children.
"""
def __init__(self, *path_parts):
if len(path_parts) < 1:
raise ValueError('Need at least one path part to construct a path')
self._path = path_parts
def iterdir(self):
return iter(())
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(*self._path, other)
@property
def name(self):
return self._path[-1]
def open(self, mode='r', *args, **kwargs):
raise FileNotFoundError("Can't open orphan path")
def __init__(self, spec):
self.spec = spec
@property
def _reader(self):
with suppress(AttributeError):
return self.spec.loader.get_resource_reader(self.spec.name)
def _native(self):
"""
Return the native reader if it supports files().
"""
reader = self._reader
return reader if hasattr(reader, 'files') else self
def __getattr__(self, attr):
return getattr(self._reader, attr)
def files(self):
return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
"""
return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
| 4,504 | Python | 25.345029 | 87 | 0.571714 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/simple.py | """
Interface adapters for low-level readers.
"""
import abc
import io
import itertools
from typing import BinaryIO, List
from .abc import Traversable, TraversableResources
class SimpleReader(abc.ABC):
"""
The minimum, low-level interface required from a resource
provider.
"""
@property
@abc.abstractmethod
def package(self) -> str:
"""
The name of the package for which this reader loads resources.
"""
@abc.abstractmethod
def children(self) -> List['SimpleReader']:
"""
Obtain an iterable of SimpleReader for available
child containers (e.g. directories).
"""
@abc.abstractmethod
def resources(self) -> List[str]:
"""
Obtain available named resources for this virtual package.
"""
@abc.abstractmethod
def open_binary(self, resource: str) -> BinaryIO:
"""
        Obtain a file-like object for a named resource.
"""
@property
def name(self):
return self.package.split('.')[-1]
class ResourceContainer(Traversable):
"""
Traversable container for a package's resources via its reader.
"""
def __init__(self, reader: SimpleReader):
self.reader = reader
def is_dir(self):
return True
def is_file(self):
return False
def iterdir(self):
        files = (ResourceHandle(self, name) for name in self.reader.resources())
dirs = map(ResourceContainer, self.reader.children())
return itertools.chain(files, dirs)
def open(self, *args, **kwargs):
raise IsADirectoryError()
class ResourceHandle(Traversable):
"""
Handle to a named resource in a ResourceReader.
"""
def __init__(self, parent: ResourceContainer, name: str):
self.parent = parent
self.name = name # type: ignore
def is_file(self):
return True
def is_dir(self):
return False
def open(self, mode='r', *args, **kwargs):
stream = self.parent.reader.open_binary(self.name)
if 'b' not in mode:
            stream = io.TextIOWrapper(stream, *args, **kwargs)
return stream
def joinpath(self, name):
raise RuntimeError("Cannot traverse into a resource")
class TraversableReader(TraversableResources, SimpleReader):
"""
A TraversableResources based on SimpleReader. Resource providers
may derive from this class to provide the TraversableResources
interface by supplying the SimpleReader interface.
"""
def files(self):
return ResourceContainer(self)
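# Sketch of a minimal provider built on the interfaces above (entirely
# hypothetical; real providers normally return file objects backed by their
# own storage):
#
#     class DictReader(TraversableReader):
#         def __init__(self, package, data):
#             self._package, self._data = package, data
#         @property
#         def package(self):
#             return self._package
#         def children(self):
#             return []
#         def resources(self):
#             return list(self._data)
#         def open_binary(self, resource):
#             return io.BytesIO(self._data[resource])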
| 2,576 | Python | 23.084112 | 78 | 0.63354 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/readers.py | import collections
import pathlib
import operator
from . import abc
from ._itertools import unique_everseen
from ._compat import ZipPath
def remove_duplicates(items):
return iter(collections.OrderedDict.fromkeys(items))
class FileReader(abc.TraversableResources):
def __init__(self, loader):
self.path = pathlib.Path(loader.path).parent
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
class ZipReader(abc.TraversableResources):
def __init__(self, loader, module):
_, _, name = module.rpartition('.')
self.prefix = loader.prefix.replace('\\', '/') + name + '/'
self.archive = loader.archive
def open_resource(self, resource):
try:
return super().open_resource(resource)
except KeyError as exc:
raise FileNotFoundError(exc.args[0])
def is_resource(self, path):
# workaround for `zipfile.Path.is_file` returning true
# for non-existent paths.
target = self.files().joinpath(path)
return target.is_file() and target.exists()
def files(self):
return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
"""
Given a series of Traversable objects, implement a merged
version of the interface across all objects. Useful for
namespace packages which may be multihomed at a single
name.
"""
def __init__(self, *paths):
self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
if not self._paths:
message = 'MultiplexedPath must contain at least one path'
raise FileNotFoundError(message)
if not all(path.is_dir() for path in self._paths):
raise NotADirectoryError('MultiplexedPath only supports directories')
def iterdir(self):
files = (file for path in self._paths for file in path.iterdir())
return unique_everseen(files, key=operator.attrgetter('name'))
def read_bytes(self):
raise FileNotFoundError(f'{self} is not a file')
def read_text(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
def is_dir(self):
return True
def is_file(self):
return False
def joinpath(self, *descendants):
try:
return super().joinpath(*descendants)
except abc.TraversalError:
# One of the paths did not resolve (a directory does not exist).
# Just return something that will not exist.
return self._paths[0].joinpath(*descendants)
def open(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
@property
def name(self):
return self._paths[0].name
def __repr__(self):
paths = ', '.join(f"'{path}'" for path in self._paths)
return f'MultiplexedPath({paths})'
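# Illustration (hypothetical directories): given two locations that both
# provide the same namespace package, reads are merged:
#
#     merged = MultiplexedPath('/site-packages/ns', '/plugins/ns')
#     sorted(p.name for p in merged.iterdir())   # union of both directories
#     merged.joinpath('data', 'config.toml')     # resolved against either root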
class NamespaceReader(abc.TraversableResources):
def __init__(self, namespace_path):
if 'NamespacePath' not in str(namespace_path):
raise ValueError('Invalid path')
self.path = MultiplexedPath(*list(namespace_path))
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
| 3,581 | Python | 28.603306 | 81 | 0.63055 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/functools.py | import functools
import time
import inspect
import collections
import types
import itertools
import warnings
from setuptools.extern import more_itertools
from typing import Callable, TypeVar
CallableT = TypeVar("CallableT", bound=Callable[..., object])
def compose(*funcs):
"""
Compose any number of unary functions into a single unary function.
>>> import textwrap
>>> expected = str.strip(textwrap.dedent(compose.__doc__))
>>> strip_and_dedent = compose(str.strip, textwrap.dedent)
>>> strip_and_dedent(compose.__doc__) == expected
True
Compose also allows the innermost function to take arbitrary arguments.
>>> round_three = lambda x: round(x, ndigits=3)
>>> f = compose(round_three, int.__truediv__)
>>> [f(3*x, x+1) for x in range(1,10)]
[1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
"""
def compose_two(f1, f2):
return lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs)
def method_caller(method_name, *args, **kwargs):
"""
Return a function that will call a named method on the
target object with optional positional and keyword
arguments.
>>> lower = method_caller('lower')
>>> lower('MyString')
'mystring'
"""
def call_method(target):
func = getattr(target, method_name)
return func(*args, **kwargs)
return call_method
def once(func):
"""
Decorate func so it's only ever called the first time.
This decorator can ensure that an expensive or non-idempotent function
will not be expensive on subsequent calls and is idempotent.
>>> add_three = once(lambda a: a+3)
>>> add_three(3)
6
>>> add_three(9)
6
>>> add_three('12')
6
To reset the stored value, simply clear the property ``saved_result``.
>>> del add_three.saved_result
>>> add_three(9)
12
>>> add_three(8)
12
Or invoke 'reset()' on it.
>>> add_three.reset()
>>> add_three(-3)
0
>>> add_three(0)
0
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper, 'saved_result'):
wrapper.saved_result = func(*args, **kwargs)
return wrapper.saved_result
wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
return wrapper
def method_cache(
method: CallableT,
cache_wrapper: Callable[
[CallableT], CallableT
] = functools.lru_cache(), # type: ignore[assignment]
) -> CallableT:
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
def wrapper(self: object, *args: object, **kwargs: object) -> object:
# it's the first call, replace the method with a cached, bound method
bound_method: CallableT = types.MethodType( # type: ignore[assignment]
method, self
)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
return ( # type: ignore[return-value]
_special_method_cache(method, cache_wrapper) or wrapper
)
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
def apply(transform):
"""
Decorate a function with a transform function that is
invoked on results returned from the decorated function.
>>> @apply(reversed)
... def get_numbers(start):
... "doc for get_numbers"
... return range(start, start+3)
>>> list(get_numbers(4))
[6, 5, 4]
>>> get_numbers.__doc__
'doc for get_numbers'
"""
def wrap(func):
return functools.wraps(func)(compose(transform, func))
return wrap
def result_invoke(action):
r"""
Decorate a function with an action function that is
invoked on the results returned from the decorated
function (for its side-effect), then return the original
result.
>>> @result_invoke(print)
... def add_two(a, b):
... return a + b
>>> x = add_two(2, 3)
5
>>> x
5
"""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
action(result)
return result
return wrapper
return wrap
def invoke(f, *args, **kwargs):
"""
Call a function for its side effect after initialization.
The benefit of using the decorator instead of simply invoking a function
after defining it is that it makes explicit the author's intent for the
function to be called immediately. Whereas if one simply calls the
function immediately, it's less obvious if that was intentional or
incidental. It also avoids repeating the name - the two actions, defining
the function and calling it immediately are modeled separately, but linked
by the decorator construct.
The benefit of having a function construct (opposed to just invoking some
behavior inline) is to serve as a scope in which the behavior occurs. It
avoids polluting the global namespace with local variables, provides an
anchor on which to attach documentation (docstring), keeps the behavior
logically separated (instead of conceptually separated or not separated at
all), and provides potential to re-use the behavior for testing or other
purposes.
This function is named as a pithy way to communicate, "call this function
primarily for its side effect", or "while defining this function, also
take it aside and call it". It exists because there's no Python construct
for "define and call" (nor should there be, as decorators serve this need
just fine). The behavior happens immediately and synchronously.
>>> @invoke
... def func(): print("called")
called
>>> func()
called
Use functools.partial to pass parameters to the initial call
>>> @functools.partial(invoke, name='bingo')
... def func(name): print("called with", name)
called with bingo
"""
f(*args, **kwargs)
return f
def call_aside(*args, **kwargs):
"""
Deprecated name for invoke.
"""
warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning)
return invoke(*args, **kwargs)
class Throttler:
"""
Rate-limit a function (or other callable)
"""
def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler):
func = func.func
self.func = func
self.max_rate = max_rate
self.reset()
def reset(self):
self.last_called = 0
def __call__(self, *args, **kwargs):
self._wait()
return self.func(*args, **kwargs)
def _wait(self):
"ensure at least 1/max_rate seconds from last call"
elapsed = time.time() - self.last_called
must_wait = 1 / self.max_rate - elapsed
time.sleep(max(0, must_wait))
self.last_called = time.time()
def __get__(self, obj, type=None):
return first_invoke(self._wait, functools.partial(self.func, obj))
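# Usage sketch (the wrapped callable is hypothetical): limit an API call to at
# most two invocations per second.
#
#     fetch = Throttler(requests_get, max_rate=2)
#     for url in urls:
#         fetch(url)   # sleeps as needed to respect max_rate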
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
"""
Given a callable func, trap the indicated exceptions
for up to 'retries' times, invoking cleanup on the
exception. On the final attempt, allow any exceptions
to propagate.
"""
attempts = itertools.count() if retries == float('inf') else range(retries)
for attempt in attempts:
try:
return func()
except trap:
cleanup()
return func()
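# Usage sketch for retry_call (the flaky function and cleanup callable are
# hypothetical):
#
#     result = retry_call(
#         functools.partial(flaky_download, url),
#         cleanup=connection.reset,
#         retries=3,
#         trap=(IOError,),
#     )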
def retry(*r_args, **r_kwargs):
"""
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs)
return wrapper
return decorate
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.consume, print_all, func)
return functools.wraps(func)(print_results)
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns)
def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
    The arguments are stored on the instance, allowing for
    different instances to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
def except_(*exceptions, replace=None, use=None):
"""
Replace the indicated exceptions, if raised, with the indicated
literal replacement or evaluated expression (if present).
>>> safe_int = except_(ValueError)(int)
>>> safe_int('five')
>>> safe_int('5')
5
Specify a literal replacement with ``replace``.
>>> safe_int_r = except_(ValueError, replace=0)(int)
>>> safe_int_r('five')
0
Provide an expression to ``use`` to pass through particular parameters.
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
>>> safe_int_pt('five')
'five'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
try:
return eval(use)
except TypeError:
return replace
return wrapper
return decorate
| 15,053 | Python | 26.02693 | 88 | 0.613433 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/context.py | import os
import subprocess
import contextlib
import functools
import tempfile
import shutil
import operator
import warnings
@contextlib.contextmanager
def pushd(dir):
"""
>>> tmp_path = getfixture('tmp_path')
>>> with pushd(tmp_path):
... assert os.getcwd() == os.fspath(tmp_path)
>>> assert os.getcwd() != os.fspath(tmp_path)
"""
orig = os.getcwd()
os.chdir(dir)
try:
yield dir
finally:
os.chdir(orig)
@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
"""
Get a tarball, extract it, change to that directory, yield, then
clean up.
`runner` is the function to invoke commands.
`pushd` is a context manager for changing the directory.
"""
if target_dir is None:
target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
if runner is None:
runner = functools.partial(subprocess.check_call, shell=True)
else:
warnings.warn("runner parameter is deprecated", DeprecationWarning)
    # In the tar command, use --strip-components=1 to strip the first path and
    # then use -C to cause the files to be extracted to {target_dir}. This
    # ensures that we always know where the files were extracted.
runner('mkdir {target_dir}'.format(**vars()))
try:
getter = 'wget {url} -O -'
extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
cmd = ' | '.join((getter, extract))
runner(cmd.format(compression=infer_compression(url), **vars()))
with pushd(target_dir):
yield target_dir
finally:
runner('rm -Rf {target_dir}'.format(**vars()))
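# Illustrative usage sketch added for documentation; not part of the upstream
# module. The URL is hypothetical, and a recording runner plus a null pushd are
# substituted so nothing is downloaded or written (passing runner is deprecated
# upstream and emits a DeprecationWarning).
def _tarball_context_example():
    import contextlib
    commands = []
    with tarball_context(
        'https://example.com/project-1.0.tar.gz',
        target_dir='project-1.0',
        runner=commands.append,        # record shell commands instead of running them
        pushd=contextlib.nullcontext,  # nothing was extracted, so do not chdir
    ):
        pass
    # commands now holds the mkdir, the wget | tar pipeline, and the cleanup step.
    return commands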
def infer_compression(url):
"""
Given a URL or filename, infer the compression code for tar.
>>> infer_compression('http://foo/bar.tar.gz')
'z'
>>> infer_compression('http://foo/bar.tgz')
'z'
>>> infer_compression('file.bz')
'j'
>>> infer_compression('file.xz')
'J'
"""
# cheat and just assume it's the last two characters
compression_indicator = url[-2:]
mapping = dict(gz='z', bz='j', xz='J')
# Assume 'z' (gzip) if no match
return mapping.get(compression_indicator, 'z')
@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
"""
Create a temporary directory context. Pass a custom remover
to override the removal behavior.
>>> import pathlib
>>> with temp_dir() as the_dir:
... assert os.path.isdir(the_dir)
... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents')
>>> assert not os.path.exists(the_dir)
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
remover(temp_dir)
@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
"""
Check out the repo indicated by url.
If dest_ctx is supplied, it should be a context manager
to yield the target directory for the check out.
"""
exe = 'git' if 'git' in url else 'hg'
with dest_ctx() as repo_dir:
cmd = [exe, 'clone', url, repo_dir]
if branch:
cmd.extend(['--branch', branch])
devnull = open(os.path.devnull, 'w')
stdout = devnull if quiet else None
subprocess.check_call(cmd, stdout=stdout)
yield repo_dir
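# Illustrative usage sketch added for documentation; not part of the upstream
# module. The repository URL is hypothetical; running this would shell out to
# `git clone` and therefore needs network access.
def _repo_context_example():
    with repo_context('https://example.com/demo.git', branch='main') as repo_dir:
        # repo_dir is a temporary checkout that is removed when the block exits.
        print('checked out into', repo_dir)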
@contextlib.contextmanager
def null():
"""
A null context suitable to stand in for a meaningful context.
>>> with null() as value:
... assert value is None
"""
yield
class ExceptionTrap:
"""
A context manager that will catch certain exceptions and provide an
indication they occurred.
>>> with ExceptionTrap() as trap:
... raise Exception()
>>> bool(trap)
True
>>> with ExceptionTrap() as trap:
... pass
>>> bool(trap)
False
>>> with ExceptionTrap(ValueError) as trap:
... raise ValueError("1 + 1 is not 3")
>>> bool(trap)
True
>>> trap.value
ValueError('1 + 1 is not 3')
>>> trap.tb
<traceback object at ...>
>>> with ExceptionTrap(ValueError) as trap:
... raise Exception()
Traceback (most recent call last):
...
Exception
>>> bool(trap)
False
"""
exc_info = None, None, None
def __init__(self, exceptions=(Exception,)):
self.exceptions = exceptions
def __enter__(self):
return self
@property
def type(self):
return self.exc_info[0]
@property
def value(self):
return self.exc_info[1]
@property
def tb(self):
return self.exc_info[2]
def __exit__(self, *exc_info):
type = exc_info[0]
matches = type and issubclass(type, self.exceptions)
if matches:
self.exc_info = exc_info
return matches
def __bool__(self):
return bool(self.type)
def raises(self, func, *, _test=bool):
"""
Wrap func and replace the result with the truth
value of the trap (True if an exception occurred).
First, give the decorator an alias to support Python 3.8
Syntax.
>>> raises = ExceptionTrap(ValueError).raises
Now decorate a function that always fails.
>>> @raises
... def fail():
... raise ValueError('failed')
>>> fail()
True
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with ExceptionTrap(self.exceptions) as trap:
func(*args, **kwargs)
return _test(trap)
return wrapper
def passes(self, func):
"""
Wrap func and replace the result with the truth
value of the trap (True if no exception).
First, give the decorator an alias to support Python 3.8
Syntax.
>>> passes = ExceptionTrap(ValueError).passes
Now decorate a function that always fails.
>>> @passes
... def fail():
... raise ValueError('failed')
>>> fail()
False
"""
return self.raises(func, _test=operator.not_)
class suppress(contextlib.suppress, contextlib.ContextDecorator):
"""
A version of contextlib.suppress with decorator support.
>>> @suppress(KeyError)
... def key_error():
... {}['']
>>> key_error()
"""
class on_interrupt(contextlib.ContextDecorator):
"""
Replace a KeyboardInterrupt with SystemExit(1)
>>> def do_interrupt():
... raise KeyboardInterrupt()
>>> on_interrupt('error')(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 1
>>> on_interrupt('error', code=255)(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 255
>>> on_interrupt('suppress')(do_interrupt)()
>>> with __import__('pytest').raises(KeyboardInterrupt):
... on_interrupt('ignore')(do_interrupt)()
"""
def __init__(
self,
action='error',
# py3.7 compat
# /,
code=1,
):
self.action = action
self.code = code
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
if exctype is not KeyboardInterrupt or self.action == 'ignore':
return
elif self.action == 'error':
raise SystemExit(self.code) from excinst
return self.action == 'suppress'
| 7,460 | Python | 24.816609 | 85 | 0.586863 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/text/__init__.py | import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from setuptools.extern.importlib_resources import files # type: ignore
from setuptools.extern.jaraco.functools import compose, method_cache
from setuptools.extern.jaraco.context import ExceptionTrap
def substitution(old, new):
"""
Return a function that will perform a substitution on a string
"""
return lambda s: s.replace(old, new)
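# Illustrative usage sketch added for documentation; not part of the upstream
# module. substitution() builds a reusable one-replacement callable.
def _substitution_example():
    dashes_to_spaces = substitution('-', ' ')
    return dashes_to_spaces('jaraco-text')  # -> 'jaraco text'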
def multi_substitution(*substitutions):
"""
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
"""
substitutions = itertools.starmap(substitution, substitutions)
# compose function applies last function first, so reverse the
# substitutions to get the expected order.
substitutions = reversed(tuple(substitutions))
return compose(*substitutions)
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use ``in_``:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
# Python 3.8 compatibility
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
@_unicode_trap.passes
def is_decodable(value):
r"""
Return True if the supplied value is decodable (using the default
encoding).
>>> is_decodable(b'\xff')
False
>>> is_decodable(b'\x32')
True
"""
value.decode()
def is_binary(value):
r"""
Return True if the value appears to be binary (that is, it's a byte
string and isn't decodable).
>>> is_binary(b'\xff')
True
>>> is_binary('\xff')
False
"""
return isinstance(value, bytes) and not is_decodable(value)
def trim(s):
r"""
Trim something like a docstring to remove the whitespace that
is common due to indentation and formatting.
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
'foo = bar\n\tbar = baz'
"""
return textwrap.dedent(s).strip()
def wrap(s):
"""
Wrap lines of text, retaining existing newlines as
paragraph markers.
>>> print(wrap(lorem_ipsum))
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
<BLANKLINE>
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
"""
paragraphs = s.splitlines()
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
return '\n\n'.join(wrapped)
def unwrap(s):
r"""
Given a multi-line string, return an unwrapped version.
>>> wrapped = wrap(lorem_ipsum)
>>> wrapped.count('\n')
20
>>> unwrapped = unwrap(wrapped)
>>> unwrapped.count('\n')
1
>>> print(unwrapped)
Lorem ipsum dolor sit amet, consectetur adipiscing ...
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
"""
paragraphs = re.split(r'\n\n+', s)
cleaned = (para.replace('\n', ' ') for para in paragraphs)
return '\n'.join(cleaned)
class Splitter(object):
"""object that will split a string with the given arguments for each call
>>> s = Splitter(',')
>>> s('hello, world, this is your, master calling')
['hello', ' world', ' this is your', ' master calling']
"""
def __init__(self, *args):
self.args = args
def __call__(self, s):
return s.split(*self.args)
def indent(string, prefix=' ' * 4):
"""
>>> indent('foo')
' foo'
"""
return prefix + string
class WordSet(tuple):
"""
Given an identifier, return the words that identifier represents,
whether in camel case, underscore-separated, etc.
>>> WordSet.parse("camelCase")
('camel', 'Case')
>>> WordSet.parse("under_sep")
('under', 'sep')
Acronyms should be retained
>>> WordSet.parse("firstSNL")
('first', 'SNL')
>>> WordSet.parse("you_and_I")
('you', 'and', 'I')
>>> WordSet.parse("A simple test")
('A', 'simple', 'test')
Multiple caps should not interfere with the first cap of another word.
>>> WordSet.parse("myABCClass")
('my', 'ABC', 'Class')
The result is a WordSet, so you can get the form you need.
>>> WordSet.parse("myABCClass").underscore_separated()
'my_ABC_Class'
>>> WordSet.parse('a-command').camel_case()
'ACommand'
>>> WordSet.parse('someIdentifier').lowered().space_separated()
'some identifier'
Slices of the result should return another WordSet.
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
'out_of_context'
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
'word set'
>>> example = WordSet.parse('figured it out')
>>> example.headless_camel_case()
'figuredItOut'
>>> example.dash_separated()
'figured-it-out'
"""
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
def capitalized(self):
return WordSet(word.capitalize() for word in self)
def lowered(self):
return WordSet(word.lower() for word in self)
def camel_case(self):
return ''.join(self.capitalized())
def headless_camel_case(self):
words = iter(self)
first = next(words).lower()
new_words = itertools.chain((first,), WordSet(words).camel_case())
return ''.join(new_words)
def underscore_separated(self):
return '_'.join(self)
def dash_separated(self):
return '-'.join(self)
def space_separated(self):
return ' '.join(self)
def trim_right(self, item):
"""
Remove the item from the end of the set.
>>> WordSet.parse('foo bar').trim_right('foo')
('foo', 'bar')
>>> WordSet.parse('foo bar').trim_right('bar')
('foo',)
>>> WordSet.parse('').trim_right('bar')
()
"""
return self[:-1] if self and self[-1] == item else self
def trim_left(self, item):
"""
Remove the item from the beginning of the set.
>>> WordSet.parse('foo bar').trim_left('foo')
('bar',)
>>> WordSet.parse('foo bar').trim_left('bar')
('foo', 'bar')
>>> WordSet.parse('').trim_left('bar')
()
"""
return self[1:] if self and self[0] == item else self
def trim(self, item):
"""
>>> WordSet.parse('foo bar').trim('foo')
('bar',)
"""
return self.trim_left(item).trim_right(item)
def __getitem__(self, item):
result = super(WordSet, self).__getitem__(item)
if isinstance(item, slice):
result = WordSet(result)
return result
@classmethod
def parse(cls, identifier):
matches = cls._pattern.finditer(identifier)
return WordSet(match.group(0) for match in matches)
@classmethod
def from_class_name(cls, subject):
return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
"""
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
return ''.join(texts)
class SeparatedValues(str):
"""
A string separated by a separator. Overrides __iter__ for getting
the values.
>>> list(SeparatedValues('a,b,c'))
['a', 'b', 'c']
Whitespace is stripped and empty values are discarded.
>>> list(SeparatedValues(' a, b , c, '))
['a', 'b', 'c']
"""
separator = ','
def __iter__(self):
parts = self.split(self.separator)
return filter(None, (part.strip() for part in parts))
class Stripper:
r"""
Given a series of lines, find the common prefix and strip it from them.
>>> lines = [
... 'abcdefg\n',
... 'abc\n',
... 'abcde\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
'abc'
>>> list(res.lines)
['defg\n', '\n', 'de\n']
If no prefix is common, nothing should be stripped.
>>> lines = [
... 'abcd\n',
... '1234\n',
... ]
>>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
>>> list(res.lines)
['abcd\n', '1234\n']
"""
def __init__(self, prefix, lines):
self.prefix = prefix
self.lines = map(self, lines)
@classmethod
def strip_prefix(cls, lines):
prefix_lines, lines = itertools.tee(lines)
prefix = functools.reduce(cls.common_prefix, prefix_lines)
return cls(prefix, lines)
def __call__(self, line):
if not self.prefix:
return line
null, prefix, rest = line.partition(self.prefix)
return rest
@staticmethod
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index]
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
"""
null, prefix, rest = text.rpartition(prefix)
return rest
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
"""
rest, suffix, null = text.partition(suffix)
return rest
def normalize_newlines(text):
r"""
Replace alternate newlines with the canonical newline.
>>> normalize_newlines('Lorem Ipsum\u2029')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\r\n')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\x85')
'Lorem Ipsum\n'
"""
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
pattern = '|'.join(newlines)
return re.sub(pattern, '\n', text)
def _nonblank(str):
return str and not str.startswith('#')
@functools.singledispatch
def yield_lines(iterable):
r"""
Yield valid lines of a string or iterable.
>>> list(yield_lines(''))
[]
>>> list(yield_lines(['foo', 'bar']))
['foo', 'bar']
>>> list(yield_lines('foo\nbar'))
['foo', 'bar']
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
['foo', 'baz #comment']
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
['foo', 'bar', 'baz', 'bing']
"""
return itertools.chain.from_iterable(map(yield_lines, iterable))
@yield_lines.register(str)
def _(text):
return filter(_nonblank, map(str.strip, text.splitlines()))
def drop_comment(line):
"""
Drop comments.
>>> drop_comment('foo # bar')
'foo'
A hash without a space may be in a URL.
>>> drop_comment('http://example.com/foo#bar')
'http://example.com/foo#bar'
"""
return line.partition(' #')[0]
def join_continuation(lines):
r"""
Join lines continued by a trailing backslash.
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
['foobarbaz']
Not sure why, but...
    The character preceding the backslash is also elided.
>>> list(join_continuation(['goo\\', 'dly']))
['godly']
A terrible idea, but...
If no line is available to continue, suppress the lines.
>>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
['foo']
"""
lines = iter(lines)
for item in lines:
while item.endswith('\\'):
try:
item = item[:-2].strip() + next(lines)
except StopIteration:
return
yield item
| 15,517 | Python | 24.863333 | 77 | 0.598634 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_types.py | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from typing import Any, Callable, Tuple
# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int
| 254 | Python | 22.181816 | 48 | 0.748031 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/__init__.py | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
from ._parser import TOMLDecodeError, load, loads
# Pretend this exception was created here.
TOMLDecodeError.__module__ = __name__
| 396 | Python | 32.083331 | 87 | 0.729798 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_re.py | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any
from ._types import ParseFloat
# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
RE_NUMBER = re.compile(
r"""
0
(?:
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
b[01](?:_?[01])* # bin
|
o[0-7](?:_?[0-7])* # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
(?P<floatpart>
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
)
""",
flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
(?:
[Tt ]
{_TIME_RE_STR}
(?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
)?
""",
flags=re.VERBOSE,
)
def match_to_datetime(match: re.Match) -> datetime | date:
"""Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
Raises ValueError if the match does not correspond to a valid date
or datetime.
"""
(
year_str,
month_str,
day_str,
hour_str,
minute_str,
sec_str,
micros_str,
zulu_time,
offset_sign_str,
offset_hour_str,
offset_minute_str,
) = match.groups()
year, month, day = int(year_str), int(month_str), int(day_str)
if hour_str is None:
return date(year, month, day)
hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
if offset_sign_str:
tz: tzinfo | None = cached_tz(
offset_hour_str, offset_minute_str, offset_sign_str
)
elif zulu_time:
tz = timezone.utc
else: # local date-time
tz = None
return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
sign = 1 if sign_str == "+" else -1
return timezone(
timedelta(
hours=sign * int(hour_str),
minutes=sign * int(minute_str),
)
)
def match_to_localtime(match: re.Match) -> time:
hour_str, minute_str, sec_str, micros_str = match.groups()
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
return time(int(hour_str), int(minute_str), int(sec_str), micros)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
if match.group("floatpart"):
return parse_float(match.group())
return int(match.group(), 0)
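# Illustrative usage sketch added for documentation; not part of the upstream
# module. The module-level regexes locate a token and the match_to_* helpers
# convert it to a Python value, as the parser does internally.
def _re_helpers_example():
    dt = match_to_datetime(RE_DATETIME.match('1988-10-27T01:02:03Z'))
    # dt == datetime(1988, 10, 27, 1, 2, 3, tzinfo=timezone.utc)
    number = match_to_number(RE_NUMBER.match('1_000.5'), float)
    # number == 1000.5
    return dt, number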
| 2,943 | Python | 26.259259 | 87 | 0.561672 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_parser.py | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from collections.abc import Iterable
import string
from types import MappingProxyType
from typing import Any, BinaryIO, NamedTuple
from ._re import (
RE_DATETIME,
RE_LOCALTIME,
RE_NUMBER,
match_to_datetime,
match_to_localtime,
match_to_number,
)
from ._types import Key, ParseFloat, Pos
ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)
BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
{
"\\b": "\u0008", # backspace
"\\t": "\u0009", # tab
"\\n": "\u000A", # linefeed
"\\f": "\u000C", # form feed
"\\r": "\u000D", # carriage return
'\\"': "\u0022", # quote
"\\\\": "\u005C", # backslash
}
)
class TOMLDecodeError(ValueError):
"""An error raised if a document is not valid TOML."""
def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
b = __fp.read()
try:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
) from None
return loads(s, parse_float=parse_float)
def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = __s.replace("\r\n", "\n")
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
parse_float = make_safe_parse_float(parse_float)
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
)
pos += 1
return out.data.dict
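# Illustrative usage sketch added for documentation; not part of the upstream
# module. In setuptools this parser is normally reached via
# `setuptools.extern.tomli`; here loads() is called directly since it is
# defined above.
def _loads_example():
    doc = (
        '[project]\n'
        'name = "demo"\n'
        'keywords = ["toml", "parser"]\n'
        '\n'
        '[project.urls]\n'
        'homepage = "https://example.com"\n'
    )
    data = loads(doc)
    # data == {'project': {'name': 'demo', 'keywords': ['toml', 'parser'],
    #                      'urls': {'homepage': 'https://example.com'}}}
    return data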
class Flags:
"""Flags that map to parsed keys/namespaces."""
# Marks an immutable namespace (inline array or inline table).
FROZEN = 0
# Marks a nest that has been explicitly created and can no longer
# be opened using the "[table]" syntax.
EXPLICIT_NEST = 1
def __init__(self) -> None:
self._flags: dict[str, dict] = {}
self._pending_flags: set[tuple[Key, int]] = set()
def add_pending(self, key: Key, flag: int) -> None:
self._pending_flags.add((key, flag))
def finalize_pending(self) -> None:
for key, flag in self._pending_flags:
self.set(key, flag, recursive=False)
self._pending_flags.clear()
def unset_all(self, key: Key) -> None:
cont = self._flags
for k in key[:-1]:
if k not in cont:
return
cont = cont[k]["nested"]
cont.pop(key[-1], None)
def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
cont = self._flags
key_parent, key_stem = key[:-1], key[-1]
for k in key_parent:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
if key_stem not in cont:
cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
def is_(self, key: Key, flag: int) -> bool:
if not key:
return False # document root has no flags
cont = self._flags
for k in key[:-1]:
if k not in cont:
return False
inner_cont = cont[k]
if flag in inner_cont["recursive_flags"]:
return True
cont = inner_cont["nested"]
key_stem = key[-1]
if key_stem in cont:
cont = cont[key_stem]
return flag in cont["flags"] or flag in cont["recursive_flags"]
return False
class NestedDict:
def __init__(self) -> None:
# The parsed content of the TOML document
self.dict: dict[str, Any] = {}
def get_or_create_nest(
self,
key: Key,
*,
access_lists: bool = True,
) -> dict:
cont: Any = self.dict
for k in key:
if k not in cont:
cont[k] = {}
cont = cont[k]
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
return cont
def append_nest_to_list(self, key: Key) -> None:
cont = self.get_or_create_nest(key[:-1])
last_key = key[-1]
if last_key in cont:
list_ = cont[last_key]
if not isinstance(list_, list):
raise KeyError("An object other than list found behind this key")
list_.append({})
else:
cont[last_key] = [{}]
class Output(NamedTuple):
data: NestedDict
flags: Flags
def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
try:
while src[pos] in chars:
pos += 1
except IndexError:
pass
return pos
def skip_until(
src: str,
pos: Pos,
expect: str,
*,
error_on: frozenset[str],
error_on_eof: bool,
) -> Pos:
try:
new_pos = src.index(expect, pos)
except ValueError:
new_pos = len(src)
if error_on_eof:
raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
if not error_on.isdisjoint(src[pos:new_pos]):
while src[pos] not in error_on:
pos += 1
raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
return new_pos
def skip_comment(src: str, pos: Pos) -> Pos:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char == "#":
return skip_until(
src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
)
return pos
def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
while True:
pos_before_skip = pos
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
pos = skip_comment(src, pos)
if pos == pos_before_skip:
return pos
def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 1 # Skip "["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot declare {key} twice")
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.get_or_create_nest(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if not src.startswith("]", pos):
raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
return pos + 1, key
def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 2 # Skip "[["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
# Free the namespace now that it points to another empty list item...
out.flags.unset_all(key)
# ...but this key precisely is still prohibited from table declaration
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.append_nest_to_list(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if not src.startswith("]]", pos):
raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
return pos + 2, key
def key_value_rule(
src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
) -> Pos:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
abs_key_parent = header + key_parent
relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
for cont_key in relative_path_cont_keys:
# Check that dotted key syntax does not redefine an existing table
if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
# Containers in the relative path can't be opened with the table syntax or
# dotted key/value syntax in following table sections.
out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
if out.flags.is_(abs_key_parent, Flags.FROZEN):
raise suffixed_err(
src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
)
try:
nest = out.data.get_or_create_nest(abs_key_parent)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if key_stem in nest:
raise suffixed_err(src, pos, "Cannot overwrite a value")
# Mark inline table and array namespaces recursively immutable
if isinstance(value, (dict, list)):
out.flags.set(header + key, Flags.FROZEN, recursive=True)
nest[key_stem] = value
return pos
def parse_key_value_pair(
src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Key, Any]:
pos, key = parse_key(src, pos)
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != "=":
raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, value = parse_value(src, pos, parse_float)
return pos, key, value
def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
pos, key_part = parse_key_part(src, pos)
key: Key = (key_part,)
pos = skip_chars(src, pos, TOML_WS)
while True:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != ".":
return pos, key
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, key_part = parse_key_part(src, pos)
key += (key_part,)
pos = skip_chars(src, pos, TOML_WS)
def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char in BARE_KEY_CHARS:
start_pos = pos
pos = skip_chars(src, pos, BARE_KEY_CHARS)
return pos, src[start_pos:pos]
if char == "'":
return parse_literal_str(src, pos)
if char == '"':
return parse_one_line_basic_str(src, pos)
raise suffixed_err(src, pos, "Invalid initial character for a key part")
def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1
return parse_basic_str(src, pos, multiline=False)
def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
pos += 1
array: list = []
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
while True:
pos, val = parse_value(src, pos, parse_float)
array.append(val)
pos = skip_comments_and_array_ws(src, pos)
c = src[pos : pos + 1]
if c == "]":
return pos + 1, array
if c != ",":
raise suffixed_err(src, pos, "Unclosed array")
pos += 1
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
pos += 1
nested_dict = NestedDict()
flags = Flags()
pos = skip_chars(src, pos, TOML_WS)
if src.startswith("}", pos):
return pos + 1, nested_dict.dict
while True:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
if flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
try:
nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if key_stem in nest:
raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
nest[key_stem] = value
pos = skip_chars(src, pos, TOML_WS)
c = src[pos : pos + 1]
if c == "}":
return pos + 1, nested_dict.dict
if c != ",":
raise suffixed_err(src, pos, "Unclosed inline table")
if isinstance(value, (dict, list)):
flags.set(key, Flags.FROZEN, recursive=True)
pos += 1
pos = skip_chars(src, pos, TOML_WS)
def parse_basic_str_escape(
src: str, pos: Pos, *, multiline: bool = False
) -> tuple[Pos, str]:
escape_id = src[pos : pos + 2]
pos += 2
if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
# Skip whitespace until next non-whitespace character or end of
# the doc. Error if non-whitespace is found before newline.
if escape_id != "\\\n":
pos = skip_chars(src, pos, TOML_WS)
try:
char = src[pos]
except IndexError:
return pos, ""
if char != "\n":
raise suffixed_err(src, pos, "Unescaped '\\' in a string")
pos += 1
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
return pos, ""
if escape_id == "\\u":
return parse_hex_char(src, pos, 4)
if escape_id == "\\U":
return parse_hex_char(src, pos, 8)
try:
return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
except KeyError:
raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None
def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
return parse_basic_str_escape(src, pos, multiline=True)
def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
hex_str = src[pos : pos + hex_len]
if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
raise suffixed_err(src, pos, "Invalid hex value")
pos += hex_len
hex_int = int(hex_str, 16)
if not is_unicode_scalar_value(hex_int):
raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
return pos, chr(hex_int)
def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1 # Skip starting apostrophe
start_pos = pos
pos = skip_until(
src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
)
return pos + 1, src[start_pos:pos] # Skip ending apostrophe
def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
pos += 3
if src.startswith("\n", pos):
pos += 1
if literal:
delim = "'"
end_pos = skip_until(
src,
pos,
"'''",
error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
error_on_eof=True,
)
result = src[pos:end_pos]
pos = end_pos + 3
else:
delim = '"'
pos, result = parse_basic_str(src, pos, multiline=True)
# Add at maximum two extra apostrophes/quotes if the end sequence
# is 4 or 5 chars long instead of just 3.
if not src.startswith(delim, pos):
return pos, result
pos += 1
if not src.startswith(delim, pos):
return pos, result + delim
pos += 1
return pos, result + (delim * 2)
def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
if multiline:
error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape_multiline
else:
error_on = ILLEGAL_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape
result = ""
start_pos = pos
while True:
try:
char = src[pos]
except IndexError:
raise suffixed_err(src, pos, "Unterminated string") from None
if char == '"':
if not multiline:
return pos + 1, result + src[start_pos:pos]
if src.startswith('"""', pos):
return pos + 3, result + src[start_pos:pos]
pos += 1
continue
if char == "\\":
result += src[start_pos:pos]
pos, parsed_escape = parse_escapes(src, pos)
result += parsed_escape
start_pos = pos
continue
if char in error_on:
raise suffixed_err(src, pos, f"Illegal character {char!r}")
pos += 1
def parse_value( # noqa: C901
src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Any]:
try:
char: str | None = src[pos]
except IndexError:
char = None
# IMPORTANT: order conditions based on speed of checking and likelihood
# Basic strings
if char == '"':
if src.startswith('"""', pos):
return parse_multiline_str(src, pos, literal=False)
return parse_one_line_basic_str(src, pos)
# Literal strings
if char == "'":
if src.startswith("'''", pos):
return parse_multiline_str(src, pos, literal=True)
return parse_literal_str(src, pos)
# Booleans
if char == "t":
if src.startswith("true", pos):
return pos + 4, True
if char == "f":
if src.startswith("false", pos):
return pos + 5, False
# Arrays
if char == "[":
return parse_array(src, pos, parse_float)
# Inline tables
if char == "{":
return parse_inline_table(src, pos, parse_float)
# Dates and times
datetime_match = RE_DATETIME.match(src, pos)
if datetime_match:
try:
datetime_obj = match_to_datetime(datetime_match)
except ValueError as e:
raise suffixed_err(src, pos, "Invalid date or datetime") from e
return datetime_match.end(), datetime_obj
localtime_match = RE_LOCALTIME.match(src, pos)
if localtime_match:
return localtime_match.end(), match_to_localtime(localtime_match)
# Integers and "normal" floats.
# The regex will greedily match any type starting with a decimal
# char, so needs to be located after handling of dates and times.
number_match = RE_NUMBER.match(src, pos)
if number_match:
return number_match.end(), match_to_number(number_match, parse_float)
# Special floats
first_three = src[pos : pos + 3]
if first_three in {"inf", "nan"}:
return pos + 3, parse_float(first_three)
first_four = src[pos : pos + 4]
if first_four in {"-inf", "+inf", "-nan", "+nan"}:
return pos + 4, parse_float(first_four)
raise suffixed_err(src, pos, "Invalid value")
def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
"""Return a `TOMLDecodeError` where error message is suffixed with
coordinates in source."""
def coord_repr(src: str, pos: Pos) -> str:
if pos >= len(src):
return "end of document"
line = src.count("\n", 0, pos) + 1
if line == 1:
column = pos + 1
else:
column = pos - src.rindex("\n", 0, pos)
return f"line {line}, column {column}"
return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
def is_unicode_scalar_value(codepoint: int) -> bool:
return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
"""A decorator to make `parse_float` safe.
`parse_float` must not return dicts or lists, because these types
would be mixed with parsed TOML tables and arrays, thus confusing
the parser. The returned decorated callable raises `ValueError`
instead of returning illegal types.
"""
# The default `float` callable never returns illegal types. Optimize it.
if parse_float is float: # type: ignore[comparison-overlap]
return float
def safe_parse_float(float_str: str) -> Any:
float_value = parse_float(float_str)
if isinstance(float_value, (dict, list)):
raise ValueError("parse_float must not return dicts or lists")
return float_value
return safe_parse_float
| 22,633 | Python | 31.708092 | 88 | 0.576901 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/extern/__init__.py | import importlib.util
import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
"""Figure out if the target module is vendored."""
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
"""Return a module spec for vendored names."""
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname) else None
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = (
'packaging',
'ordered_set',
'more_itertools',
'importlib_metadata',
'zipp',
'importlib_resources',
'jaraco',
'typing_extensions',
'tomli',
)
VendorImporter(__name__, names, 'setuptools._vendor').install()
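# Illustrative sketch added for documentation; not part of the upstream module.
# With the importer installed above, vendored packages resolve through the
# `setuptools.extern` namespace:
def _vendor_importer_example():
    from setuptools.extern import packaging
    # The returned module is the bundled copy (setuptools._vendor.packaging)
    # when present, or a normally installed `packaging` as a fallback.
    return packaging.__name__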
| 2,527 | Python | 29.095238 | 78 | 0.576573 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_version.py | # file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '1.1.2'
__version_tuple__ = version_tuple = (1, 1, 2)
| 160 | Python | 31.199994 | 46 | 0.66875 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/__init__.py | __all__ = [
"BaseExceptionGroup",
"ExceptionGroup",
"catch",
"format_exception",
"format_exception_only",
"print_exception",
"print_exc",
]
import os
import sys
from ._catch import catch
from ._version import version as __version__ # noqa: F401
if sys.version_info < (3, 11):
from ._exceptions import BaseExceptionGroup, ExceptionGroup
from ._formatting import (
format_exception,
format_exception_only,
print_exc,
print_exception,
)
if os.getenv("EXCEPTIONGROUP_NO_PATCH") != "1":
from . import _formatting # noqa: F401
BaseExceptionGroup.__module__ = __name__
ExceptionGroup.__module__ = __name__
else:
from traceback import (
format_exception,
format_exception_only,
print_exc,
print_exception,
)
BaseExceptionGroup = BaseExceptionGroup
ExceptionGroup = ExceptionGroup
| 920 | Python | 21.463414 | 63 | 0.626087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_exceptions.py | from __future__ import annotations
from collections.abc import Callable, Sequence
from functools import partial
from inspect import getmro, isclass
from typing import TYPE_CHECKING, Generic, Type, TypeVar, cast, overload
if TYPE_CHECKING:
from typing import Self
_BaseExceptionT_co = TypeVar("_BaseExceptionT_co", bound=BaseException, covariant=True)
_BaseExceptionT = TypeVar("_BaseExceptionT", bound=BaseException)
_ExceptionT_co = TypeVar("_ExceptionT_co", bound=Exception, covariant=True)
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
def check_direct_subclass(
exc: BaseException, parents: tuple[type[BaseException]]
) -> bool:
for cls in getmro(exc.__class__)[:-1]:
if cls in parents:
return True
return False
def get_condition_filter(
condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool]
) -> Callable[[_BaseExceptionT_co], bool]:
if isclass(condition) and issubclass(
cast(Type[BaseException], condition), BaseException
):
return partial(check_direct_subclass, parents=(condition,))
elif isinstance(condition, tuple):
if all(isclass(x) and issubclass(x, BaseException) for x in condition):
return partial(check_direct_subclass, parents=condition)
elif callable(condition):
return cast("Callable[[BaseException], bool]", condition)
raise TypeError("expected a function, exception type or tuple of exception types")
class BaseExceptionGroup(BaseException, Generic[_BaseExceptionT_co]):
"""A combination of multiple unrelated exceptions."""
def __new__(
cls, __message: str, __exceptions: Sequence[_BaseExceptionT_co]
) -> Self:
if not isinstance(__message, str):
raise TypeError(f"argument 1 must be str, not {type(__message)}")
if not isinstance(__exceptions, Sequence):
raise TypeError("second argument (exceptions) must be a sequence")
if not __exceptions:
raise ValueError(
"second argument (exceptions) must be a non-empty sequence"
)
for i, exc in enumerate(__exceptions):
if not isinstance(exc, BaseException):
raise ValueError(
f"Item {i} of second argument (exceptions) is not an exception"
)
if cls is BaseExceptionGroup:
if all(isinstance(exc, Exception) for exc in __exceptions):
cls = ExceptionGroup
if issubclass(cls, Exception):
for exc in __exceptions:
if not isinstance(exc, Exception):
if cls is ExceptionGroup:
raise TypeError(
"Cannot nest BaseExceptions in an ExceptionGroup"
)
else:
raise TypeError(
f"Cannot nest BaseExceptions in {cls.__name__!r}"
)
instance = super().__new__(cls, __message, __exceptions)
instance._message = __message
instance._exceptions = __exceptions
return instance
def add_note(self, note: str) -> None:
if not isinstance(note, str):
raise TypeError(
f"Expected a string, got note={note!r} (type {type(note).__name__})"
)
if not hasattr(self, "__notes__"):
self.__notes__: list[str] = []
self.__notes__.append(note)
@property
def message(self) -> str:
return self._message
@property
def exceptions(
self,
) -> tuple[_BaseExceptionT_co | BaseExceptionGroup[_BaseExceptionT_co], ...]:
return tuple(self._exceptions)
@overload
def subgroup(
self, __condition: type[_BaseExceptionT] | tuple[type[_BaseExceptionT], ...]
) -> BaseExceptionGroup[_BaseExceptionT] | None:
...
@overload
def subgroup(
self: Self, __condition: Callable[[_BaseExceptionT_co], bool]
) -> Self | None:
...
def subgroup(
self: Self,
__condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool],
) -> BaseExceptionGroup[_BaseExceptionT] | Self | None:
condition = get_condition_filter(__condition)
modified = False
if condition(self):
return self
exceptions: list[BaseException] = []
for exc in self.exceptions:
if isinstance(exc, BaseExceptionGroup):
subgroup = exc.subgroup(__condition)
if subgroup is not None:
exceptions.append(subgroup)
if subgroup is not exc:
modified = True
elif condition(exc):
exceptions.append(exc)
else:
modified = True
if not modified:
return self
elif exceptions:
group = self.derive(exceptions)
group.__cause__ = self.__cause__
group.__context__ = self.__context__
group.__traceback__ = self.__traceback__
return group
else:
return None
@overload
def split(
self: Self,
__condition: type[_BaseExceptionT] | tuple[type[_BaseExceptionT], ...],
) -> tuple[BaseExceptionGroup[_BaseExceptionT] | None, Self | None]:
...
@overload
def split(
self: Self, __condition: Callable[[_BaseExceptionT_co], bool]
) -> tuple[Self | None, Self | None]:
...
def split(
self: Self,
__condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool],
) -> (
tuple[BaseExceptionGroup[_BaseExceptionT] | None, Self | None]
| tuple[Self | None, Self | None]
):
condition = get_condition_filter(__condition)
if condition(self):
return self, None
matching_exceptions: list[BaseException] = []
nonmatching_exceptions: list[BaseException] = []
for exc in self.exceptions:
if isinstance(exc, BaseExceptionGroup):
matching, nonmatching = exc.split(condition)
if matching is not None:
matching_exceptions.append(matching)
if nonmatching is not None:
nonmatching_exceptions.append(nonmatching)
elif condition(exc):
matching_exceptions.append(exc)
else:
nonmatching_exceptions.append(exc)
matching_group: Self | None = None
if matching_exceptions:
matching_group = self.derive(matching_exceptions)
matching_group.__cause__ = self.__cause__
matching_group.__context__ = self.__context__
matching_group.__traceback__ = self.__traceback__
nonmatching_group: Self | None = None
if nonmatching_exceptions:
nonmatching_group = self.derive(nonmatching_exceptions)
nonmatching_group.__cause__ = self.__cause__
nonmatching_group.__context__ = self.__context__
nonmatching_group.__traceback__ = self.__traceback__
return matching_group, nonmatching_group
def derive(self: Self, __excs: Sequence[_BaseExceptionT_co]) -> Self:
eg = BaseExceptionGroup(self.message, __excs)
if hasattr(self, "__notes__"):
# Create a new list so that add_note() only affects one exceptiongroup
eg.__notes__ = list(self.__notes__)
return eg
def __str__(self) -> str:
suffix = "" if len(self._exceptions) == 1 else "s"
return f"{self.message} ({len(self._exceptions)} sub-exception{suffix})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.message!r}, {self._exceptions!r})"
class ExceptionGroup(BaseExceptionGroup[_ExceptionT_co], Exception):
def __new__(cls, __message: str, __exceptions: Sequence[_ExceptionT_co]) -> Self:
return super().__new__(cls, __message, __exceptions)
if TYPE_CHECKING:
@property
def exceptions(
self,
) -> tuple[_ExceptionT_co | ExceptionGroup[_ExceptionT_co], ...]:
...
@overload # type: ignore[override]
def subgroup(
self, __condition: type[_ExceptionT] | tuple[type[_ExceptionT], ...]
) -> ExceptionGroup[_ExceptionT] | None:
...
@overload
def subgroup(
self: Self, __condition: Callable[[_ExceptionT_co], bool]
) -> Self | None:
...
def subgroup(
self: Self,
__condition: type[_ExceptionT]
| tuple[type[_ExceptionT], ...]
| Callable[[_ExceptionT_co], bool],
) -> ExceptionGroup[_ExceptionT] | Self | None:
return super().subgroup(__condition)
@overload # type: ignore[override]
def split(
self: Self, __condition: type[_ExceptionT] | tuple[type[_ExceptionT], ...]
) -> tuple[ExceptionGroup[_ExceptionT] | None, Self | None]:
...
@overload
def split(
self: Self, __condition: Callable[[_ExceptionT_co], bool]
) -> tuple[Self | None, Self | None]:
...
def split(
self: Self,
__condition: type[_ExceptionT]
| tuple[type[_ExceptionT], ...]
| Callable[[_ExceptionT_co], bool],
) -> (
tuple[ExceptionGroup[_ExceptionT] | None, Self | None]
| tuple[Self | None, Self | None]
):
return super().split(__condition)
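# Illustrative usage sketch added for documentation; not part of the upstream
# module. split() partitions a group by exception type, while subgroup() keeps
# only the matching part.
def _exception_group_example():
    group = ExceptionGroup(
        'several failures', [ValueError('bad value'), TypeError('bad type')]
    )
    value_errors, rest = group.split(ValueError)
    # value_errors wraps only the ValueError; rest wraps only the TypeError.
    return group.subgroup(TypeError), value_errors, rest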
| 9,768 | Python | 33.519435 | 87 | 0.566544 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_catch.py | from __future__ import annotations
import sys
from collections.abc import Callable, Iterable, Mapping
from contextlib import AbstractContextManager
from types import TracebackType
from typing import TYPE_CHECKING, Any
if sys.version_info < (3, 11):
from ._exceptions import BaseExceptionGroup
if TYPE_CHECKING:
_Handler = Callable[[BaseException], Any]
class _Catcher:
def __init__(self, handler_map: Mapping[tuple[type[BaseException], ...], _Handler]):
self._handler_map = handler_map
def __enter__(self) -> None:
pass
def __exit__(
self,
etype: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
) -> bool:
if exc is not None:
unhandled = self.handle_exception(exc)
if unhandled is exc:
return False
elif unhandled is None:
return True
else:
raise unhandled from None
return False
def handle_exception(self, exc: BaseException) -> BaseException | None:
excgroup: BaseExceptionGroup | None
if isinstance(exc, BaseExceptionGroup):
excgroup = exc
else:
excgroup = BaseExceptionGroup("", [exc])
new_exceptions: list[BaseException] = []
for exc_types, handler in self._handler_map.items():
matched, excgroup = excgroup.split(exc_types)
if matched:
try:
handler(matched)
except BaseException as new_exc:
new_exceptions.append(new_exc)
if not excgroup:
break
if new_exceptions:
if len(new_exceptions) == 1:
return new_exceptions[0]
if excgroup:
new_exceptions.append(excgroup)
return BaseExceptionGroup("", new_exceptions)
elif (
excgroup and len(excgroup.exceptions) == 1 and excgroup.exceptions[0] is exc
):
return exc
else:
return excgroup
def catch(
__handlers: Mapping[type[BaseException] | Iterable[type[BaseException]], _Handler]
) -> AbstractContextManager[None]:
if not isinstance(__handlers, Mapping):
raise TypeError("the argument must be a mapping")
handler_map: dict[
        tuple[type[BaseException], ...], Callable[[BaseExceptionGroup], Any]
] = {}
for type_or_iterable, handler in __handlers.items():
        iterable: tuple[type[BaseException], ...]
if isinstance(type_or_iterable, type) and issubclass(
type_or_iterable, BaseException
):
iterable = (type_or_iterable,)
elif isinstance(type_or_iterable, Iterable):
iterable = tuple(type_or_iterable)
else:
raise TypeError(
"each key must be either an exception classes or an iterable thereof"
)
if not callable(handler):
raise TypeError("handlers must be callable")
for exc_type in iterable:
if not isinstance(exc_type, type) or not issubclass(
exc_type, BaseException
):
raise TypeError(
"each key must be either an exception classes or an iterable "
"thereof"
)
if issubclass(exc_type, BaseExceptionGroup):
raise TypeError(
"catching ExceptionGroup with catch() is not allowed. "
"Use except instead."
)
handler_map[iterable] = handler
return _Catcher(handler_map)
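if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: catch() is the
    # pre-3.11 stand-in for ``except*``. The handler and the sample exception
    # group below are illustrative only.
    def _log_errors(excgroup: BaseExceptionGroup) -> None:
        for exc in excgroup.exceptions:
            print(f"handled: {exc!r}")

    with catch({ValueError: _log_errors, KeyError: _log_errors}):
        raise BaseExceptionGroup("demo", [ValueError("bad"), KeyError("missing")])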
| 3,656 | Python | 29.991525 | 88 | 0.571937 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_formatting.py | # traceback_exception_init() adapted from trio
#
# _ExceptionPrintContext and traceback_exception_format() copied from the standard
# library
from __future__ import annotations
import collections.abc
import sys
import textwrap
import traceback
from functools import singledispatch
from types import TracebackType
from typing import Any, List, Optional
from ._exceptions import BaseExceptionGroup
max_group_width = 15
max_group_depth = 10
_cause_message = (
"\nThe above exception was the direct cause of the following exception:\n\n"
)
_context_message = (
"\nDuring handling of the above exception, another exception occurred:\n\n"
)
def _format_final_exc_line(etype, value):
valuestr = _safe_string(value, "exception")
if value is None or not valuestr:
line = f"{etype}\n"
else:
line = f"{etype}: {valuestr}\n"
return line
def _safe_string(value, what, func=str):
try:
return func(value)
except BaseException:
return f"<{what} {func.__name__}() failed>"
class _ExceptionPrintContext:
def __init__(self):
self.seen = set()
self.exception_group_depth = 0
self.need_close = False
def indent(self):
return " " * (2 * self.exception_group_depth)
def emit(self, text_gen, margin_char=None):
if margin_char is None:
margin_char = "|"
indent_str = self.indent()
if self.exception_group_depth:
indent_str += margin_char + " "
if isinstance(text_gen, str):
yield textwrap.indent(text_gen, indent_str, lambda line: True)
else:
for text in text_gen:
yield textwrap.indent(text, indent_str, lambda line: True)
def exceptiongroup_excepthook(
etype: type[BaseException], value: BaseException, tb: TracebackType | None
) -> None:
sys.stderr.write("".join(traceback.format_exception(etype, value, tb)))
class PatchedTracebackException(traceback.TracebackException):
def __init__(
self,
exc_type: type[BaseException],
exc_value: BaseException,
exc_traceback: TracebackType | None,
*,
limit: int | None = None,
lookup_lines: bool = True,
capture_locals: bool = False,
compact: bool = False,
_seen: set[int] | None = None,
) -> None:
kwargs: dict[str, Any] = {}
if sys.version_info >= (3, 10):
kwargs["compact"] = compact
is_recursive_call = _seen is not None
if _seen is None:
_seen = set()
_seen.add(id(exc_value))
self.stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_traceback),
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
)
self.exc_type = exc_type
# Capture now to permit freeing resources: only complication is in the
# unofficial API _format_final_exc_line
self._str = _safe_string(exc_value, "exception")
try:
self.__notes__ = getattr(exc_value, "__notes__", None)
except KeyError:
# Workaround for https://github.com/python/cpython/issues/98778 on Python
# <= 3.9, and some 3.10 and 3.11 patch versions.
HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ())
if sys.version_info[:2] <= (3, 11) and isinstance(exc_value, HTTPError):
self.__notes__ = None
else:
raise
if exc_type and issubclass(exc_type, SyntaxError):
# Handle SyntaxError's specially
self.filename = exc_value.filename
lno = exc_value.lineno
self.lineno = str(lno) if lno is not None else None
self.text = exc_value.text
self.offset = exc_value.offset
self.msg = exc_value.msg
if sys.version_info >= (3, 10):
end_lno = exc_value.end_lineno
self.end_lineno = str(end_lno) if end_lno is not None else None
self.end_offset = exc_value.end_offset
elif (
exc_type
and issubclass(exc_type, (NameError, AttributeError))
and getattr(exc_value, "name", None) is not None
):
suggestion = _compute_suggestion_error(exc_value, exc_traceback)
if suggestion:
self._str += f". Did you mean: '{suggestion}'?"
if lookup_lines:
# Force all lines in the stack to be loaded
for frame in self.stack:
frame.line
self.__suppress_context__ = (
exc_value.__suppress_context__ if exc_value is not None else False
)
# Convert __cause__ and __context__ to `TracebackExceptions`s, use a
# queue to avoid recursion (only the top-level call gets _seen == None)
if not is_recursive_call:
queue = [(self, exc_value)]
while queue:
te, e = queue.pop()
if e and e.__cause__ is not None and id(e.__cause__) not in _seen:
cause = PatchedTracebackException(
type(e.__cause__),
e.__cause__,
e.__cause__.__traceback__,
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
else:
cause = None
if compact:
need_context = (
cause is None and e is not None and not e.__suppress_context__
)
else:
need_context = True
if (
e
and e.__context__ is not None
and need_context
and id(e.__context__) not in _seen
):
context = PatchedTracebackException(
type(e.__context__),
e.__context__,
e.__context__.__traceback__,
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
else:
context = None
# Capture each of the exceptions in the ExceptionGroup along with each
# of their causes and contexts
if e and isinstance(e, BaseExceptionGroup):
exceptions = []
for exc in e.exceptions:
texc = PatchedTracebackException(
type(exc),
exc,
exc.__traceback__,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
exceptions.append(texc)
else:
exceptions = None
te.__cause__ = cause
te.__context__ = context
te.exceptions = exceptions
if cause:
queue.append((te.__cause__, e.__cause__))
if context:
queue.append((te.__context__, e.__context__))
if exceptions:
queue.extend(zip(te.exceptions, e.exceptions))
def format(self, *, chain=True, _ctx=None):
if _ctx is None:
_ctx = _ExceptionPrintContext()
output = []
exc = self
if chain:
while exc:
if exc.__cause__ is not None:
chained_msg = _cause_message
chained_exc = exc.__cause__
elif exc.__context__ is not None and not exc.__suppress_context__:
chained_msg = _context_message
chained_exc = exc.__context__
else:
chained_msg = None
chained_exc = None
output.append((chained_msg, exc))
exc = chained_exc
else:
output.append((None, exc))
for msg, exc in reversed(output):
if msg is not None:
yield from _ctx.emit(msg)
if exc.exceptions is None:
if exc.stack:
yield from _ctx.emit("Traceback (most recent call last):\n")
yield from _ctx.emit(exc.stack.format())
yield from _ctx.emit(exc.format_exception_only())
elif _ctx.exception_group_depth > max_group_depth:
# exception group, but depth exceeds limit
yield from _ctx.emit(f"... (max_group_depth is {max_group_depth})\n")
else:
# format exception group
is_toplevel = _ctx.exception_group_depth == 0
if is_toplevel:
_ctx.exception_group_depth += 1
if exc.stack:
yield from _ctx.emit(
"Exception Group Traceback (most recent call last):\n",
margin_char="+" if is_toplevel else None,
)
yield from _ctx.emit(exc.stack.format())
yield from _ctx.emit(exc.format_exception_only())
num_excs = len(exc.exceptions)
if num_excs <= max_group_width:
n = num_excs
else:
n = max_group_width + 1
_ctx.need_close = False
for i in range(n):
last_exc = i == n - 1
if last_exc:
# The closing frame may be added by a recursive call
_ctx.need_close = True
if max_group_width is not None:
truncated = i >= max_group_width
else:
truncated = False
title = f"{i + 1}" if not truncated else "..."
yield (
_ctx.indent()
+ ("+-" if i == 0 else " ")
+ f"+---------------- {title} ----------------\n"
)
_ctx.exception_group_depth += 1
if not truncated:
yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx)
else:
remaining = num_excs - max_group_width
plural = "s" if remaining > 1 else ""
yield from _ctx.emit(
f"and {remaining} more exception{plural}\n"
)
if last_exc and _ctx.need_close:
yield _ctx.indent() + "+------------------------------------\n"
_ctx.need_close = False
_ctx.exception_group_depth -= 1
if is_toplevel:
assert _ctx.exception_group_depth == 1
_ctx.exception_group_depth = 0
def format_exception_only(self):
"""Format the exception part of the traceback.
The return value is a generator of strings, each ending in a newline.
Normally, the generator emits a single string; however, for
SyntaxError exceptions, it emits several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the output.
"""
if self.exc_type is None:
yield traceback._format_final_exc_line(None, self._str)
return
stype = self.exc_type.__qualname__
smod = self.exc_type.__module__
if smod not in ("__main__", "builtins"):
if not isinstance(smod, str):
smod = "<unknown>"
stype = smod + "." + stype
if not issubclass(self.exc_type, SyntaxError):
yield _format_final_exc_line(stype, self._str)
elif traceback_exception_format_syntax_error is not None:
yield from traceback_exception_format_syntax_error(self, stype)
else:
yield from traceback_exception_original_format_exception_only(self)
if isinstance(self.__notes__, collections.abc.Sequence):
for note in self.__notes__:
note = _safe_string(note, "note")
yield from [line + "\n" for line in note.split("\n")]
elif self.__notes__ is not None:
yield _safe_string(self.__notes__, "__notes__", func=repr)
traceback_exception_original_format = traceback.TracebackException.format
traceback_exception_original_format_exception_only = (
traceback.TracebackException.format_exception_only
)
traceback_exception_format_syntax_error = getattr(
traceback.TracebackException, "_format_syntax_error", None
)
if sys.excepthook is sys.__excepthook__:
traceback.TracebackException.__init__ = ( # type: ignore[assignment]
PatchedTracebackException.__init__
)
traceback.TracebackException.format = ( # type: ignore[assignment]
PatchedTracebackException.format
)
traceback.TracebackException.format_exception_only = ( # type: ignore[assignment]
PatchedTracebackException.format_exception_only
)
sys.excepthook = exceptiongroup_excepthook
@singledispatch
def format_exception_only(__exc: BaseException) -> List[str]:
return list(
PatchedTracebackException(
type(__exc), __exc, None, compact=True
).format_exception_only()
)
@format_exception_only.register
def _(__exc: type, value: BaseException) -> List[str]:
return format_exception_only(value)
@singledispatch
def format_exception(
__exc: BaseException,
limit: Optional[int] = None,
chain: bool = True,
) -> List[str]:
return list(
PatchedTracebackException(
type(__exc), __exc, __exc.__traceback__, limit=limit, compact=True
).format(chain=chain)
)
@format_exception.register
def _(
__exc: type,
value: BaseException,
tb: TracebackType,
limit: Optional[int] = None,
chain: bool = True,
) -> List[str]:
return format_exception(value, limit, chain)
@singledispatch
def print_exception(
__exc: BaseException,
limit: Optional[int] = None,
file: Any = None,
chain: bool = True,
) -> None:
if file is None:
file = sys.stderr
for line in PatchedTracebackException(
type(__exc), __exc, __exc.__traceback__, limit=limit
).format(chain=chain):
print(line, file=file, end="")
@print_exception.register
def _(
__exc: type,
value: BaseException,
tb: TracebackType,
limit: Optional[int] = None,
file: Any = None,
chain: bool = True,
) -> None:
print_exception(value, limit, file, chain)
def print_exc(
limit: Optional[int] = None,
file: Any | None = None,
chain: bool = True,
) -> None:
value = sys.exc_info()[1]
print_exception(value, limit, file, chain)
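# Example (editor's sketch): the module-level helpers above take a single
# exception instance, mirroring the Python 3.10+ stdlib calling convention.
# The sample group below is illustrative only.
#
#     try:
#         raise BaseExceptionGroup("demo", [ValueError("v"), KeyError("k")])
#     except BaseExceptionGroup as eg:
#         print("".join(format_exception(eg)))  # full grouped traceback
#         print("".join(format_exception_only(eg)))  # just the final lines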
# Python levenshtein edit distance code for NameError/AttributeError
# suggestions, backported from 3.12
_MAX_CANDIDATE_ITEMS = 750
_MAX_STRING_SIZE = 40
_MOVE_COST = 2
_CASE_COST = 1
_SENTINEL = object()
def _substitution_cost(ch_a, ch_b):
if ch_a == ch_b:
return 0
if ch_a.lower() == ch_b.lower():
return _CASE_COST
return _MOVE_COST
def _compute_suggestion_error(exc_value, tb):
wrong_name = getattr(exc_value, "name", None)
if wrong_name is None or not isinstance(wrong_name, str):
return None
if isinstance(exc_value, AttributeError):
obj = getattr(exc_value, "obj", _SENTINEL)
if obj is _SENTINEL:
return None
obj = exc_value.obj
try:
d = dir(obj)
except Exception:
return None
else:
assert isinstance(exc_value, NameError)
# find most recent frame
if tb is None:
return None
while tb.tb_next is not None:
tb = tb.tb_next
frame = tb.tb_frame
d = list(frame.f_locals) + list(frame.f_globals) + list(frame.f_builtins)
if len(d) > _MAX_CANDIDATE_ITEMS:
return None
wrong_name_len = len(wrong_name)
if wrong_name_len > _MAX_STRING_SIZE:
return None
best_distance = wrong_name_len
suggestion = None
for possible_name in d:
if possible_name == wrong_name:
# A missing attribute is "found". Don't suggest it (see GH-88821).
continue
# No more than 1/3 of the involved characters should need changed.
max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6
# Don't take matches we've already beaten.
max_distance = min(max_distance, best_distance - 1)
current_distance = _levenshtein_distance(
wrong_name, possible_name, max_distance
)
if current_distance > max_distance:
continue
if not suggestion or current_distance < best_distance:
suggestion = possible_name
best_distance = current_distance
return suggestion
def _levenshtein_distance(a, b, max_cost):
# A Python implementation of Python/suggestions.c:levenshtein_distance.
# Both strings are the same
if a == b:
return 0
# Trim away common affixes
pre = 0
while a[pre:] and b[pre:] and a[pre] == b[pre]:
pre += 1
a = a[pre:]
b = b[pre:]
post = 0
while a[: post or None] and b[: post or None] and a[post - 1] == b[post - 1]:
post -= 1
a = a[: post or None]
b = b[: post or None]
if not a or not b:
return _MOVE_COST * (len(a) + len(b))
if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE:
return max_cost + 1
# Prefer shorter buffer
if len(b) < len(a):
a, b = b, a
# Quick fail when a match is impossible
if (len(b) - len(a)) * _MOVE_COST > max_cost:
return max_cost + 1
# Instead of producing the whole traditional len(a)-by-len(b)
# matrix, we can update just one row in place.
# Initialize the buffer row
row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST))
result = 0
for bindex in range(len(b)):
bchar = b[bindex]
distance = result = bindex * _MOVE_COST
minimum = sys.maxsize
for index in range(len(a)):
# 1) Previous distance in this row is cost(b[:b_index], a[:index])
substitute = distance + _substitution_cost(bchar, a[index])
# 2) cost(b[:b_index], a[:index+1]) from previous row
distance = row[index]
# 3) existing result is cost(b[:b_index+1], a[index])
insert_delete = min(result, distance) + _MOVE_COST
result = min(insert_delete, substitute)
# cost(b[:b_index+1], a[:index+1])
row[index] = result
if result < minimum:
minimum = result
if minimum > max_cost:
# Everything in this row is too big, so bail early.
return max_cost + 1
return result
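# Worked example (editor's note): with _MOVE_COST = 2 and _CASE_COST = 1,
#     _levenshtein_distance("foo", "fou", 10) == 2  # one full substitution
#     _levenshtein_distance("Foo", "foo", 10) == 1  # case-only substitution
# These costs feed the cutoff computed in _compute_suggestion_error() above.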
| 19,475 | Python | 33.531915 | 87 | 0.532478 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/__version__.py | __version__ = "0.7.1"
| 22 | Python | 10.499995 | 21 | 0.454545 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/helpers.py | # Copyright 2018 John Reese
# Licensed under the MIT license
import inspect
from typing import Awaitable, Union
from typing_extensions import Protocol
from .types import T
class Orderable(Protocol): # pragma: no cover
def __lt__(self, other):
...
def __gt__(self, other):
...
async def maybe_await(object: Union[Awaitable[T], T]) -> T:
if inspect.isawaitable(object):
return await object # type: ignore
return object # type: ignore
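# Example (editor's sketch): maybe_await() lets callers treat plain values and
# awaitables uniformly; the names below are illustrative only.
#
#     async def forty_two() -> int:
#         return 42
#
#     async def demo() -> None:
#         assert await maybe_await(42) == 42  # plain value passes through
#         assert await maybe_await(forty_two()) == 42  # awaitable gets awaited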
| 483 | Python | 19.166666 | 59 | 0.662526 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/builtins.py | # Copyright 2018 John Reese
# Licensed under the MIT license
"""
Async-compatible versions of builtin functions for iterables.
These functions intentionally shadow their builtins counterparts,
enabling use with both standard iterables and async iterables, without
needing to use if/else clauses or awkward logic. Standard iterables
get wrapped in async generators, and all functions are designed for
use with `await`, `async for`, etc.
"""
import asyncio
import builtins
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Callable,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
overload,
)
from .helpers import Orderable, maybe_await
from .types import T1, T2, T3, T4, T5, AnyIterable, AnyIterator, AnyStop, R, T
def iter(itr: AnyIterable[T]) -> AsyncIterator[T]:
"""
Get an async iterator from any mixed iterable.
Async iterators will be returned directly.
Async iterables will return an async iterator.
Standard iterables will be wrapped in an async generator yielding
each item in the iterable in the same order.
Examples:
        async for item in iter(range(10)):
...
"""
if isinstance(itr, AsyncIterator):
return itr
if isinstance(itr, AsyncIterable):
return itr.__aiter__()
async def gen() -> AsyncIterator[T]:
for item in cast(Iterable[T], itr):
yield item
return gen()
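# Example (editor's sketch): iter() gives a single code path over both plain
# and async inputs; the gen() helper below is illustrative only.
#
#     async def gen():
#         yield 1
#         yield 2
#
#     async def demo():
#         async for x in iter([1, 2]):  # plain iterable, wrapped lazily
#             print(x)
#         async for x in iter(gen()):  # async generator, returned as-is
#             print(x)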
async def next(itr: AnyIterator[T]) -> T:
"""
Return the next item of any mixed iterator.
Calls builtins.next() on standard iterators, and awaits itr.__anext__()
on async iterators.
Example:
value = await next(it)
"""
if isinstance(itr, AsyncIterator):
return await itr.__anext__()
try:
return builtins.next(itr)
except StopIteration:
raise StopAsyncIteration
async def list(itr: AnyIterable[T]) -> List[T]:
"""
Consume a mixed iterable and return a list of items in order.
Example:
await list(range(5))
-> [0, 1, 2, 3, 4]
"""
return [item async for item in iter(itr)]
async def set(itr: AnyIterable[T]) -> Set[T]:
"""
Consume a mixed iterable and return a set of items.
Example:
await set([0, 1, 2, 3, 0, 1, 2, 3])
-> {0, 1, 2, 3}
"""
return {item async for item in iter(itr)}
async def enumerate(
itr: AnyIterable[T], start: int = 0
) -> AsyncIterator[Tuple[int, T]]:
"""
Consume a mixed iterable and yield the current index and item.
Example:
async for index, value in enumerate(...):
...
"""
index = start
async for item in iter(itr):
yield index, item
index += 1
async def map(fn: Callable[[T], R], itr: AnyIterable[T]) -> AsyncIterator[R]:
"""
    Apply the given function or coroutine to each item of a mixed iterable.
Example:
async for response in map(func, data):
...
"""
# todo: queue items eagerly
async for item in iter(itr):
yield await maybe_await(fn(item))
@overload
async def max(
itr: AnyIterable[Orderable], *, key: Optional[Callable] = None
) -> Orderable: # pragma: no cover
pass
@overload
async def max(
itr: AnyIterable[Orderable], *, default: T, key: Optional[Callable] = None
) -> Union[Orderable, T]: # pragma: no cover
pass
async def max(itr: AnyIterable[Orderable], **kwargs: Any) -> Any:
"""
    Return the largest item in an iterable.
Example:
        await max(range(5))
-> 4
"""
for k in kwargs:
if k not in ("key", "default"):
raise ValueError(f"kwarg {k} not supported")
value: Orderable
vkey: Any
keyfunc = kwargs.get("key", None)
it = iter(itr)
try:
value = await next(it)
if keyfunc:
vkey = keyfunc(value)
except StopAsyncIteration:
if "default" in kwargs:
return kwargs["default"]
raise ValueError("iterable is empty and no default value given")
if keyfunc:
async for item in it:
ikey = keyfunc(item)
if ikey > vkey:
value = item
vkey = ikey
else:
async for item in it:
if item > value:
value = item
return value
@overload
async def min(
itr: AnyIterable[Orderable], *, key: Optional[Callable] = None
) -> Orderable: # pragma: no cover
pass
@overload
async def min(
itr: AnyIterable[Orderable], *, default: T, key: Optional[Callable] = None
) -> Union[Orderable, T]: # pragma: no cover
pass
async def min(itr: AnyIterable[Orderable], **kwargs: Any) -> Any:
"""
    Return the smallest item in an iterable.
Example:
await min(range(5))
-> 0
"""
for k in kwargs:
if k not in ("key", "default"):
raise ValueError(f"kwarg {k} not supported")
value: Orderable
vkey: Any
keyfunc = kwargs.get("key", None)
it = iter(itr)
try:
value = await next(it)
if keyfunc:
vkey = keyfunc(value)
except StopAsyncIteration:
if "default" in kwargs:
return kwargs["default"]
raise ValueError("iterable is empty and no default value given")
if keyfunc:
async for item in it:
ikey = keyfunc(item)
if ikey < vkey:
value = item
vkey = ikey
else:
async for item in it:
if item < value:
value = item
return value
async def sum(itr: AnyIterable[T], start: T = None) -> T:
"""
Compute the sum of a mixed iterable, adding each value with the start value.
Example:
await sum(generator())
-> 1024
"""
value: T
if start is None:
value = cast(T, 0) # emulate stdlib but still type nicely for non-ints
else:
value = start
async for item in iter(itr):
value += item # type: ignore # mypy doesn't know T + T
return value
# pylint: disable=undefined-variable,multiple-statements,too-many-arguments
@overload
def zip(__iter1: AnyIterable[T1]) -> AsyncIterator[Tuple[T1]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1], __iter2: AnyIterable[T2]
) -> AsyncIterator[Tuple[T1, T2]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1], __iter2: AnyIterable[T2], __iter3: AnyIterable[T3]
) -> AsyncIterator[Tuple[T1, T2, T3]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1],
__iter2: AnyIterable[T2],
__iter3: AnyIterable[T3],
__iter4: AnyIterable[T4],
) -> AsyncIterator[Tuple[T1, T2, T3, T4]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1],
__iter2: AnyIterable[T2],
__iter3: AnyIterable[T3],
__iter4: AnyIterable[T4],
__iter5: AnyIterable[T5],
) -> AsyncIterator[Tuple[T1, T2, T3, T4, T5]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[Any],
__iter2: AnyIterable[Any],
__iter3: AnyIterable[Any],
__iter4: AnyIterable[Any],
__iter5: AnyIterable[Any],
__iter6: AnyIterable[Any],
*__iterables: AnyIterable[Any],
) -> AsyncIterator[Tuple[Any, ...]]: # pragma: no cover
pass
# pylint: enable=undefined-variable,multiple-statements,too-many-arguments
async def zip(*itrs: AnyIterable[Any]) -> AsyncIterator[Tuple[Any, ...]]:
"""
Yield a tuple of items from mixed iterables until the shortest is consumed.
Example:
async for a, b, c in zip(i, j, k):
...
"""
its: List[AsyncIterator[Any]] = [iter(itr) for itr in itrs]
while True:
try:
values = await asyncio.gather(*[it.__anext__() for it in its])
yield values
except AnyStop:
break
| 7,968 | Python | 21.384831 | 85 | 0.599523 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/more_itertools.py | # Copyright 2020 John Reese
# Licensed under the MIT license
from typing import AsyncIterable, List, TypeVar
from .builtins import iter
from .itertools import islice
from .types import AnyIterable
T = TypeVar("T")
async def take(n: int, iterable: AnyIterable[T]) -> List[T]:
"""
Return the first n items of iterable as a list.
If there are too few items in iterable, all of them are returned.
n needs to be at least 0. If it is 0, an empty list is returned.
Example:
first_two = await take(2, [1, 2, 3, 4, 5])
"""
if n < 0:
raise ValueError("take's first parameter can't be negative")
return [item async for item in islice(iterable, n)]
async def chunked(iterable: AnyIterable[T], n: int) -> AsyncIterable[List[T]]:
"""
Break iterable into chunks of length n.
The last chunk will be shorter if the total number of items is not
divisible by n.
Example:
async for chunk in chunked([1, 2, 3, 4, 5], n=2):
... # first iteration: chunk == [1, 2]; last one: chunk == [5]
"""
it = iter(iterable)
chunk = await take(n, it)
while chunk != []:
yield chunk
chunk = await take(n, it)
| 1,207 | Python | 24.702127 | 78 | 0.628003 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/__init__.py | # Copyright 2018 John Reese
# Licensed under the MIT license
"""
itertools and builtins for AsyncIO and mixed iterables
"""
__author__ = "John Reese"
from . import asyncio
from .__version__ import __version__
from .builtins import enumerate, iter, list, map, max, min, next, set, sum, zip
from .itertools import (
accumulate,
chain,
combinations,
combinations_with_replacement,
compress,
count,
cycle,
dropwhile,
filterfalse,
groupby,
islice,
permutations,
product,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
| 588 | Python | 17.406249 | 79 | 0.656463 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/asyncio.py | # Copyright 2019 John Reese
# Licensed under the MIT license
"""
Friendlier version of asyncio standard library.
Provisional library. Must be imported as `aioitertools.asyncio`.
"""
import asyncio
import time
from typing import Any, Awaitable, Dict, Iterable, List, Optional, Set, Tuple, cast
from .types import AsyncIterator, T
async def as_completed(
aws: Iterable[Awaitable[T]],
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
timeout: Optional[float] = None
) -> AsyncIterator[T]:
"""
Run awaitables in `aws` concurrently, and yield results as they complete.
Unlike `asyncio.as_completed`, this yields actual results, and does not require
awaiting each item in the iterable.
Example:
async for value in as_completed(futures):
... # use value immediately
"""
done: Set[Awaitable[T]] = set()
pending: Set[Awaitable[T]] = set(aws)
remaining: Optional[float] = None
if timeout and timeout > 0:
threshold = time.time() + timeout
else:
timeout = None
while pending:
if timeout:
remaining = threshold - time.time()
if remaining <= 0:
raise asyncio.TimeoutError()
# asyncio.Future inherits from typing.Awaitable
# asyncio.wait takes Iterable[Union[Future, Generator, Awaitable]], but
# returns Tuple[Set[Future], Set[Future]. Because mypy doesn't like assigning
# these values to existing Set[Awaitable] or even Set[Union[Awaitable, Future]],
# we need to first cast the results to something that we can actually use
# asyncio.Future: https://github.com/python/typeshed/blob/72ff7b94e534c610ddf8939bacbc55343e9465d2/stdlib/3/asyncio/futures.pyi#L30
# asyncio.wait(): https://github.com/python/typeshed/blob/72ff7b94e534c610ddf8939bacbc55343e9465d2/stdlib/3/asyncio/tasks.pyi#L89
done, pending = cast(
Tuple[Set[Awaitable[T]], Set[Awaitable[T]]],
await asyncio.wait(
pending,
loop=loop,
timeout=remaining,
return_when=asyncio.FIRST_COMPLETED,
),
)
for item in done:
yield await item
async def gather(
*args: Awaitable[T],
loop: Optional[asyncio.AbstractEventLoop] = None,
return_exceptions: bool = False,
limit: int = -1
) -> List[Any]:
"""Like asyncio.gather but with a limit on concurrency.
    Much of the complexity of asyncio.gather comes from its support for
    cancellation, which we omit here. Note that all results are buffered.
"""
# For detecting input duplicates and reconciling them at the end
input_map: Dict[Awaitable[T], List[int]] = {}
# This is keyed on what we'll get back from asyncio.wait
pos: Dict[asyncio.Future[T], int] = {}
ret: List[Any] = [None] * len(args)
pending: Set[asyncio.Future[T]] = set()
done: Set[asyncio.Future[T]] = set()
next_arg = 0
while True:
while next_arg < len(args) and (limit == -1 or len(pending) < limit):
# We have to defer the creation of the Task as long as possible
# because once we do, it starts executing, regardless of what we
# have in the pending set.
if args[next_arg] in input_map:
input_map[args[next_arg]].append(next_arg)
else:
# We call ensure_future directly to ensure that we have a Task
# because the return value of asyncio.wait will be an implicit
# task otherwise, and we won't be able to know which input it
# corresponds to.
task: asyncio.Future[T] = asyncio.ensure_future(args[next_arg])
pending.add(task)
pos[task] = next_arg
input_map[args[next_arg]] = [next_arg]
next_arg += 1
# pending might be empty if the last items of args were dupes;
# asyncio.wait([]) will raise an exception.
if pending:
done, pending = await asyncio.wait(
pending, loop=loop, return_when=asyncio.FIRST_COMPLETED
)
for x in done:
if return_exceptions and x.exception():
ret[pos[x]] = x.exception()
else:
ret[pos[x]] = x.result()
if not pending and next_arg == len(args):
break
for lst in input_map.values():
for i in range(1, len(lst)):
ret[lst[i]] = ret[lst[0]]
return ret
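if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch: gather() with limit=2 keeps at most two of the sample
    # coroutines in flight at once while preserving input order. The sleepy()
    # helper is illustrative only, and running this assumes a Python version
    # supported by this vendored copy (it still passes loop= to asyncio.wait).
    async def sleepy(n: int) -> int:
        await asyncio.sleep(0.01)
        return n

    async def _demo() -> None:
        results = await gather(*[sleepy(i) for i in range(5)], limit=2)
        print(results)  # [0, 1, 2, 3, 4]

    asyncio.get_event_loop().run_until_complete(_demo())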
| 4,562 | Python | 33.568182 | 139 | 0.604998 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/types.py | # Copyright 2018 John Reese
# Licensed under the MIT license
from typing import (
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Iterable,
Iterator,
TypeVar,
Union,
)
R = TypeVar("R")
T = TypeVar("T")
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
N = TypeVar("N", int, float)
AnyFunction = Union[Callable[..., R], Callable[..., Awaitable[R]]]
AnyIterable = Union[Iterable[T], AsyncIterable[T]]
AnyIterableIterable = Union[Iterable[AnyIterable[T]], AsyncIterable[AnyIterable[T]]]
AnyIterator = Union[Iterator[T], AsyncIterator[T]]
AnyStop = (StopIteration, StopAsyncIteration)
Accumulator = Union[Callable[[T, T], T], Callable[[T, T], Awaitable[T]]]
KeyFunction = Union[Callable[[T], R], Callable[[T], Awaitable[R]]]
Predicate = Union[Callable[[T], object], Callable[[T], Awaitable[object]]]
| 879 | Python | 26.499999 | 84 | 0.680319 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/itertools.py | # Copyright 2018 John Reese
# Licensed under the MIT license
"""
Async-compatible version of itertools standard library functions.
These functions build on top of the async builtins components,
enabling use of both standard iterables and async iterables, without
needing to use if/else clauses or awkward logic. Standard iterables
get wrapped in async generators, and all functions are designed for
use with `await`, `async for`, etc.
See https://docs.python.org/3/library/itertools.html for reference.
"""
import asyncio
import builtins
import itertools
import operator
from typing import Any, AsyncIterator, List, Optional, Tuple, overload
from .builtins import enumerate, iter, list, next, zip
from .helpers import maybe_await
from .types import (
Accumulator,
AnyFunction,
AnyIterable,
AnyIterableIterable,
AnyStop,
KeyFunction,
N,
Predicate,
R,
T,
)
async def accumulate(
itr: AnyIterable[T], func: Accumulator[T] = operator.add
) -> AsyncIterator[T]:
"""
Yield the running accumulation of an iterable and operator.
Accepts both a standard function or a coroutine for accumulation.
Example:
data = [1, 2, 3, 4]
async def mul(a, b):
return a * b
async for total in accumulate(data, func=mul):
... # 1, 2, 6, 24
"""
itr = iter(itr)
try:
total: T = await next(itr)
except AnyStop:
return
yield total
async for item in itr:
total = await maybe_await(func(total, item))
yield total
class Chain:
def __call__(self, *itrs: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield values from one or more iterables in series.
Consumes the first iterable lazily, in entirety, then the second, and so on.
Example:
async for value in chain([1, 2, 3], [7, 8, 9]):
... # 1, 2, 3, 7, 8, 9
"""
return self.from_iterable(itrs)
async def from_iterable(self, itrs: AnyIterableIterable[T]) -> AsyncIterator[T]:
"""
Like chain, but takes an iterable of iterables.
Alias for chain(*itrs)
"""
async for itr in iter(itrs):
async for item in iter(itr):
yield item
chain = Chain()
async def combinations(itr: AnyIterable[T], r: int) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable.
Simple wrapper around itertools.combinations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations(range(4), 3):
... # (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)
"""
pool: List[T] = await list(itr)
for value in itertools.combinations(pool, r):
yield value
async def combinations_with_replacement(
itr: AnyIterable[T], r: int
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable with replacement.
Simple wrapper around itertools.combinations_with_replacement.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations_with_replacement("ABC", 2):
... # ("A", "A"), ("A", "B"), ("A", "C"), ("B", "B"), ...
"""
pool: List[T] = await list(itr)
for value in itertools.combinations_with_replacement(pool, r):
yield value
async def compress(
itr: AnyIterable[T], selectors: AnyIterable[Any]
) -> AsyncIterator[T]:
"""
Yield elements only when the corresponding selector evaluates to True.
Stops when either the iterable or the selectors have been exhausted.
Example:
async for value in compress(range(5), [1, 0, 0, 1, 1]):
... # 0, 3, 4
"""
async for value, selector in zip(itr, selectors):
if selector:
yield value
async def count(start: N = 0, step: N = 1) -> AsyncIterator[N]:
"""
Yield an infinite series, starting at the given value and increasing by step.
Example:
        async for value in count(10, -1):
... # 10, 9, 8, 7, ...
"""
value = start
while True:
yield value
value += step
async def cycle(itr: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield a repeating series from the given iterable.
    Lazily consumes the iterable when the next value is needed, caching
    the values in memory for future iterations of the series.
Example:
async for value in cycle([1, 2]):
... # 1, 2, 1, 2, 1, 2, ...
"""
items = []
async for item in iter(itr):
yield item
items.append(item)
while True:
for item in items:
yield item
async def dropwhile(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Drops all items until the predicate evaluates False; yields all items afterwards.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in dropwhile(pred, range(6)):
            ... # 4, 5
"""
itr = iter(iterable)
async for item in itr:
if not await maybe_await(predicate(item)):
yield item
break
async for item in itr:
yield item
async def filterfalse(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Yield items from the iterable only when the predicate evaluates to False.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in filterfalse(pred, range(6)):
... # 4, 5
"""
async for item in iter(iterable):
if not await maybe_await(predicate(item)):
yield item
# pylint: disable=undefined-variable,multiple-statements
@overload
def groupby(itr: AnyIterable[T]) -> AsyncIterator[Tuple[T, List[T]]]: # pragma: nocover
pass
@overload
def groupby(
itr: AnyIterable[T], key: KeyFunction[T, R]
) -> AsyncIterator[Tuple[R, List[T]]]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def groupby(
itr: AnyIterable[T], key: Optional[KeyFunction[T, R]] = None
) -> AsyncIterator[Tuple[Any, List[T]]]:
"""
Yield consecutive keys and groupings from the given iterable.
Items will be grouped based on the key function, which defaults to
the identity of each item. Accepts both standard functions and
    coroutines for the key function. Sort the iterable by the key function
    before using groupby, since only consecutive matching items are grouped.
Example:
data = ["A", "a", "b", "c", "C", "c"]
async for key, group in groupby(data, key=str.lower):
key # "a", "b", "c"
group # ["A", "a"], ["b"], ["c", "C", "c"]
"""
if key is None:
key = lambda x: x
grouping: List[T] = []
it = iter(itr)
try:
item = await next(it)
except StopAsyncIteration:
return
grouping = [item]
j = await maybe_await(key(item))
async for item in it:
k = await maybe_await(key(item))
if k != j:
yield j, grouping
grouping = [item]
else:
grouping.append(item)
j = k
yield j, grouping
# pylint: disable=undefined-variable,multiple-statements
@overload
def islice(
itr: AnyIterable[T], __stop: Optional[int]
) -> AsyncIterator[T]: # pragma: nocover
pass
@overload
def islice(
itr: AnyIterable[T], __start: int, __stop: Optional[int], __step: int = 1
) -> AsyncIterator[T]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def islice(itr: AnyIterable[T], *args: Optional[int]) -> AsyncIterator[T]:
"""
Yield selected items from the given iterable.
islice(iterable, stop)
islice(iterable, start, stop[, step])
Starting from the start index (or zero), stopping at the stop
    index (or until exhausted), skipping items if step > 1.
Example:
data = range(10)
async for item in islice(data, 5):
... # 0, 1, 2, 3, 4
async for item in islice(data, 2, 5):
... # 2, 3, 4
async for item in islice(data, 1, 7, 2):
... # 1, 3, 5
"""
start = 0
step = 1
if not args:
raise ValueError("must pass stop index")
if len(args) == 1:
(stop,) = args
elif len(args) == 2:
start, stop = args # type: ignore
elif len(args) == 3:
start, stop, step = args # type: ignore
else:
raise ValueError("too many arguments given")
assert start >= 0 and (stop is None or stop >= 0) and step >= 0
step = max(1, step)
if stop == 0:
return
async for index, item in enumerate(itr):
if index >= start and (index - start) % step == 0:
yield item
if stop is not None and index + 1 >= stop:
break
async def permutations(
itr: AnyIterable[T], r: Optional[int] = None
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length permutations of elements in the iterable.
    Simple wrapper around itertools.permutations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in permutations(range(3)):
... # (0, 1, 2), (0, 2, 1), (1, 0, 2), ...
"""
pool: List[T] = await list(itr)
for value in itertools.permutations(pool, r):
yield value
async def product(
*itrs: AnyIterable[T], repeat: int = 1 # pylint: disable=redefined-outer-name
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield cartesian products of all iterables.
    Simple wrapper around itertools.product for asyncio.
This will consume all iterables before yielding any values.
Example:
async for value in product("abc", "xy"):
... # ("a", "x"), ("a", "y"), ("b", "x"), ...
async for value in product(range(3), repeat=3):
... # (0, 0, 0), (0, 0, 1), (0, 0, 2), ...
"""
pools = await asyncio.gather(*[list(itr) for itr in itrs])
for value in itertools.product(*pools, repeat=repeat):
yield value
async def repeat(elem: T, n: int = -1) -> AsyncIterator[T]:
"""
Yield the given value repeatedly, forever or up to n times.
Example:
async for value in repeat(7):
... # 7, 7, 7, 7, 7, 7, ...
"""
while True:
if n == 0:
break
yield elem
n -= 1
async def starmap(
fn: AnyFunction[R], iterable: AnyIterableIterable[Any]
) -> AsyncIterator[R]:
"""
Yield values from a function using an iterable of iterables for arguments.
Each iterable contained within will be unpacked and consumed before
executing the function or coroutine.
Example:
        data = [(1, 1), (1, 2), (2, 2)]
async for value in starmap(operator.add, data):
... # 2, 3, 4
"""
async for itr in iter(iterable):
args = await list(itr)
yield await maybe_await(fn(*args))
async def takewhile(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Yield values from the iterable until the predicate evaluates False.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for value in takewhile(pred, range(8)):
... # 0, 1, 2, 3
"""
async for item in iter(iterable):
if await maybe_await(predicate(item)):
yield item
else:
break
def tee(itr: AnyIterable[T], n: int = 2) -> Tuple[AsyncIterator[T], ...]:
"""
Return n iterators that each yield items from the given iterable.
The first iterator lazily fetches from the original iterable, and then
queues the values for the other iterators to yield when needed.
Caveat: all iterators are dependent on the first iterator – if it is
consumed more slowly than the rest, the other consumers will be blocked
until the first iterator continues forward. Similarly, if the first
iterator is consumed more quickly than the rest, more memory will be
used in keeping values in the queues until the other iterators finish
consuming them.
Example:
it1, it2 = tee(range(5), n=2)
async for value in it1:
... # 0, 1, 2, 3, 4
async for value in it2:
... # 0, 1, 2, 3, 4
"""
assert n > 0
sentinel = object()
queues: List[asyncio.Queue] = [asyncio.Queue() for k in range(n)]
async def gen(k: int, q: asyncio.Queue) -> AsyncIterator[T]:
if k == 0:
async for value in iter(itr):
await asyncio.gather(*[queue.put(value) for queue in queues[1:]])
yield value
await asyncio.gather(*[queue.put(sentinel) for queue in queues[1:]])
else:
while True:
value = await q.get()
if value is sentinel:
break
yield value
return tuple(gen(k, q) for k, q in builtins.enumerate(queues))
async def zip_longest(
*itrs: AnyIterable[Any], fillvalue: Any = None
) -> AsyncIterator[Tuple[Any, ...]]:
"""
Yield a tuple of items from mixed iterables until all are consumed.
If shorter iterables are exhausted, the default value will be used
until all iterables are exhausted.
Example:
a = range(3)
b = range(5)
async for a, b in zip_longest(a, b, fillvalue=-1):
a # 0, 1, 2, -1, -1
b # 0, 1, 2, 3, 4
"""
its: List[AsyncIterator[Any]] = [iter(itr) for itr in itrs]
itr_count = len(its)
finished = 0
while True:
values = await asyncio.gather(
*[it.__anext__() for it in its], return_exceptions=True
)
for idx, value in builtins.enumerate(values):
if isinstance(value, AnyStop):
finished += 1
values[idx] = fillvalue
its[idx] = repeat(fillvalue)
elif isinstance(value, BaseException):
raise value
if finished >= itr_count:
break
yield tuple(values)
| 14,405 | Python | 24.910072 | 88 | 0.592294 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/helpers.py | # Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import functools
from unittest import TestCase
from aioitertools.helpers import maybe_await
def async_test(fn):
def wrapped(*args, **kwargs):
loop = asyncio.get_event_loop()
return loop.run_until_complete(fn(*args, **kwargs))
return wrapped
class HelpersTest(TestCase):
# aioitertools.helpers.maybe_await()
@async_test
async def test_maybe_await(self):
self.assertEqual(await maybe_await(42), 42)
@async_test
async def test_maybe_await_async_def(self):
async def forty_two():
await asyncio.sleep(0.0001)
return 42
self.assertEqual(await maybe_await(forty_two()), 42)
@async_test
async def test_maybe_await_coroutine(self):
@asyncio.coroutine
def forty_two():
yield from asyncio.sleep(0.0001)
return 42
self.assertEqual(await maybe_await(forty_two()), 42)
@async_test
async def test_maybe_await_partial(self):
async def multiply(a, b):
await asyncio.sleep(0.0001)
return a * b
self.assertEqual(await maybe_await(functools.partial(multiply, 6)(7)), 42)
| 1,238 | Python | 23.294117 | 82 | 0.640549 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/builtins.py | # Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
from typing import AsyncIterator
from unittest import TestCase
import aioitertools as ait
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(3)
class BuiltinsTest(TestCase):
# aioitertools.iter()
@async_test
async def test_iter_list(self):
it = ait.iter(slist)
self.assertIsInstance(it, AsyncIterator)
idx = 0
async for item in it:
self.assertEqual(item, slist[idx])
idx += 1
@async_test
async def test_iter_range(self):
it = ait.iter(srange)
self.assertIsInstance(it, AsyncIterator)
idx = 0
async for item in it:
self.assertEqual(item, srange[idx])
idx += 1
@async_test
async def test_iter_iterable(self):
sentinel = object()
class async_iterable:
def __aiter__(self):
return sentinel
aiter = async_iterable()
self.assertEqual(ait.iter(aiter), sentinel)
@async_test
async def test_iter_iterator(self):
sentinel = object()
class async_iterator:
def __aiter__(self):
return sentinel
def __anext__(self):
return sentinel
aiter = async_iterator()
self.assertEqual(ait.iter(aiter), aiter)
@async_test
async def test_iter_async_generator(self):
async def async_gen():
yield 1
yield 2
agen = async_gen()
self.assertEqual(ait.iter(agen), agen)
# aioitertools.next()
@async_test
async def test_next_list(self):
it = ait.iter(slist)
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_range(self):
it = ait.iter(srange)
self.assertEqual(await ait.next(it), 0)
self.assertEqual(await ait.next(it), 1)
self.assertEqual(await ait.next(it), 2)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_iterable(self):
class async_iter:
def __init__(self):
self.index = 0
def __aiter__(self):
return self
def __anext__(self):
if self.index > 2:
raise StopAsyncIteration()
return self.fake_next()
async def fake_next(self):
value = slist[self.index]
self.index += 1
return value
it = ait.iter(async_iter())
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
it = iter(slist)
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_async_generator(self):
async def async_gen():
for item in slist:
yield item
it = ait.iter(async_gen())
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
# aioitertools.list()
@async_test
async def test_list(self):
self.assertEqual(await ait.list(ait.iter(slist)), slist)
# aioitertools.set()
@async_test
async def test_set(self):
self.assertEqual(await ait.set(ait.iter(slist)), set(slist))
# aioitertools.enumerate()
@async_test
async def test_enumerate(self):
async for index, value in ait.enumerate(slist):
self.assertEqual(value, slist[index])
@async_test
async def test_enumerate_start(self):
async for index, value in ait.enumerate(slist, 4):
self.assertEqual(value, slist[index - 4])
# aioitertools.map()
@async_test
async def test_map_function_list(self):
idx = 0
async for value in ait.map(str.lower, slist):
self.assertEqual(value, slist[idx].lower())
idx += 1
@async_test
async def test_map_function_async_generator(self):
async def gen():
for item in slist:
yield item
idx = 0
async for value in ait.map(str.lower, gen()):
self.assertEqual(value, slist[idx].lower())
idx += 1
@async_test
async def test_map_coroutine_list(self):
async def double(x):
await asyncio.sleep(0.0001)
return x * 2
idx = 0
async for value in ait.map(double, slist):
self.assertEqual(value, slist[idx] * 2)
idx += 1
@async_test
async def test_map_coroutine_generator(self):
async def gen():
for item in slist:
yield item
async def double(x):
await asyncio.sleep(0.0001)
return x * 2
idx = 0
async for value in ait.map(double, gen()):
self.assertEqual(value, slist[idx] * 2)
idx += 1
# aioitertools.max()
@async_test
async def test_max_basic(self):
async def gen():
for item in slist:
yield item
self.assertEqual(await ait.max(gen()), "C")
self.assertEqual(await ait.max(range(4)), 3)
with self.assertRaisesRegex(ValueError, "iterable is empty"):
await ait.max([])
with self.assertRaisesRegex(ValueError, "kwarg .+ not supported"):
await ait.max(None, foo="foo")
@async_test
async def test_max_default(self):
self.assertEqual(await ait.max(range(2), default="x"), 1)
self.assertEqual(await ait.max([], default="x"), "x")
self.assertEqual(await ait.max([], default=None), None)
@async_test
async def test_max_key(self):
words = ["star", "buzz", "guard"]
def reverse(s):
return s[::-1]
self.assertEqual(reverse("python"), "nohtyp")
self.assertEqual(await ait.max(words), "star")
self.assertEqual(await ait.max(words, key=reverse), "buzz")
# aioitertools.min()
@async_test
async def test_min_basic(self):
async def gen():
for item in slist:
yield item
self.assertEqual(await ait.min(gen()), "A")
self.assertEqual(await ait.min(range(4)), 0)
with self.assertRaisesRegex(ValueError, "iterable is empty"):
await ait.min([])
with self.assertRaisesRegex(ValueError, "kwarg .+ not supported"):
await ait.min(None, foo="foo")
@async_test
async def test_min_default(self):
self.assertEqual(await ait.min(range(2), default="x"), 0)
self.assertEqual(await ait.min([], default="x"), "x")
self.assertEqual(await ait.min([], default=None), None)
@async_test
async def test_min_key(self):
words = ["star", "buzz", "guard"]
def reverse(s):
return s[::-1]
self.assertEqual(reverse("python"), "nohtyp")
self.assertEqual(await ait.min(words), "buzz")
self.assertEqual(await ait.min(words, key=reverse), "guard")
# aioitertools.sum()
@async_test
async def test_sum_range_default(self):
self.assertEqual(await ait.sum(srange), sum(srange))
@async_test
async def test_sum_list_string(self):
self.assertEqual(await ait.sum(slist, "foo"), "fooABC")
# aioitertools.zip()
@async_test
async def test_zip_equal(self):
idx = 0
async for a, b in ait.zip(slist, srange):
self.assertEqual(a, slist[idx])
self.assertEqual(b, srange[idx])
idx += 1
| 8,223 | Python | 26.783784 | 74 | 0.568284 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/more_itertools.py | # Copyright 2020 John Reese
# Licensed under the MIT license
from typing import AsyncIterable
from unittest import TestCase
import aioitertools.more_itertools as mit
from .helpers import async_test
async def _gen() -> AsyncIterable[int]:
for i in range(5):
yield i
async def _empty() -> AsyncIterable[int]:
return
yield 0 # pylint: disable=unreachable
class MoreItertoolsTest(TestCase):
@async_test
async def test_take(self) -> None:
self.assertEqual(await mit.take(2, _gen()), [0, 1])
self.assertEqual(await mit.take(2, range(5)), [0, 1])
@async_test
async def test_take_zero(self) -> None:
self.assertEqual(await mit.take(0, _gen()), [])
@async_test
async def test_take_negative(self) -> None:
with self.assertRaises(ValueError):
await mit.take(-1, _gen())
@async_test
async def test_take_more_than_iterable(self) -> None:
self.assertEqual(await mit.take(10, _gen()), list(range(5)))
@async_test
async def test_take_empty(self) -> None:
it = _gen()
self.assertEqual(len(await mit.take(5, it)), 5)
self.assertEqual(await mit.take(1, it), [])
self.assertEqual(await mit.take(1, _empty()), [])
@async_test
async def test_chunked(self) -> None:
self.assertEqual(
[chunk async for chunk in mit.chunked(_gen(), 2)], [[0, 1], [2, 3], [4]]
)
self.assertEqual(
[chunk async for chunk in mit.chunked(range(5), 2)], [[0, 1], [2, 3], [4]]
)
@async_test
async def test_chunked_empty(self) -> None:
self.assertEqual([], [chunk async for chunk in mit.chunked(_empty(), 2)])
| 1,700 | Python | 27.830508 | 86 | 0.607059 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/__init__.py | # Copyright 2018 John Reese
# Licensed under the MIT license
from .asyncio import AsyncioTest
from .builtins import BuiltinsTest
from .helpers import HelpersTest
from .itertools import ItertoolsTest
from .more_itertools import MoreItertoolsTest
| 246 | Python | 26.444442 | 45 | 0.837398 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/asyncio.py | # Copyright 2019 John Reese
# Licensed under the MIT license
import asyncio
from unittest import TestCase
import aioitertools as ait
import aioitertools.asyncio as aio
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(3)
class AsyncioTest(TestCase):
def test_import(self):
self.assertEqual(ait.asyncio, aio)
@async_test
async def test_as_completed(self):
async def sleepy(number, duration):
await asyncio.sleep(duration)
return number
pairs = [(1, 0.3), (2, 0.1), (3, 0.5)]
expected = [2, 1, 3]
futures = [sleepy(*pair) for pair in pairs]
results = await ait.list(aio.as_completed(futures))
self.assertEqual(results, expected)
futures = [sleepy(*pair) for pair in pairs]
results = []
async for value in aio.as_completed(futures):
results.append(value)
self.assertEqual(results, expected)
@async_test
async def test_as_completed_timeout(self):
calls = [(1.0,), (0.1,)]
futures = [asyncio.sleep(*args) for args in calls]
with self.assertRaises(asyncio.TimeoutError):
await ait.list(aio.as_completed(futures, timeout=0.5))
futures = [asyncio.sleep(*args) for args in calls]
results = 0
with self.assertRaises(asyncio.TimeoutError):
async for _ in aio.as_completed(futures, timeout=0.5):
results += 1
self.assertEqual(results, 1)
@async_test
async def test_gather_input_types(self):
async def fn(arg):
await asyncio.sleep(0.001)
return arg
fns = [fn(1), asyncio.ensure_future(fn(2))]
if hasattr(asyncio, "create_task"):
# 3.7 only
fns.append(asyncio.create_task(fn(3))) # pylint: disable=no-member
else:
fns.append(fn(3))
result = await aio.gather(*fns)
self.assertEqual([1, 2, 3], result)
@async_test
async def test_gather_limited(self):
max_counter = 0
counter = 0
async def fn(arg):
nonlocal counter, max_counter
counter += 1
if max_counter < counter:
max_counter = counter
await asyncio.sleep(0.001)
counter -= 1
return arg
# Limit of 2
result = await aio.gather(*[fn(i) for i in range(10)], limit=2)
self.assertEqual(2, max_counter)
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result)
# No limit
result = await aio.gather(*[fn(i) for i in range(10)])
self.assertEqual(
10, max_counter
) # TODO: on a loaded machine this might be less?
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result)
@async_test
async def test_gather_limited_dupes(self):
async def fn(arg):
await asyncio.sleep(0.001)
return arg
f = fn(1)
g = fn(2)
result = await aio.gather(f, f, f, g, f, g, limit=2)
self.assertEqual([1, 1, 1, 2, 1, 2], result)
f = fn(1)
g = fn(2)
result = await aio.gather(f, f, f, g, f, g)
self.assertEqual([1, 1, 1, 2, 1, 2], result)
@async_test
async def test_gather_with_exceptions(self):
class MyException(Exception):
pass
async def fn(arg, fail=False):
await asyncio.sleep(arg)
if fail:
raise MyException(arg)
return arg
with self.assertRaises(MyException):
await aio.gather(fn(0.002, fail=True), fn(0.001))
result = await aio.gather(
fn(0.002, fail=True), fn(0.001), return_exceptions=True
)
self.assertEqual(result[1], 0.001)
self.assertIsInstance(result[0], MyException)
| 3,846 | Python | 28.592307 | 79 | 0.561362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/itertools.py | # Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import operator
from unittest import TestCase
import aioitertools as ait
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(1, 4)
class ItertoolsTest(TestCase):
@async_test
async def test_accumulate_range_default(self):
it = ait.accumulate(srange)
for k in [1, 3, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_range_function(self):
it = ait.accumulate(srange, func=operator.mul)
for k in [1, 2, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_range_coroutine(self):
async def mul(a, b):
return a * b
it = ait.accumulate(srange, func=mul)
for k in [1, 2, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_gen_function(self):
async def gen():
yield 1
yield 2
yield 4
it = ait.accumulate(gen(), func=operator.mul)
for k in [1, 2, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_gen_coroutine(self):
async def mul(a, b):
return a * b
async def gen():
yield 1
yield 2
yield 4
it = ait.accumulate(gen(), func=mul)
for k in [1, 2, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_empty(self):
values = []
async for value in ait.accumulate([]):
values.append(value)
self.assertEqual(values, [])
@async_test
async def test_chain_lists(self):
it = ait.chain(slist, srange)
for k in ["A", "B", "C", 1, 2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_list_gens(self):
async def gen():
for k in range(2, 9, 2):
yield k
it = ait.chain(slist, gen())
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_from_iterable(self):
async def gen():
for k in range(2, 9, 2):
yield k
it = ait.chain.from_iterable([slist, gen()])
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_from_iterable_parameter_expansion_gen(self):
async def gen():
for k in range(2, 9, 2):
yield k
async def parameters_gen():
yield slist
yield gen()
it = ait.chain.from_iterable(parameters_gen())
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_combinations(self):
it = ait.combinations(range(4), 3)
for k in [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_combinations_with_replacement(self):
it = ait.combinations_with_replacement(slist, 2)
for k in [
("A", "A"),
("A", "B"),
("A", "C"),
("B", "B"),
("B", "C"),
("C", "C"),
]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_compress_list(self):
data = range(10)
selectors = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0]
it = ait.compress(data, selectors)
for k in [1, 2, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_compress_gen(self):
data = "abcdefghijkl"
selectors = ait.cycle([1, 0, 0])
it = ait.compress(data, selectors)
for k in ["a", "d", "g", "j"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_count_bare(self):
it = ait.count()
for k in [0, 1, 2, 3]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_start(self):
it = ait.count(42)
for k in [42, 43, 44, 45]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_start_step(self):
it = ait.count(42, 3)
for k in [42, 45, 48, 51]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_negative(self):
it = ait.count(step=-2)
for k in [0, -2, -4, -6]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_cycle_list(self):
it = ait.cycle(slist)
for k in ["A", "B", "C", "A", "B", "C", "A", "B"]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_cycle_gen(self):
async def gen():
yield 1
yield 2
yield 42
it = ait.cycle(gen())
for k in [1, 2, 42, 1, 2, 42, 1, 2]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_dropwhile_empty(self):
def pred(x):
return x < 2
result = await ait.list(ait.dropwhile(pred, []))
self.assertEqual(result, [])
@async_test
async def test_dropwhile_function_list(self):
def pred(x):
return x < 2
it = ait.dropwhile(pred, srange)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_function_gen(self):
def pred(x):
return x < 2
async def gen():
yield 1
yield 2
yield 42
it = ait.dropwhile(pred, gen())
for k in [2, 42]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_coroutine_list(self):
async def pred(x):
return x < 2
it = ait.dropwhile(pred, srange)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_coroutine_gen(self):
async def pred(x):
return x < 2
async def gen():
yield 1
yield 2
yield 42
it = ait.dropwhile(pred, gen())
for k in [2, 42]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_filterfalse_function_list(self):
def pred(x):
return x % 2 == 0
it = ait.filterfalse(pred, srange)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_filterfalse_coroutine_list(self):
async def pred(x):
return x % 2 == 0
it = ait.filterfalse(pred, srange)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_list(self):
data = "aaabba"
it = ait.groupby(data)
for k in [("a", ["a", "a", "a"]), ("b", ["b", "b"]), ("a", ["a"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_list_key(self):
data = "aAabBA"
it = ait.groupby(data, key=str.lower)
for k in [("a", ["a", "A", "a"]), ("b", ["b", "B"]), ("a", ["A"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_gen(self):
async def gen():
for c in "aaabba":
yield c
it = ait.groupby(gen())
for k in [("a", ["a", "a", "a"]), ("b", ["b", "b"]), ("a", ["a"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_gen_key(self):
async def gen():
for c in "aAabBA":
yield c
it = ait.groupby(gen(), key=str.lower)
for k in [("a", ["a", "A", "a"]), ("b", ["b", "B"]), ("a", ["A"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_empty(self):
async def gen():
for _ in range(0):
yield # Force generator with no actual iteration
async for _ in ait.groupby(gen()):
self.fail("No iteration should have happened")
@async_test
async def test_islice_bad_range(self):
with self.assertRaisesRegex(ValueError, "must pass stop index"):
async for _ in ait.islice([1, 2]):
pass
with self.assertRaisesRegex(ValueError, "too many arguments"):
async for _ in ait.islice([1, 2], 1, 2, 3, 4):
pass
@async_test
async def test_islice_stop_zero(self):
values = []
async for value in ait.islice(range(5), 0):
values.append(value)
self.assertEqual(values, [])
@async_test
async def test_islice_range_stop(self):
it = ait.islice(srange, 2)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_step(self):
it = ait.islice(srange, 0, None, 2)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_stop(self):
it = ait.islice(srange, 1, 3)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_stop_step(self):
it = ait.islice(srange, 1, 3, 2)
for k in [2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_stop(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
gen_it = gen()
it = ait.islice(gen_it, 2)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
assert await ait.list(gen_it) == [3, 4]
@async_test
async def test_islice_gen_start_step(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
it = ait.islice(gen(), 1, None, 2)
for k in [2, 4]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_start_stop(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
it = ait.islice(gen(), 1, 3)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_start_stop_step(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
gen_it = gen()
it = ait.islice(gen_it, 1, 3, 2)
for k in [2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
assert await ait.list(gen_it) == [4]
@async_test
async def test_permutations_list(self):
it = ait.permutations(srange, r=2)
for k in [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_permutations_gen(self):
async def gen():
yield 1
yield 2
yield 3
it = ait.permutations(gen(), r=2)
for k in [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_product_list(self):
it = ait.product([1, 2], [6, 7])
for k in [(1, 6), (1, 7), (2, 6), (2, 7)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_product_gen(self):
async def gen(x):
yield x
yield x + 1
it = ait.product(gen(1), gen(6))
for k in [(1, 6), (1, 7), (2, 6), (2, 7)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_repeat(self):
it = ait.repeat(42)
for k in [42] * 10:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_repeat_limit(self):
it = ait.repeat(42, 5)
for k in [42] * 5:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_function_list(self):
data = [slist[:2], slist[1:], slist]
def concat(*args):
return "".join(args)
it = ait.starmap(concat, data)
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_function_gen(self):
def gen():
yield slist[:2]
yield slist[1:]
yield slist
def concat(*args):
return "".join(args)
it = ait.starmap(concat, gen())
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_coroutine_list(self):
data = [slist[:2], slist[1:], slist]
async def concat(*args):
return "".join(args)
it = ait.starmap(concat, data)
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_coroutine_gen(self):
async def gen():
yield slist[:2]
yield slist[1:]
yield slist
async def concat(*args):
return "".join(args)
it = ait.starmap(concat, gen())
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_empty(self):
def pred(x):
return x < 3
values = await ait.list(ait.takewhile(pred, []))
self.assertEqual(values, [])
@async_test
async def test_takewhile_function_list(self):
def pred(x):
return x < 3
it = ait.takewhile(pred, srange)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_function_gen(self):
async def gen():
yield 1
yield 2
yield 3
def pred(x):
return x < 3
it = ait.takewhile(pred, gen())
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_coroutine_list(self):
async def pred(x):
return x < 3
it = ait.takewhile(pred, srange)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_coroutine_gen(self):
def gen():
yield 1
yield 2
yield 3
async def pred(x):
return x < 3
it = ait.takewhile(pred, gen())
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_list_two(self):
it1, it2 = ait.tee(slist * 2)
for k in slist * 2:
a, b = await asyncio.gather(ait.next(it1), ait.next(it2))
self.assertEqual(a, b)
self.assertEqual(a, k)
self.assertEqual(b, k)
for it in [it1, it2]:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_list_six(self):
itrs = ait.tee(slist * 2, n=6)
for k in slist * 2:
values = await asyncio.gather(*[ait.next(it) for it in itrs])
for value in values:
self.assertEqual(value, k)
for it in itrs:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_gen_two(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
it1, it2 = ait.tee(gen())
for k in [1, 4, 9, 16]:
a, b = await asyncio.gather(ait.next(it1), ait.next(it2))
self.assertEqual(a, b)
self.assertEqual(a, k)
self.assertEqual(b, k)
for it in [it1, it2]:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_gen_six(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
itrs = ait.tee(gen(), n=6)
for k in [1, 4, 9, 16]:
values = await asyncio.gather(*[ait.next(it) for it in itrs])
for value in values:
self.assertEqual(value, k)
for it in itrs:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_range(self):
a = range(3)
b = range(5)
it = ait.zip_longest(a, b)
for k in [(0, 0), (1, 1), (2, 2), (None, 3), (None, 4)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_fillvalue(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
a = gen()
b = range(5)
it = ait.zip_longest(a, b, fillvalue=42)
for k in [(1, 0), (4, 1), (9, 2), (16, 3), (42, 4)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_exception(self):
async def gen():
yield 1
yield 2
raise Exception("fake error")
a = gen()
b = ait.repeat(5)
it = ait.zip_longest(a, b)
for k in [(1, 5), (2, 5)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaisesRegex(Exception, "fake error"):
await ait.next(it)
| 21,574 | Python | 28.037685 | 75 | 0.527348 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/__main__.py | # Copyright 2019 John Reese
# Licensed under the MIT license
import unittest
if __name__ == "__main__": # pragma: no cover
unittest.main(module="aioitertools.tests", verbosity=2)
| 186 | Python | 22.374997 | 59 | 0.698925 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/discovery.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import logging
import weakref
from botocore import xform_name
from botocore.exceptions import BotoCoreError, HTTPClientError, ConnectionError
from botocore.model import OperationNotFoundError
from botocore.utils import CachedProperty
logger = logging.getLogger(__name__)
class EndpointDiscoveryException(BotoCoreError):
pass
class EndpointDiscoveryRequired(EndpointDiscoveryException):
""" Endpoint Discovery is disabled but is required for this operation. """
fmt = 'Endpoint Discovery is not enabled but this operation requires it.'
class EndpointDiscoveryRefreshFailed(EndpointDiscoveryException):
""" Endpoint Discovery failed to the refresh the known endpoints. """
fmt = 'Endpoint Discovery failed to refresh the required endpoints.'
def block_endpoint_discovery_required_operations(model, **kwargs):
endpoint_discovery = model.endpoint_discovery
if endpoint_discovery and endpoint_discovery.get('required'):
raise EndpointDiscoveryRequired()
class EndpointDiscoveryModel(object):
def __init__(self, service_model):
self._service_model = service_model
@CachedProperty
def discovery_operation_name(self):
discovery_operation = self._service_model.endpoint_discovery_operation
return xform_name(discovery_operation.name)
@CachedProperty
def discovery_operation_keys(self):
discovery_operation = self._service_model.endpoint_discovery_operation
keys = []
if discovery_operation.input_shape:
keys = list(discovery_operation.input_shape.members.keys())
return keys
def discovery_required_for(self, operation_name):
try:
operation_model = self._service_model.operation_model(operation_name)
return operation_model.endpoint_discovery.get('required', False)
except OperationNotFoundError:
return False
def discovery_operation_kwargs(self, **kwargs):
input_keys = self.discovery_operation_keys
# Operation and Identifiers are only sent if there are Identifiers
if not kwargs.get('Identifiers'):
kwargs.pop('Operation', None)
kwargs.pop('Identifiers', None)
return dict((k, v) for k, v in kwargs.items() if k in input_keys)
def gather_identifiers(self, operation, params):
return self._gather_ids(operation.input_shape, params)
def _gather_ids(self, shape, params, ids=None):
# Traverse the input shape and corresponding parameters, gathering
# any input fields labeled as an endpoint discovery id
if ids is None:
ids = {}
for member_name, member_shape in shape.members.items():
if member_shape.metadata.get('endpointdiscoveryid'):
ids[member_name] = params[member_name]
elif member_shape.type_name == 'structure' and member_name in params:
self._gather_ids(member_shape, params[member_name], ids)
return ids
class EndpointDiscoveryManager(object):
def __init__(self, client, cache=None, current_time=None, always_discover=True):
if cache is None:
cache = {}
self._cache = cache
self._failed_attempts = {}
if current_time is None:
current_time = time.time
self._time = current_time
self._always_discover = always_discover
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
self._model = EndpointDiscoveryModel(client.meta.service_model)
def _parse_endpoints(self, response):
endpoints = response['Endpoints']
current_time = self._time()
for endpoint in endpoints:
cache_time = endpoint.get('CachePeriodInMinutes')
endpoint['Expiration'] = current_time + cache_time * 60
return endpoints
def _cache_item(self, value):
if isinstance(value, dict):
return tuple(sorted(value.items()))
else:
return value
def _create_cache_key(self, **kwargs):
kwargs = self._model.discovery_operation_kwargs(**kwargs)
return tuple(self._cache_item(v) for k, v in sorted(kwargs.items()))
def gather_identifiers(self, operation, params):
return self._model.gather_identifiers(operation, params)
def delete_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
if cache_key in self._cache:
del self._cache[cache_key]
def _describe_endpoints(self, **kwargs):
# This is effectively a proxy to whatever name/kwargs the service
# supports for endpoint discovery.
kwargs = self._model.discovery_operation_kwargs(**kwargs)
operation_name = self._model.discovery_operation_name
discovery_operation = getattr(self._client, operation_name)
logger.debug('Discovering endpoints with kwargs: %s', kwargs)
return discovery_operation(**kwargs)
def _get_current_endpoints(self, key):
if key not in self._cache:
return None
now = self._time()
return [e for e in self._cache[key] if now < e['Expiration']]
def _refresh_current_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
try:
response = self._describe_endpoints(**kwargs)
endpoints = self._parse_endpoints(response)
self._cache[cache_key] = endpoints
self._failed_attempts.pop(cache_key, None)
return endpoints
except (ConnectionError, HTTPClientError):
self._failed_attempts[cache_key] = self._time() + 60
return None
def _recently_failed(self, cache_key):
if cache_key in self._failed_attempts:
now = self._time()
if now < self._failed_attempts[cache_key]:
return True
del self._failed_attempts[cache_key]
return False
def _select_endpoint(self, endpoints):
return endpoints[0]['Address']
def describe_endpoint(self, **kwargs):
operation = kwargs['Operation']
discovery_required = self._model.discovery_required_for(operation)
if not self._always_discover and not discovery_required:
# Discovery set to only run on required operations
logger.debug(
'Optional discovery disabled. Skipping discovery for Operation: %s'
% operation
)
return None
# Get the endpoint for the provided operation and identifiers
cache_key = self._create_cache_key(**kwargs)
endpoints = self._get_current_endpoints(cache_key)
if endpoints:
return self._select_endpoint(endpoints)
# All known endpoints are stale
recently_failed = self._recently_failed(cache_key)
if not recently_failed:
# We haven't failed to discover recently, go ahead and refresh
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
# Discovery has failed recently, do our best to get an endpoint
logger.debug('Endpoint Discovery has failed for: %s', kwargs)
stale_entries = self._cache.get(cache_key, None)
if stale_entries:
# We have stale entries, use those while discovery is failing
return self._select_endpoint(stale_entries)
if discovery_required:
# It looks strange to be checking recently_failed again but,
# this informs us as to whether or not we tried to refresh earlier
if recently_failed:
# Discovery is required and we haven't already refreshed
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
            # No endpoints even after refresh, raise hard error
raise EndpointDiscoveryRefreshFailed()
# Discovery is optional, just use the default endpoint for now
return None
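# A minimal usage sketch (not part of botocore): ``client`` is assumed to be a
# botocore client for a service that models endpoint discovery; the operation
# and identifier names below are purely illustrative.
def _example_endpoint_discovery(client):
    manager = EndpointDiscoveryManager(client)
    # Returns a cached or freshly discovered address string, or None when
    # discovery is optional and could not be performed.
    return manager.describe_endpoint(
        Operation='DescribeSomething',
        Identifiers={'ResourceId': 'demo'},
    )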
class EndpointDiscoveryHandler(object):
def __init__(self, manager):
self._manager = manager
def register(self, events, service_id):
events.register(
'before-parameter-build.%s' % service_id, self.gather_identifiers
)
events.register_first(
'request-created.%s' % service_id, self.discover_endpoint
)
events.register('needs-retry.%s' % service_id, self.handle_retries)
def gather_identifiers(self, params, model, context, **kwargs):
endpoint_discovery = model.endpoint_discovery
# Only continue if the operation supports endpoint discovery
if endpoint_discovery is None:
return
ids = self._manager.gather_identifiers(model, params)
context['discovery'] = {'identifiers': ids}
def discover_endpoint(self, request, operation_name, **kwargs):
ids = request.context.get('discovery', {}).get('identifiers')
if ids is None:
return
endpoint = self._manager.describe_endpoint(
Operation=operation_name, Identifiers=ids
)
if endpoint is None:
logger.debug('Failed to discover and inject endpoint')
return
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
logger.debug('Injecting discovered endpoint: %s', endpoint)
request.url = endpoint
def handle_retries(self, request_dict, response, operation, **kwargs):
if response is None:
return None
_, response = response
status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
error_code = response.get('Error', {}).get('Code')
if status != 421 and error_code != 'InvalidEndpointException':
return None
context = request_dict.get('context', {})
ids = context.get('discovery', {}).get('identifiers')
if ids is None:
return None
# Delete the cached endpoints, forcing a refresh on retry
# TODO: Improve eviction behavior to only evict the bad endpoint if
# there are multiple. This will almost certainly require a lock.
self._manager.delete_endpoints(
Operation=operation.name, Identifiers=ids
)
return 0
| 11,031 | Python | 39.116363 | 84 | 0.646995 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/configloader.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shlex
import copy
import sys
from botocore.compat import six
import botocore.exceptions
def multi_file_load_config(*filenames):
"""Load and combine multiple INI configs with profiles.
    This function will take a list of filenames and return
a single dictionary that represents the merging of the loaded
config files.
If any of the provided filenames does not exist, then that file
is ignored. It is therefore ok to provide a list of filenames,
some of which may not exist.
Configuration files are **not** deep merged, only the top level
keys are merged. The filenames should be passed in order of
precedence. The first config file has precedence over the
second config file, which has precedence over the third config file,
etc. The only exception to this is that the "profiles" key is
merged to combine profiles from multiple config files into a
single profiles mapping. However, if a profile is defined in
multiple config files, then the config file with the highest
precedence is used. Profile values themselves are not merged.
For example::
FileA FileB FileC
[foo] [foo] [bar]
a=1 a=2 a=3
b=2
[bar] [baz] [profile a]
a=2 a=3 region=e
[profile a] [profile b] [profile c]
region=c region=d region=f
The final result of ``multi_file_load_config(FileA, FileB, FileC)``
would be::
{"foo": {"a": 1}, "bar": {"a": 2}, "baz": {"a": 3},
"profiles": {"a": {"region": "c"}}, {"b": {"region": d"}},
{"c": {"region": "f"}}}
Note that the "foo" key comes from A, even though it's defined in both
FileA and FileB. Because "foo" was defined in FileA first, then the values
for "foo" from FileA are used and the values for "foo" from FileB are
ignored. Also note where the profiles originate from. Profile "a"
comes FileA, profile "b" comes from FileB, and profile "c" comes
from FileC.
"""
configs = []
profiles = []
for filename in filenames:
try:
loaded = load_config(filename)
except botocore.exceptions.ConfigNotFound:
continue
profiles.append(loaded.pop('profiles'))
configs.append(loaded)
merged_config = _merge_list_of_dicts(configs)
merged_profiles = _merge_list_of_dicts(profiles)
merged_config['profiles'] = merged_profiles
return merged_config
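# A minimal usage sketch (not part of botocore): the paths below are
# hypothetical; files that do not exist are simply skipped, so this runs
# without error and returns an empty profile map in that case.
def _example_multi_file_load_config():
    merged = multi_file_load_config(
        '/tmp/example_credentials_file',  # highest precedence (hypothetical)
        '/tmp/example_config_file',       # lower precedence (hypothetical)
    )
    # Top-level keys come from the first file that defines them; 'profiles'
    # entries are combined across files, earlier files winning on duplicates.
    return merged['profiles']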
def _merge_list_of_dicts(list_of_dicts):
merged_dicts = {}
for single_dict in list_of_dicts:
for key, value in single_dict.items():
if key not in merged_dicts:
merged_dicts[key] = value
return merged_dicts
def load_config(config_filename):
"""Parse a INI config with profiles.
This will parse an INI config file and map top level profiles
into a top level "profile" key.
If you want to parse an INI file and map all section names to
top level keys, use ``raw_config_parse`` instead.
"""
parsed = raw_config_parse(config_filename)
return build_profile_map(parsed)
def raw_config_parse(config_filename, parse_subsections=True):
"""Returns the parsed INI config contents.
Each section name is a top level key.
:param config_filename: The name of the INI file to parse
:param parse_subsections: If True, parse indented blocks as
subsections that represent their own configuration dictionary.
For example, if the config file had the contents::
s3 =
signature_version = s3v4
addressing_style = path
The resulting ``raw_config_parse`` would be::
{'s3': {'signature_version': 's3v4', 'addressing_style': 'path'}}
If False, do not try to parse subsections and return the indented
block as its literal value::
{'s3': '\nsignature_version = s3v4\naddressing_style = path'}
:returns: A dict with keys for each profile found in the config
file and the value of each key being a dict containing name
value pairs found in that profile.
:raises: ConfigNotFound, ConfigParseError
"""
config = {}
path = config_filename
if path is not None:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise botocore.exceptions.ConfigNotFound(path=_unicode_path(path))
cp = six.moves.configparser.RawConfigParser()
try:
cp.read([path])
except (six.moves.configparser.Error, UnicodeDecodeError):
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path))
else:
for section in cp.sections():
config[section] = {}
for option in cp.options(section):
config_value = cp.get(section, option)
if parse_subsections and config_value.startswith('\n'):
# Then we need to parse the inner contents as
# hierarchical. We support a single level
# of nesting for now.
try:
config_value = _parse_nested(config_value)
except ValueError:
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path))
config[section][option] = config_value
return config
def _unicode_path(path):
if isinstance(path, six.text_type):
return path
# According to the documentation getfilesystemencoding can return None
# on unix in which case the default encoding is used instead.
filesystem_encoding = sys.getfilesystemencoding()
if filesystem_encoding is None:
filesystem_encoding = sys.getdefaultencoding()
return path.decode(filesystem_encoding, 'replace')
def _parse_nested(config_value):
# Given a value like this:
# \n
# foo = bar
# bar = baz
# We need to parse this into
# {'foo': 'bar', 'bar': 'baz}
parsed = {}
for line in config_value.splitlines():
line = line.strip()
if not line:
continue
# The caller will catch ValueError
# and raise an appropriate error
# if this fails.
key, value = line.split('=', 1)
parsed[key.strip()] = value.strip()
return parsed
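# A minimal sketch (not part of botocore) of the nested-value parsing above,
# using an indented block exactly as it would appear after raw INI parsing.
def _example_parse_nested():
    nested_value = '\nsignature_version = s3v4\naddressing_style = path'
    # Returns {'signature_version': 's3v4', 'addressing_style': 'path'}
    return _parse_nested(nested_value)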
def build_profile_map(parsed_ini_config):
"""Convert the parsed INI config into a profile map.
The config file format requires that every profile except the
default to be prepended with "profile", e.g.::
[profile test]
aws_... = foo
aws_... = bar
[profile bar]
aws_... = foo
aws_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"aws_...": "foo", "aws...": "bar"},
"profile bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"aws_...": "foo", "aws...": "bar"},
"bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
"""
parsed_config = copy.deepcopy(parsed_ini_config)
profiles = {}
final_config = {}
for key, values in parsed_config.items():
if key.startswith("profile"):
try:
parts = shlex.split(key)
except ValueError:
continue
if len(parts) == 2:
profiles[parts[1]] = values
elif key == 'default':
# default section is special and is considered a profile
# name but we don't require you use 'profile "default"'
# as a section.
profiles[key] = values
else:
final_config[key] = values
final_config['profiles'] = profiles
return final_config
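# A minimal sketch (not part of botocore) of the profile-map conversion above,
# starting from an already-parsed INI dictionary rather than a file on disk.
def _example_build_profile_map():
    parsed = {
        'profile dev': {'region': 'us-west-2'},  # becomes profiles['dev']
        'default': {'region': 'us-east-1'},      # becomes profiles['default']
        'preview': {'cloudfront': 'true'},       # stays a top-level key
    }
    return build_profile_map(parsed)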
| 9,580 | Python | 34.095238 | 79 | 0.599269 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/config.py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.compat import OrderedDict
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.exceptions import InvalidRetryConfigurationError
from botocore.exceptions import InvalidMaxRetryAttemptsError
from botocore.exceptions import InvalidRetryModeError
class Config(object):
"""Advanced configuration for Botocore clients.
:type region_name: str
:param region_name: The region to use in instantiating the client
:type signature_version: str
:param signature_version: The signature version when signing requests.
:type user_agent: str
:param user_agent: The value to use in the User-Agent header.
:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.
:type connect_timeout: float or int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection. The default is 60
seconds.
:type read_timeout: float or int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection. The default is
60 seconds.
:type parameter_validation: bool
:param parameter_validation: Whether parameter validation should occur
when serializing requests. The default is True. You can disable
parameter validation for performance reasons. Otherwise, it's
recommended to leave parameter validation enabled.
:type max_pool_connections: int
:param max_pool_connections: The maximum number of connections to
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type proxies: dict
:param proxies: A dictionary of proxy servers to use by protocol or
endpoint, e.g.:
{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
:type proxies_config: dict
:param proxies_config: A dictionary of additional proxy configurations.
Valid keys are:
* 'proxy_ca_bundle' -- The path to a custom certificate bundle to use
when establishing SSL/TLS connections with proxy.
* 'proxy_client_cert' -- The path to a certificate for proxy
TLS client authentication.
When a str is provided it is treated as a path to a proxy client
certificate. When a two element tuple is provided, it will be
interpreted as the path to the client certificate, and the path
to the certificate key.
* 'proxy_use_forwarding_for_https' -- For HTTPS proxies,
forward your requests to HTTPS destinations with an absolute
URI. We strongly recommend you only use this option with
trusted or corporate proxies. Value must be boolean.
:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
* 'use_accelerate_endpoint' -- Refers to whether to use the S3
Accelerate endpoint. The value must be a boolean. If True, the
client will use the S3 Accelerate endpoint. If the S3 Accelerate
endpoint is being used then the addressing style will always
be virtual.
* 'payload_signing_enabled' -- Refers to whether or not to SHA256
sign sigv4 payloads. By default, this is disabled for streaming
uploads (UploadPart and PutObject).
* 'addressing_style' -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals:
* auto -- Addressing style is chosen for user. Depending
on the configuration of client, the endpoint may be addressed in
the virtual or the path style. Note that this is the default
behavior if no style is specified.
* virtual -- Addressing style is always virtual. The name of the
bucket must be DNS compatible or an exception will be thrown.
Endpoints will be addressed as such: mybucket.s3.amazonaws.com
* path -- Addressing style is always by path. Endpoints will be
addressed as such: s3.amazonaws.com/mybucket
* 'us_east_1_regional_endpoint' - Refers to what S3 endpoint to use
when the region is configured to be us-east-1. Values must be a
string that equals:
* regional -- Use the us-east-1.amazonaws.com endpoint if the
client is configured to use the us-east-1 region.
* legacy -- Use the s3.amazonaws.com endpoint if the client is
configured to use the us-east-1 region. This is the default if
the configuration option is not specified.
:type retries: dict
:param retries: A dictionary for retry specific configurations.
Valid keys are:
* 'total_max_attempts' -- An integer representing the maximum number of
total attempts that will be made on a single request. This includes
the initial request, so a value of 1 indicates that no requests
will be retried. If ``total_max_attempts`` and ``max_attempts``
are both provided, ``total_max_attempts`` takes precedence.
``total_max_attempts`` is preferred over ``max_attempts`` because
it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and
the ``max_attempts`` config file value.
* 'max_attempts' -- An integer representing the maximum number of
retry attempts that will be made on a single request. For
example, setting this value to 2 will result in the request
being retried at most two times after the initial request. Setting
this value to 0 will result in no retries ever being attempted on
the initial request. If not provided, the number of retries will
default to whatever is modeled, which is typically four retries.
* 'mode' -- A string representing the type of retry mode botocore
should use. Valid values are:
* ``legacy`` - The pre-existing retry behavior.
* ``standard`` - The standardized set of retry rules. This
will also default to 3 max attempts unless overridden.
* ``adaptive`` - Retries with additional client side throttling.
:type client_cert: str, (str, str)
:param client_cert: The path to a certificate for TLS client authentication.
When a str is provided it is treated as a path to a client certificate
to be used when creating a TLS connection.
If a client key is to be provided alongside the client certificate the
client_cert should be set to a tuple of length two where the first
element is the path to the client certificate and the second element is
the path to the certificate key.
:type inject_host_prefix: bool
:param inject_host_prefix: Whether host prefix injection should occur.
Defaults to True.
Setting this to False disables the injection of operation parameters
into the prefix of the hostname. This is useful for clients providing
custom endpoints that should not have their host prefix modified.
"""
OPTION_DEFAULTS = OrderedDict([
('region_name', None),
('signature_version', None),
('user_agent', None),
('user_agent_extra', None),
('connect_timeout', DEFAULT_TIMEOUT),
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('proxies_config', None),
('s3', None),
('retries', None),
('client_cert', None),
('inject_host_prefix', True),
('endpoint_discovery_enabled', None),
])
def __init__(self, *args, **kwargs):
self._user_provided_options = self._record_user_provided_options(
args, kwargs)
# Merge the user_provided options onto the default options
config_vars = copy.copy(self.OPTION_DEFAULTS)
config_vars.update(self._user_provided_options)
# Set the attributes based on the config_vars
for key, value in config_vars.items():
setattr(self, key, value)
# Validate the s3 options
self._validate_s3_configuration(self.s3)
self._validate_retry_configuration(self.retries)
def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
# Iterate through the kwargs passed through to the constructor and
# map valid keys to the dictionary
for key, value in kwargs.items():
if key in self.OPTION_DEFAULTS:
user_provided_options[key] = value
# The key must exist in the available options
else:
raise TypeError(
'Got unexpected keyword argument \'%s\'' % key)
# The number of args should not be longer than the allowed
# options
if len(args) > len(option_order):
raise TypeError(
'Takes at most %s arguments (%s given)' % (
len(option_order), len(args)))
# Iterate through the args passed through to the constructor and map
# them to appropriate keys.
for i, arg in enumerate(args):
# If it a kwarg was specified for the arg, then error out
if option_order[i] in user_provided_options:
raise TypeError(
'Got multiple values for keyword argument \'%s\'' % (
option_order[i]))
user_provided_options[option_order[i]] = arg
return user_provided_options
def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)
def _validate_retry_configuration(self, retries):
if retries is not None:
for key, value in retries.items():
if key not in ['max_attempts', 'mode', 'total_max_attempts']:
raise InvalidRetryConfigurationError(
retry_config_option=key)
if key == 'max_attempts' and value < 0:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=0,
)
if key == 'total_max_attempts' and value < 1:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=1,
)
if key == 'mode' and value not in ['legacy', 'standard',
'adaptive']:
raise InvalidRetryModeError(
provided_retry_mode=value
)
def merge(self, other_config):
"""Merges the config object with another config object
This will merge in all non-default values from the provided config
and return a new config object
:type other_config: botocore.config.Config
        :param other_config: Another config object to merge with. The values
in the provided config object will take precedence in the merging
:returns: A config object built from the merged values of both
config objects.
"""
# Make a copy of the current attributes in the config object.
config_options = copy.copy(self._user_provided_options)
# Merge in the user provided options from the other config
config_options.update(other_config._user_provided_options)
# Return a new config object with the merged properties.
return Config(**config_options)
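# A minimal usage sketch (not part of botocore): merging two Config objects.
# Options set on the second config win; note that dictionaries such as
# ``retries`` are replaced wholesale, not deep merged.
def _example_config_merge():
    base = Config(region_name='us-east-1', retries={'max_attempts': 2})
    override = Config(retries={'mode': 'standard', 'total_max_attempts': 3})
    merged = base.merge(override)
    # merged.region_name == 'us-east-1'; merged.retries comes from ``override``
    return merged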
| 12,721 | Python | 42.718213 | 80 | 0.642324 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/response.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import logging
from botocore import ScalarTypes
from botocore.hooks import first_non_none_response
from botocore.compat import json, set_socket_timeout, XMLParseError
from botocore.exceptions import IncompleteReadError, ReadTimeoutError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from botocore import parsers
logger = logging.getLogger(__name__)
class StreamingBody(object):
"""Wrapper class for an http response body.
This provides a few additional conveniences that do not exist
in the urllib3 model:
* Set the timeout on the socket (i.e read() timeouts)
* Auto validation of content length, if the amount of bytes
we read does not match the content length, an exception
is raised.
"""
_DEFAULT_CHUNK_SIZE = 1024
def __init__(self, raw_stream, content_length):
self._raw_stream = raw_stream
self._content_length = content_length
self._amount_read = 0
def set_socket_timeout(self, timeout):
"""Set the timeout seconds on the socket."""
# The problem we're trying to solve is to prevent .read() calls from
# hanging. This can happen in rare cases. What we'd like to ideally
# do is set a timeout on the .read() call so that callers can retry
# the request.
# Unfortunately, this isn't currently possible in requests.
# See: https://github.com/kennethreitz/requests/issues/1803
# So what we're going to do is reach into the guts of the stream and
# grab the socket object, which we can set the timeout on. We're
# putting in a check here so in case this interface goes away, we'll
# know.
try:
# To further complicate things, the way to grab the
# underlying socket object from an HTTPResponse is different
# in py2 and py3. So this code has been pushed to botocore.compat.
set_socket_timeout(self._raw_stream, timeout)
except AttributeError:
logger.error("Cannot access the socket object of "
"a streaming response. It's possible "
"the interface has changed.", exc_info=True)
raise
def read(self, amt=None):
"""Read at most amt bytes from the stream.
If the amt argument is omitted, read all data.
"""
try:
chunk = self._raw_stream.read(amt)
except URLLib3ReadTimeoutError as e:
# TODO: the url will be None as urllib3 isn't setting it yet
raise ReadTimeoutError(endpoint_url=e.url, error=e)
self._amount_read += len(chunk)
if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
self._verify_content_length()
return chunk
def __iter__(self):
"""Return an iterator to yield 1k chunks from the raw stream.
"""
return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)
def __next__(self):
"""Return the next 1k chunk from the raw stream.
"""
current_chunk = self.read(self._DEFAULT_CHUNK_SIZE)
if current_chunk:
return current_chunk
raise StopIteration()
next = __next__
def iter_lines(self, chunk_size=1024, keepends=False):
"""Return an iterator to yield lines from the raw stream.
This is achieved by reading chunk of bytes (of size chunk_size) at a
time from the raw stream, and then yielding lines from there.
"""
pending = b''
for chunk in self.iter_chunks(chunk_size):
lines = (pending + chunk).splitlines(True)
for line in lines[:-1]:
yield line.splitlines(keepends)[0]
pending = lines[-1]
if pending:
yield pending.splitlines(keepends)[0]
def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
"""Return an iterator to yield chunks of chunk_size bytes from the raw
stream.
"""
while True:
current_chunk = self.read(chunk_size)
if current_chunk == b"":
break
yield current_chunk
def _verify_content_length(self):
# See: https://github.com/kennethreitz/requests/issues/1855
# Basically, our http library doesn't do this for us, so we have
# to do this ourself.
if self._content_length is not None and \
self._amount_read != int(self._content_length):
raise IncompleteReadError(
actual_bytes=self._amount_read,
expected_bytes=int(self._content_length))
def close(self):
"""Close the underlying http response stream."""
self._raw_stream.close()
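# A minimal sketch (not part of botocore) of consuming a StreamingBody. An
# io.BytesIO stands in for the raw urllib3 stream purely for illustration.
def _example_streaming_body():
    import io
    payload = b'first line\nsecond line\n'
    body = StreamingBody(io.BytesIO(payload), content_length=len(payload))
    # Yields b'first line' then b'second line'; the content length is
    # validated once the stream is exhausted.
    return list(body.iter_lines())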
def get_response(operation_model, http_response):
protocol = operation_model.metadata['protocol']
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
}
# TODO: Unfortunately, we have to have error logic here.
# If it looks like an error, in the streaming response case we
# need to actually grab the contents.
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_streaming_output:
response_dict['body'] = StreamingBody(
http_response.raw, response_dict['headers'].get('content-length'))
else:
response_dict['body'] = http_response.content
parser = parsers.create_parser(protocol)
return http_response, parser.parse(response_dict,
operation_model.output_shape)
| 6,434 | Python | 38.237805 | 79 | 0.635996 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/handlers.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Builtin event handlers.
This module contains builtin handlers for events emitted by botocore.
"""
import base64
import logging
import copy
import re
import warnings
import uuid
from botocore.compat import (
unquote, json, six, unquote_str, ensure_bytes, get_md5,
MD5_AVAILABLE, OrderedDict, urlsplit, urlunsplit, XMLParseError,
ETree,
)
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
from botocore.docs.utils import AppendParamDocumentation
from botocore.signers import add_generate_presigned_url
from botocore.signers import add_generate_presigned_post
from botocore.signers import add_generate_db_auth_token
from botocore.exceptions import ParamValidationError
from botocore.exceptions import AliasConflictParameterError
from botocore.exceptions import UnsupportedTLSVersionWarning
from botocore.exceptions import MissingServiceIdError
from botocore.utils import percent_encode, SAFE_CHARS
from botocore.utils import switch_host_with_param
from botocore.utils import hyphenize_service_id
from botocore.utils import conditionally_calculate_md5
from botocore import retryhandler
from botocore import utils
from botocore import translate
import botocore
import botocore.auth
logger = logging.getLogger(__name__)
REGISTER_FIRST = object()
REGISTER_LAST = object()
# From the S3 docs:
# The rules for bucket names in the US Standard region allow bucket names
# to be as long as 255 characters, and bucket names can contain any
# combination of uppercase letters, lowercase letters, numbers, periods
# (.), hyphens (-), and underscores (_).
VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
_ACCESSPOINT_ARN = (
r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]+:[0-9]{12}:accesspoint[/:]'
r'[a-zA-Z0-9\-]{1,63}$'
)
_OUTPOST_ARN = (
r'^arn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]'
r'[a-zA-Z0-9\-]{1,63}[/:]accesspoint[/:][a-zA-Z0-9\-]{1,63}$'
)
VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN]))
VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
SERVICE_NAME_ALIASES = {
'runtime.sagemaker': 'sagemaker-runtime'
}
def handle_service_name_alias(service_name, **kwargs):
return SERVICE_NAME_ALIASES.get(service_name, service_name)
def escape_xml_payload(params, **kwargs):
# Replace \r and \n with the escaped sequence over the whole XML document
# to avoid linebreak normalization modifying customer input when the
# document is parsed. Ideally, we would do this in ElementTree.tostring,
# but it doesn't allow us to override entity escaping for text fields. For
# this operation \r and \n can only appear in the XML document if they were
# passed as part of the customer input.
body = params['body']
replaced = False
if b'\r' in body:
replaced = True
        body = body.replace(b'\r', b'&#13;')
    if b'\n' in body:
        replaced = True
        body = body.replace(b'\n', b'&#10;')
if not replaced:
return
params['body'] = body
if 'Content-MD5' in params['headers']:
# The Content-MD5 is now wrong, so we'll need to recalculate it
del params['headers']['Content-MD5']
conditionally_calculate_md5(params, **kwargs)
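# A minimal sketch (not part of botocore) of the CR/LF escaping above. The
# request dict is a bare-bones stand-in with only the keys this handler needs.
def _example_escape_xml_payload():
    params = {'body': b'<Tag>line1\r\nline2</Tag>', 'headers': {}}
    escape_xml_payload(params)
    # The body now carries '&#13;' and '&#10;' character references instead of
    # raw CR/LF bytes, and Content-MD5 is recomputed where supported.
    return params['body']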
def check_for_200_error(response, **kwargs):
# From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
# There are two opportunities for a copy request to return an error. One
# can occur when Amazon S3 receives the copy request and the other can
# occur while Amazon S3 is copying the files. If the error occurs before
# the copy operation starts, you receive a standard Amazon S3 error. If the
# error occurs during the copy operation, the error response is embedded in
# the 200 OK response. This means that a 200 OK response can contain either
# a success or an error. Make sure to design your application to parse the
# contents of the response and handle it appropriately.
#
# So this handler checks for this case. Even though the server sends a
# 200 response, conceptually this should be handled exactly like a
# 500 response (with respect to raising exceptions, retries, etc.)
# We're connected *before* all the other retry logic handlers, so as long
# as we switch the error code to 500, we'll retry the error as expected.
if response is None:
# A None response can happen if an exception is raised while
# trying to retrieve the response. See Endpoint._get_response().
return
http_response, parsed = response
if _looks_like_special_case_error(http_response):
logger.debug("Error found for response with 200 status code, "
"errors: %s, changing status code to "
"500.", parsed)
http_response.status_code = 500
def _looks_like_special_case_error(http_response):
if http_response.status_code == 200:
try:
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding='utf-8')
parser.feed(http_response.content)
root = parser.close()
except XMLParseError:
# In cases of network disruptions, we may end up with a partial
# streamed response from S3. We need to treat these cases as
# 500 Service Errors and try again.
return True
if root.tag == 'Error':
return True
return False
def set_operation_specific_signer(context, signing_name, **kwargs):
""" Choose the operation-specific signer.
Individual operations may have a different auth type than the service as a
whole. This will most often manifest as operations that should not be
authenticated at all, but can include other auth modes such as sigv4
without body signing.
"""
auth_type = context.get('auth_type')
# Auth type will be None if the operation doesn't have a configured auth
# type.
if not auth_type:
return
# Auth type will be the string value 'none' if the operation should not
# be signed at all.
if auth_type == 'none':
return botocore.UNSIGNED
if auth_type.startswith('v4'):
signature_version = 'v4'
if signing_name == 's3':
signature_version = 's3v4'
# If the operation needs an unsigned body, we set additional context
# allowing the signer to be aware of this.
if auth_type == 'v4-unsigned-body':
context['payload_signing_enabled'] = False
return signature_version
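# A minimal sketch (not part of botocore) of the auth-type mapping above,
# using a hand-built context dict in place of a real operation context.
def _example_operation_signer():
    context = {'auth_type': 'v4-unsigned-body'}
    version = set_operation_specific_signer(context, signing_name='s3')
    # version == 's3v4', and payload signing has been disabled in the context.
    return version, context['payload_signing_enabled']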
def decode_console_output(parsed, **kwargs):
if 'Output' in parsed:
try:
# We're using 'replace' for errors because it is
# possible that console output contains non string
# chars we can't utf-8 decode.
value = base64.b64decode(six.b(parsed['Output'])).decode(
'utf-8', 'replace')
parsed['Output'] = value
except (ValueError, TypeError, AttributeError):
logger.debug('Error decoding base64', exc_info=True)
def generate_idempotent_uuid(params, model, **kwargs):
for name in model.idempotent_members:
if name not in params:
params[name] = str(uuid.uuid4())
logger.debug("injecting idempotency token (%s) into param '%s'." %
(params[name], name))
def decode_quoted_jsondoc(value):
try:
value = json.loads(unquote(value))
except (ValueError, TypeError):
logger.debug('Error loading quoted JSON', exc_info=True)
return value
def json_decode_template_body(parsed, **kwargs):
if 'TemplateBody' in parsed:
try:
value = json.loads(
parsed['TemplateBody'], object_pairs_hook=OrderedDict)
parsed['TemplateBody'] = value
except (ValueError, TypeError):
logger.debug('error loading JSON', exc_info=True)
def validate_bucket_name(params, **kwargs):
if 'Bucket' not in params:
return
bucket = params['Bucket']
if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket):
error_msg = (
'Invalid bucket name "%s": Bucket name must match '
'the regex "%s" or be an ARN matching the regex "%s"' % (
bucket, VALID_BUCKET.pattern, VALID_S3_ARN.pattern))
raise ParamValidationError(report=error_msg)
def sse_md5(params, **kwargs):
"""
S3 server-side encryption requires the encryption key to be sent to the
server base64 encoded, as well as a base64-encoded MD5 hash of the
encryption key. This handler does both if the MD5 has not been set by
the caller.
"""
_sse_md5(params, 'SSECustomer')
def copy_source_sse_md5(params, **kwargs):
"""
S3 server-side encryption requires the encryption key to be sent to the
server base64 encoded, as well as a base64-encoded MD5 hash of the
encryption key. This handler does both if the MD5 has not been set by
the caller specifically if the parameter is for the copy-source sse-c key.
"""
_sse_md5(params, 'CopySourceSSECustomer')
def _sse_md5(params, sse_member_prefix='SSECustomer'):
if not _needs_s3_sse_customization(params, sse_member_prefix):
return
sse_key_member = sse_member_prefix + 'Key'
sse_md5_member = sse_member_prefix + 'KeyMD5'
key_as_bytes = params[sse_key_member]
if isinstance(key_as_bytes, six.text_type):
key_as_bytes = key_as_bytes.encode('utf-8')
key_md5_str = base64.b64encode(
get_md5(key_as_bytes).digest()).decode('utf-8')
key_b64_encoded = base64.b64encode(key_as_bytes).decode('utf-8')
params[sse_key_member] = key_b64_encoded
params[sse_md5_member] = key_md5_str
def _needs_s3_sse_customization(params, sse_member_prefix):
return (params.get(sse_member_prefix + 'Key') is not None and
sse_member_prefix + 'KeyMD5' not in params)
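# Illustrative sketch (editor-added; the key material is hypothetical):
# when an SSE-C key is supplied without its MD5, both values are filled in
# as base64 strings, e.g.
#
#   params = {'SSECustomerKey': b'\x00' * 32}
#   sse_md5(params)
#   # params['SSECustomerKey']    -> base64 of the 32-byte key
#   # params['SSECustomerKeyMD5'] -> base64 of the key's MD5 digest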
def disable_signing(**kwargs):
"""
This handler disables request signing by setting the signer
name to a special sentinel value.
"""
return botocore.UNSIGNED
def add_expect_header(model, params, **kwargs):
if model.http.get('method', '') not in ['PUT', 'POST']:
return
if 'body' in params:
body = params['body']
if hasattr(body, 'read'):
# Any file like object will use an expect 100-continue
# header regardless of size.
logger.debug("Adding expect 100 continue header to request.")
params['headers']['Expect'] = '100-continue'
class DeprecatedServiceDocumenter(object):
def __init__(self, replacement_service_name):
self._replacement_service_name = replacement_service_name
def inject_deprecation_notice(self, section, event_name, **kwargs):
section.style.start_important()
section.write('This service client is deprecated. Please use ')
section.style.ref(
self._replacement_service_name,
self._replacement_service_name,
)
section.write(' instead.')
section.style.end_important()
def document_copy_source_form(section, event_name, **kwargs):
if 'request-example' in event_name:
parent = section.get_section('structure-value')
param_line = parent.get_section('CopySource')
value_portion = param_line.get_section('member-value')
value_portion.clear_text()
value_portion.write("'string' or {'Bucket': 'string', "
"'Key': 'string', 'VersionId': 'string'}")
elif 'request-params' in event_name:
param_section = section.get_section('CopySource')
type_section = param_section.get_section('param-type')
type_section.clear_text()
type_section.write(':type CopySource: str or dict')
doc_section = param_section.get_section('param-documentation')
doc_section.clear_text()
doc_section.write(
"The name of the source bucket, key name of the source object, "
"and optional version ID of the source object. You can either "
"provide this value as a string or a dictionary. The "
"string form is {bucket}/{key} or "
"{bucket}/{key}?versionId={versionId} if you want to copy a "
"specific version. You can also provide this value as a "
"dictionary. The dictionary format is recommended over "
"the string format because it is more explicit. The dictionary "
"format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}."
" Note that the VersionId key is optional and may be omitted."
" To specify an S3 access point, provide the access point"
" ARN for the ``Bucket`` key in the copy source dictionary. If you"
" want to provide the copy source for an S3 access point as a"
" string instead of a dictionary, the ARN provided must be the"
" full S3 access point object ARN"
" (i.e. {accesspoint_arn}/object/{key})"
)
def handle_copy_source_param(params, **kwargs):
"""Convert CopySource param for CopyObject/UploadPartCopy.
This handler will deal with two cases:
* CopySource provided as a string. We'll make a best effort
to URL encode the key name as required. This will require
parsing the bucket and version id from the CopySource value
and only encoding the key.
* CopySource provided as a dict. In this case we're
explicitly given the Bucket, Key, and VersionId so we're
able to encode the key and ensure this value is serialized
and correctly sent to S3.
"""
source = params.get('CopySource')
if source is None:
# The call will eventually fail but we'll let the
# param validator take care of this. It will
# give a better error message.
return
if isinstance(source, six.string_types):
params['CopySource'] = _quote_source_header(source)
elif isinstance(source, dict):
params['CopySource'] = _quote_source_header_from_dict(source)
def _quote_source_header_from_dict(source_dict):
try:
bucket = source_dict['Bucket']
key = source_dict['Key']
version_id = source_dict.get('VersionId')
if VALID_S3_ARN.search(bucket):
final = '%s/object/%s' % (bucket, key)
else:
final = '%s/%s' % (bucket, key)
except KeyError as e:
raise ParamValidationError(
report='Missing required parameter: %s' % str(e))
final = percent_encode(final, safe=SAFE_CHARS + '/')
if version_id is not None:
final += '?versionId=%s' % version_id
return final
def _quote_source_header(value):
result = VERSION_ID_SUFFIX.search(value)
if result is None:
return percent_encode(value, safe=SAFE_CHARS + '/')
else:
first, version_id = value[:result.start()], value[result.start():]
return percent_encode(first, safe=SAFE_CHARS + '/') + version_id
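# Illustrative sketch (editor-added; bucket/key names are hypothetical):
# both CopySource forms end up with the key percent-encoded and '/' kept, e.g.
#
#   _quote_source_header('mybucket/my key?versionId=3')
#   # -> 'mybucket/my%20key?versionId=3'
#   _quote_source_header_from_dict(
#       {'Bucket': 'mybucket', 'Key': 'my key', 'VersionId': '3'})
#   # -> 'mybucket/my%20key?versionId=3'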
def _get_cross_region_presigned_url(request_signer, request_dict, model,
source_region, destination_region):
# The better way to do this is to actually get the
# endpoint_resolver and get the endpoint_url given the
# source region. In this specific case, we know that
# we can safely replace the dest region with the source
# region because of the supported EC2 regions, but in
# general this is not a safe assumption to make.
# I think eventually we should try to plumb through something
# that allows us to resolve endpoints from regions.
request_dict_copy = copy.deepcopy(request_dict)
request_dict_copy['body']['DestinationRegion'] = destination_region
request_dict_copy['url'] = request_dict['url'].replace(
destination_region, source_region)
request_dict_copy['method'] = 'GET'
request_dict_copy['headers'] = {}
return request_signer.generate_presigned_url(
request_dict_copy, region_name=source_region,
operation_name=model.name)
def _get_presigned_url_source_and_destination_regions(request_signer, params):
# Gets the source and destination regions to be used
destination_region = request_signer._region_name
source_region = params.get('SourceRegion')
return source_region, destination_region
def inject_presigned_url_ec2(params, request_signer, model, **kwargs):
# The customer can still provide this, so we should pass if they do.
if 'PresignedUrl' in params['body']:
return
src, dest = _get_presigned_url_source_and_destination_regions(
request_signer, params['body'])
url = _get_cross_region_presigned_url(
request_signer, params, model, src, dest)
params['body']['PresignedUrl'] = url
# EC2 Requires that the destination region be sent over the wire in
# addition to the source region.
params['body']['DestinationRegion'] = dest
def inject_presigned_url_rds(params, request_signer, model, **kwargs):
# SourceRegion is not required for RDS operations, so it's possible that
# it isn't set. In that case it's probably a local copy so we don't need
# to do anything else.
if 'SourceRegion' not in params['body']:
return
src, dest = _get_presigned_url_source_and_destination_regions(
request_signer, params['body'])
# Since SourceRegion isn't actually modeled for RDS, it needs to be
# removed from the request params before we send the actual request.
del params['body']['SourceRegion']
if 'PreSignedUrl' in params['body']:
return
url = _get_cross_region_presigned_url(
request_signer, params, model, src, dest)
params['body']['PreSignedUrl'] = url
def json_decode_policies(parsed, model, **kwargs):
    # Any time an IAM operation returns a policy document, it is a
    # URL-encoded JSON string, i.e. urlencode(json.dumps(policy_document)).
# To give users something more useful, we will urldecode
# this value and json.loads() the result so that they have
# the policy document as a dictionary.
output_shape = model.output_shape
if output_shape is not None:
_decode_policy_types(parsed, model.output_shape)
def _decode_policy_types(parsed, shape):
# IAM consistently uses the policyDocumentType shape to indicate
# strings that have policy documents.
shape_name = 'policyDocumentType'
if shape.type_name == 'structure':
for member_name, member_shape in shape.members.items():
if member_shape.type_name == 'string' and \
member_shape.name == shape_name and \
member_name in parsed:
parsed[member_name] = decode_quoted_jsondoc(
parsed[member_name])
elif member_name in parsed:
_decode_policy_types(parsed[member_name], member_shape)
if shape.type_name == 'list':
shape_member = shape.member
for item in parsed:
_decode_policy_types(item, shape_member)
def parse_get_bucket_location(parsed, http_response, **kwargs):
# s3.GetBucketLocation cannot be modeled properly. To
# account for this we just manually parse the XML document.
# The "parsed" passed in only has the ResponseMetadata
# filled out. This handler will fill in the LocationConstraint
# value.
if http_response.raw is None:
return
response_body = http_response.content
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding='utf-8')
parser.feed(response_body)
root = parser.close()
region = root.text
parsed['LocationConstraint'] = region
def base64_encode_user_data(params, **kwargs):
if 'UserData' in params:
if isinstance(params['UserData'], six.text_type):
# Encode it to bytes if it is text.
params['UserData'] = params['UserData'].encode('utf-8')
params['UserData'] = base64.b64encode(
params['UserData']).decode('utf-8')
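# Illustrative sketch (editor-added; the parameter value is hypothetical):
# text UserData is transparently base64 encoded before serialization, e.g.
#
#   params = {'UserData': 'hello'}
#   base64_encode_user_data(params)
#   # params['UserData'] -> 'aGVsbG8='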
def document_base64_encoding(param):
description = ('**This value will be base64 encoded automatically. Do '
'not base64 encode this value prior to performing the '
'operation.**')
append = AppendParamDocumentation(param, description)
return append.append_documentation
def validate_ascii_metadata(params, **kwargs):
"""Verify S3 Metadata only contains ascii characters.
From: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
"Amazon S3 stores user-defined metadata in lowercase. Each name, value pair
must conform to US-ASCII when using REST and UTF-8 when using SOAP or
browser-based uploads via POST."
"""
metadata = params.get('Metadata')
if not metadata or not isinstance(metadata, dict):
# We have to at least type check the metadata as a dict type
# because this handler is called before param validation.
# We'll go ahead and return because the param validator will
# give a descriptive error message for us.
# We might need a post-param validation event.
return
for key, value in metadata.items():
try:
key.encode('ascii')
value.encode('ascii')
except UnicodeEncodeError as e:
error_msg = (
'Non ascii characters found in S3 metadata '
'for key "%s", value: "%s". \nS3 metadata can only '
'contain ASCII characters. ' % (key, value)
)
raise ParamValidationError(
report=error_msg)
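# Illustrative sketch (editor-added; the metadata values are hypothetical):
# metadata containing non-ASCII characters is rejected up front, e.g.
#
#   validate_ascii_metadata({'Metadata': {'label': 'résumé'}})
#   # raises ParamValidationError
#   validate_ascii_metadata({'Metadata': {'label': 'resume'}})
#   # returns without error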
def fix_route53_ids(params, model, **kwargs):
"""
Check for and split apart Route53 resource IDs, setting
only the last piece. This allows the output of one operation
(e.g. ``'foo/1234'``) to be used as input in another
operation (e.g. it expects just ``'1234'``).
"""
input_shape = model.input_shape
if not input_shape or not hasattr(input_shape, 'members'):
return
members = [name for (name, shape) in input_shape.members.items()
if shape.name in ['ResourceId', 'DelegationSetId']]
for name in members:
if name in params:
orig_value = params[name]
params[name] = orig_value.split('/')[-1]
logger.debug('%s %s -> %s', name, orig_value, params[name])
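# Illustrative sketch (editor-added; the hosted zone ID is hypothetical):
# only the trailing segment of a Route53 resource ID is kept for members
# modeled as ResourceId/DelegationSetId, e.g.
#
#   params = {'HostedZoneId': '/hostedzone/Z1D633PJN98FT9'}
#   # after fix_route53_ids: params['HostedZoneId'] -> 'Z1D633PJN98FT9'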
def inject_account_id(params, **kwargs):
if params.get('accountId') is None:
# Glacier requires accountId, but allows you
        # to specify '-' for the current owner's account.
# We add this default value if the user does not
# provide the accountId as a convenience.
params['accountId'] = '-'
def add_glacier_version(model, params, **kwargs):
request_dict = params
request_dict['headers']['x-amz-glacier-version'] = model.metadata[
'apiVersion']
def add_accept_header(model, params, **kwargs):
if params['headers'].get('Accept', None) is None:
request_dict = params
request_dict['headers']['Accept'] = 'application/json'
def add_glacier_checksums(params, **kwargs):
"""Add glacier checksums to the http request.
This will add two headers to the http request:
* x-amz-content-sha256
* x-amz-sha256-tree-hash
These values will only be added if they are not present
in the HTTP request.
"""
request_dict = params
headers = request_dict['headers']
body = request_dict['body']
if isinstance(body, six.binary_type):
# If the user provided a bytes type instead of a file
        # like object, we'll temporarily create a BytesIO object
# so we can use the util functions to calculate the
# checksums which assume file like objects. Note that
# we're not actually changing the body in the request_dict.
body = six.BytesIO(body)
starting_position = body.tell()
if 'x-amz-content-sha256' not in headers:
headers['x-amz-content-sha256'] = utils.calculate_sha256(
body, as_hex=True)
body.seek(starting_position)
if 'x-amz-sha256-tree-hash' not in headers:
headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body)
body.seek(starting_position)
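# Illustrative sketch (editor-added; the body bytes are hypothetical):
# missing checksum headers are computed from the request body, e.g.
#
#   request_dict = {'headers': {}, 'body': b'example archive bytes'}
#   add_glacier_checksums(request_dict)
#   # request_dict['headers'] now contains:
#   #   'x-amz-content-sha256'   - hex-encoded SHA-256 of the body
#   #   'x-amz-sha256-tree-hash' - Glacier tree hash of the body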
def document_glacier_tree_hash_checksum():
doc = '''
This is a required field.
Ideally you will want to compute this value with checksums from
        previously uploaded parts, using the algorithm described in
`Glacier documentation <http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html>`_.
But if you prefer, you can also use botocore.utils.calculate_tree_hash()
        to compute it from a raw file by::
checksum = calculate_tree_hash(open('your_file.txt', 'rb'))
'''
return AppendParamDocumentation('checksum', doc).append_documentation
def document_cloudformation_get_template_return_type(section, event_name, **kwargs):
if 'response-params' in event_name:
template_body_section = section.get_section('TemplateBody')
type_section = template_body_section.get_section('param-type')
type_section.clear_text()
type_section.write('(*dict*) --')
elif 'response-example' in event_name:
parent = section.get_section('structure-value')
param_line = parent.get_section('TemplateBody')
value_portion = param_line.get_section('member-value')
value_portion.clear_text()
value_portion.write('{}')
def switch_host_machinelearning(request, **kwargs):
switch_host_with_param(request, 'PredictEndpoint')
def check_openssl_supports_tls_version_1_2(**kwargs):
import ssl
try:
openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
if openssl_version_tuple < (1, 0, 1):
warnings.warn(
'Currently installed openssl version: %s does not '
'support TLS 1.2, which is required for use of iot-data. '
'Please use python installed with openssl version 1.0.1 or '
'higher.' % (ssl.OPENSSL_VERSION),
UnsupportedTLSVersionWarning
)
# We cannot check the openssl version on python2.6, so we should just
    # pass on this convenience check.
except AttributeError:
pass
def change_get_to_post(request, **kwargs):
# This is useful when we need to change a potentially large GET request
# into a POST with x-www-form-urlencoded encoding.
if request.method == 'GET' and '?' in request.url:
request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
request.method = 'POST'
request.url, request.data = request.url.split('?', 1)
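# Illustrative sketch (editor-added; the request object and URL are
# hypothetical): a query-string GET is rewritten as a form-encoded POST, e.g.
#
#   request.method == 'GET'
#   request.url == 'https://search.example.com/2013-01-01/search?q=tree&size=10'
#   change_get_to_post(request)
#   # request.method -> 'POST'
#   # request.url    -> 'https://search.example.com/2013-01-01/search'
#   # request.data   -> 'q=tree&size=10'
#   # request.headers['Content-Type'] -> 'application/x-www-form-urlencoded'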
def set_list_objects_encoding_type_url(params, context, **kwargs):
if 'EncodingType' not in params:
# We set this context so that we know it wasn't the customer that
# requested the encoding.
context['encoding_type_auto_set'] = True
params['EncodingType'] = 'url'
def decode_list_object(parsed, context, **kwargs):
# This is needed because we are passing url as the encoding type. Since the
# paginator is based on the key, we need to handle it before it can be
# round tripped.
#
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# Delimiter, Marker, Prefix, NextMarker, Key.
_decode_list_object(
top_level_keys=['Delimiter', 'Marker', 'NextMarker'],
nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
parsed=parsed,
context=context
)
def decode_list_object_v2(parsed, context, **kwargs):
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# Delimiter, Prefix, ContinuationToken, Key, and StartAfter.
_decode_list_object(
top_level_keys=['Delimiter', 'Prefix', 'StartAfter'],
nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
parsed=parsed,
context=context
)
def decode_list_object_versions(parsed, context, **kwargs):
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
_decode_list_object(
top_level_keys=[
'KeyMarker',
'NextKeyMarker',
'Prefix',
'Delimiter',
],
nested_keys=[
('Versions', 'Key'),
('DeleteMarkers', 'Key'),
('CommonPrefixes', 'Prefix'),
],
parsed=parsed,
context=context
)
def _decode_list_object(top_level_keys, nested_keys, parsed, context):
if parsed.get('EncodingType') == 'url' and \
context.get('encoding_type_auto_set'):
# URL decode top-level keys in the response if present.
for key in top_level_keys:
if key in parsed:
parsed[key] = unquote_str(parsed[key])
# URL decode nested keys from the response if present.
for (top_key, child_key) in nested_keys:
if top_key in parsed:
for member in parsed[top_key]:
member[child_key] = unquote_str(member[child_key])
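# Illustrative sketch (editor-added; the parsed response is hypothetical):
# keys are URL-decoded only when botocore itself injected EncodingType, e.g.
#
#   parsed = {'EncodingType': 'url',
#             'Contents': [{'Key': 'photos%2Fsummer%20trip.jpg'}]}
#   context = {'encoding_type_auto_set': True}
#   _decode_list_object(top_level_keys=['Delimiter', 'Marker', 'NextMarker'],
#                       nested_keys=[('Contents', 'Key')],
#                       parsed=parsed, context=context)
#   # parsed['Contents'][0]['Key'] -> 'photos/summer trip.jpg'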
def convert_body_to_file_like_object(params, **kwargs):
if 'Body' in params:
if isinstance(params['Body'], six.string_types):
params['Body'] = six.BytesIO(ensure_bytes(params['Body']))
elif isinstance(params['Body'], six.binary_type):
params['Body'] = six.BytesIO(params['Body'])
def _add_parameter_aliases(handler_list):
# Mapping of original parameter to parameter alias.
# The key is <service>.<operation>.parameter
# The first part of the key is used for event registration.
# The last part is the original parameter name and the value is the
# alias to expose in documentation.
aliases = {
'ec2.*.Filter': 'Filters',
'logs.CreateExportTask.from': 'fromTime',
'cloudsearchdomain.Search.return': 'returnFields'
}
for original, new_name in aliases.items():
event_portion, original_name = original.rsplit('.', 1)
parameter_alias = ParameterAlias(original_name, new_name)
# Add the handlers to the list of handlers.
# One handler is to handle when users provide the alias.
# The other handler is to update the documentation to show only
# the alias.
parameter_build_event_handler_tuple = (
'before-parameter-build.' + event_portion,
parameter_alias.alias_parameter_in_call,
REGISTER_FIRST
)
docs_event_handler_tuple = (
'docs.*.' + event_portion + '.complete-section',
parameter_alias.alias_parameter_in_documentation)
handler_list.append(parameter_build_event_handler_tuple)
handler_list.append(docs_event_handler_tuple)
class ParameterAlias(object):
def __init__(self, original_name, alias_name):
self._original_name = original_name
self._alias_name = alias_name
def alias_parameter_in_call(self, params, model, **kwargs):
if model.input_shape:
# Only consider accepting the alias if it is modeled in the
# input shape.
if self._original_name in model.input_shape.members:
if self._alias_name in params:
if self._original_name in params:
raise AliasConflictParameterError(
original=self._original_name,
alias=self._alias_name,
operation=model.name
)
# Remove the alias parameter value and use the old name
# instead.
params[self._original_name] = params.pop(self._alias_name)
def alias_parameter_in_documentation(self, event_name, section, **kwargs):
if event_name.startswith('docs.request-params'):
if self._original_name not in section.available_sections:
return
# Replace the name for parameter type
param_section = section.get_section(self._original_name)
param_type_section = param_section.get_section('param-type')
self._replace_content(param_type_section)
# Replace the name for the parameter description
param_name_section = param_section.get_section('param-name')
self._replace_content(param_name_section)
elif event_name.startswith('docs.request-example'):
section = section.get_section('structure-value')
if self._original_name not in section.available_sections:
return
# Replace the name for the example
param_section = section.get_section(self._original_name)
self._replace_content(param_section)
def _replace_content(self, section):
content = section.getvalue().decode('utf-8')
updated_content = content.replace(
self._original_name, self._alias_name)
section.clear_text()
section.write(updated_content)
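# Illustrative sketch (editor-added; the model object is hypothetical): with
# the 'ec2.*.Filter' -> 'Filters' alias registered in _add_parameter_aliases
# above, callers may use the alias name in their parameters, e.g.
#
#   alias = ParameterAlias('Filter', 'Filters')
#   params = {'Filters': [{'Name': 'instance-type', 'Values': ['t2.micro']}]}
#   alias.alias_parameter_in_call(params, model)
#   # params -> {'Filter': [{'Name': 'instance-type', 'Values': ['t2.micro']}]}
#   # Supplying both 'Filter' and 'Filters' raises AliasConflictParameterError.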
class ClientMethodAlias(object):
def __init__(self, actual_name):
""" Aliases a non-extant method to an existing method.
:param actual_name: The name of the method that actually exists on
the client.
"""
self._actual = actual_name
def __call__(self, client, **kwargs):
return getattr(client, self._actual)
# TODO: Remove this class as it is no longer used
class HeaderToHostHoister(object):
"""Takes a header and moves it to the front of the hoststring.
"""
_VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE)
def __init__(self, header_name):
self._header_name = header_name
def hoist(self, params, **kwargs):
"""Hoist a header to the hostname.
Hoist a header to the beginning of the hostname with a suffix "." after
it. The original header should be removed from the header map. This
method is intended to be used as a target for the before-call event.
"""
if self._header_name not in params['headers']:
return
header_value = params['headers'][self._header_name]
self._ensure_header_is_valid_host(header_value)
original_url = params['url']
new_url = self._prepend_to_host(original_url, header_value)
params['url'] = new_url
def _ensure_header_is_valid_host(self, header):
match = self._VALID_HOSTNAME.match(header)
if not match:
raise ParamValidationError(report=(
'Hostnames must contain only - and alphanumeric characters, '
                'and be between 1 and 63 characters long.'
))
def _prepend_to_host(self, url, prefix):
url_components = urlsplit(url)
parts = url_components.netloc.split('.')
parts = [prefix] + parts
new_netloc = '.'.join(parts)
new_components = (
url_components.scheme,
new_netloc,
url_components.path,
url_components.query,
''
)
new_url = urlunsplit(new_components)
return new_url
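# Illustrative sketch (editor-added; the header name and URL are hypothetical,
# and this class is no longer registered): hoisting prepends the header value
# to the host name, e.g.
#
#   hoister = HeaderToHostHoister('x-amz-bucket')
#   params = {'headers': {'x-amz-bucket': 'mybucket'},
#             'url': 'https://s3.us-west-2.amazonaws.com/'}
#   hoister.hoist(params)
#   # params['url'] -> 'https://mybucket.s3.us-west-2.amazonaws.com/'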
def inject_api_version_header_if_needed(model, params, **kwargs):
if not model.is_endpoint_discovery_operation:
return
params['headers']['x-amz-api-version'] = model.service_model.api_version
def remove_lex_v2_start_conversation(class_attributes, **kwargs):
"""Operation requires h2 which is currently unsupported in Python"""
if 'start_conversation' in class_attributes:
del class_attributes['start_conversation']
# This is a list of (event_name, handler).
# When a Session is created, everything in this list will be
# automatically registered with that Session.
BUILTIN_HANDLERS = [
('choose-service-name', handle_service_name_alias),
('getattr.mturk.list_hi_ts_for_qualification_type',
ClientMethodAlias('list_hits_for_qualification_type')),
('before-parameter-build.s3.UploadPart',
convert_body_to_file_like_object, REGISTER_LAST),
('before-parameter-build.s3.PutObject',
convert_body_to_file_like_object, REGISTER_LAST),
('creating-client-class', add_generate_presigned_url),
('creating-client-class.s3', add_generate_presigned_post),
('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2),
('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation),
('after-call.iam', json_decode_policies),
('after-call.ec2.GetConsoleOutput', decode_console_output),
('after-call.cloudformation.GetTemplate', json_decode_template_body),
('after-call.s3.GetBucketLocation', parse_get_bucket_location),
('before-parameter-build', generate_idempotent_uuid),
('before-parameter-build.s3', validate_bucket_name),
('before-parameter-build.s3.ListObjects',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.ListObjectsV2',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.ListObjectVersions',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.CopyObject',
handle_copy_source_param),
('before-parameter-build.s3.UploadPartCopy',
handle_copy_source_param),
('before-parameter-build.s3.CopyObject', validate_ascii_metadata),
('before-parameter-build.s3.PutObject', validate_ascii_metadata),
('before-parameter-build.s3.CreateMultipartUpload',
validate_ascii_metadata),
('docs.*.s3.CopyObject.complete-section', document_copy_source_form),
('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form),
('before-call.s3', add_expect_header),
('before-call.glacier', add_glacier_version),
('before-call.apigateway', add_accept_header),
('before-call.s3.PutObject', conditionally_calculate_md5),
('before-call.s3.UploadPart', conditionally_calculate_md5),
('before-call.s3.DeleteObjects', escape_xml_payload),
('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload),
('before-call.glacier.UploadArchive', add_glacier_checksums),
('before-call.glacier.UploadMultipartPart', add_glacier_checksums),
('before-call.ec2.CopySnapshot', inject_presigned_url_ec2),
('request-created.machinelearning.Predict', switch_host_machinelearning),
('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST),
('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST),
('needs-retry.s3.CompleteMultipartUpload', check_for_200_error,
REGISTER_FIRST),
('choose-signer.cognito-identity.GetId', disable_signing),
('choose-signer.cognito-identity.GetOpenIdToken', disable_signing),
('choose-signer.cognito-identity.UnlinkIdentity', disable_signing),
('choose-signer.cognito-identity.GetCredentialsForIdentity',
disable_signing),
('choose-signer.sts.AssumeRoleWithSAML', disable_signing),
('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing),
('choose-signer', set_operation_specific_signer),
('before-parameter-build.s3.HeadObject', sse_md5),
('before-parameter-build.s3.GetObject', sse_md5),
('before-parameter-build.s3.PutObject', sse_md5),
('before-parameter-build.s3.CopyObject', sse_md5),
('before-parameter-build.s3.CopyObject', copy_source_sse_md5),
('before-parameter-build.s3.CreateMultipartUpload', sse_md5),
('before-parameter-build.s3.UploadPart', sse_md5),
('before-parameter-build.s3.UploadPartCopy', sse_md5),
('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5),
('before-parameter-build.ec2.RunInstances', base64_encode_user_data),
('before-parameter-build.autoscaling.CreateLaunchConfiguration',
base64_encode_user_data),
('before-parameter-build.route53', fix_route53_ids),
('before-parameter-build.glacier', inject_account_id),
('after-call.s3.ListObjects', decode_list_object),
('after-call.s3.ListObjectsV2', decode_list_object_v2),
('after-call.s3.ListObjectVersions', decode_list_object_versions),
# Cloudsearchdomain search operation will be sent by HTTP POST
('request-created.cloudsearchdomain.Search',
change_get_to_post),
# Glacier documentation customizations
('docs.*.glacier.*.complete-section',
     AutoPopulatedParam('accountId', 'Note: this parameter is set to "-" by '
                        'default if no value is specified.')
.document_auto_populated_param),
('docs.*.glacier.UploadArchive.complete-section',
AutoPopulatedParam('checksum').document_auto_populated_param),
('docs.*.glacier.UploadMultipartPart.complete-section',
AutoPopulatedParam('checksum').document_auto_populated_param),
('docs.request-params.glacier.CompleteMultipartUpload.complete-section',
document_glacier_tree_hash_checksum()),
# Cloudformation documentation customizations
('docs.*.cloudformation.GetTemplate.complete-section',
document_cloudformation_get_template_return_type),
# UserData base64 encoding documentation customizations
('docs.*.ec2.RunInstances.complete-section',
document_base64_encoding('UserData')),
('docs.*.autoscaling.CreateLaunchConfiguration.complete-section',
document_base64_encoding('UserData')),
# EC2 CopySnapshot documentation customizations
('docs.*.ec2.CopySnapshot.complete-section',
AutoPopulatedParam('PresignedUrl').document_auto_populated_param),
('docs.*.ec2.CopySnapshot.complete-section',
AutoPopulatedParam('DestinationRegion').document_auto_populated_param),
# S3 SSE documentation modifications
('docs.*.s3.*.complete-section',
AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param),
# S3 SSE Copy Source documentation modifications
('docs.*.s3.*.complete-section',
AutoPopulatedParam(
'CopySourceSSECustomerKeyMD5').document_auto_populated_param),
# Add base64 information to Lambda
('docs.*.lambda.UpdateFunctionCode.complete-section',
document_base64_encoding('ZipFile')),
# The following S3 operations cannot actually accept a ContentMD5
('docs.*.s3.*.complete-section',
HideParamFromOperations(
's3', 'ContentMD5',
['DeleteObjects', 'PutBucketAcl', 'PutBucketCors',
'PutBucketLifecycle', 'PutBucketLogging', 'PutBucketNotification',
'PutBucketPolicy', 'PutBucketReplication', 'PutBucketRequestPayment',
'PutBucketTagging', 'PutBucketVersioning', 'PutBucketWebsite',
'PutObjectAcl']).hide_param),
#############
# RDS
#############
('creating-client-class.rds', add_generate_db_auth_token),
('before-call.rds.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.rds.CreateDBCluster',
inject_presigned_url_rds),
('before-call.rds.CopyDBSnapshot',
inject_presigned_url_rds),
('before-call.rds.CreateDBInstanceReadReplica',
inject_presigned_url_rds),
('before-call.rds.StartDBInstanceAutomatedBackupsReplication',
inject_presigned_url_rds),
# RDS PresignedUrl documentation customizations
('docs.*.rds.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CopyDBSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CreateDBInstanceReadReplica.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
#############
# Neptune
#############
('before-call.neptune.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.neptune.CreateDBCluster',
inject_presigned_url_rds),
# Neptune PresignedUrl documentation customizations
('docs.*.neptune.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.neptune.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
#############
# DocDB
#############
('before-call.docdb.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.docdb.CreateDBCluster',
inject_presigned_url_rds),
# DocDB PresignedUrl documentation customizations
('docs.*.docdb.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.docdb.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
###########
# SMS Voice
##########
('docs.title.sms-voice',
DeprecatedServiceDocumenter(
'pinpoint-sms-voice').inject_deprecation_notice),
('before-call', inject_api_version_header_if_needed),
]
_add_parameter_aliases(BUILTIN_HANDLERS)
| 46,277 | Python | 39.701847 | 114 | 0.659528 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/exceptions.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3
def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
    # This is helpful for reducing Exceptions that only accept kwargs,
    # since __reduce__ can only supply positional arguments.
# Ideally, this would also be a class method on the BotoCoreError
# but instance methods cannot be pickled.
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return exception_cls(*args, **kwargs)
class BotoCoreError(Exception):
"""
The base exception class for BotoCore exceptions.
:ivar msg: The descriptive message associated with the error.
"""
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
def __reduce__(self):
return _exception_from_packed_args, (self.__class__, None, self.kwargs)
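# Illustrative sketch (editor-added): concrete exceptions only define ``fmt``;
# keyword arguments fill the template and are kept for pickling, e.g.
#
#   err = DataNotFoundError(data_path='endpoints.json')
#   str(err)    # -> 'Unable to load data for: endpoints.json'
#   err.kwargs  # -> {'data_path': 'endpoints.json'}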
class DataNotFoundError(BotoCoreError):
"""
The data associated with a particular path could not be loaded.
:ivar data_path: The data path that the user attempted to load.
"""
fmt = 'Unable to load data for: {data_path}'
class UnknownServiceError(DataNotFoundError):
"""Raised when trying to load data for an unknown service.
:ivar service_name: The name of the unknown service.
"""
fmt = (
"Unknown service: '{service_name}'. Valid service names are: "
"{known_service_names}")
class ApiVersionNotFoundError(BotoCoreError):
"""
The data associated with either the API version or a compatible one
could not be loaded.
:ivar data_path: The data path that the user attempted to load.
:ivar api_version: The API version that the user attempted to load.
"""
fmt = 'Unable to load data {data_path} for: {api_version}'
class HTTPClientError(BotoCoreError):
fmt = 'An HTTP Client raised an unhandled exception: {error}'
def __init__(self, request=None, response=None, **kwargs):
self.request = request
self.response = response
super(HTTPClientError, self).__init__(**kwargs)
def __reduce__(self):
return _exception_from_packed_args, (
self.__class__, (self.request, self.response), self.kwargs)
class ConnectionError(BotoCoreError):
fmt = 'An HTTP Client failed to establish a connection: {error}'
class InvalidIMDSEndpointError(BotoCoreError):
fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}'
class EndpointConnectionError(ConnectionError):
fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
class SSLError(ConnectionError, requests.exceptions.SSLError):
fmt = 'SSL validation failed for {endpoint_url} {error}'
class ConnectionClosedError(HTTPClientError):
fmt = (
'Connection was closed before we received a valid response '
'from endpoint URL: "{endpoint_url}".')
class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout,
urllib3.exceptions.ReadTimeoutError):
fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
class NoCredentialsError(BotoCoreError):
"""
No credentials could be found.
"""
fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
"""
Only partial credentials were found.
:ivar cred_var: The missing credential variable name.
"""
fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class CredentialRetrievalError(BotoCoreError):
"""
Error attempting to retrieve credentials from a remote source.
:ivar provider: The name of the credential provider.
:ivar error_msg: The msg explaining why credentials could not be
retrieved.
"""
fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
class UnknownSignatureVersionError(BotoCoreError):
"""
Requested Signature Version is not known.
:ivar signature_version: The name of the requested signature version.
"""
fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
"""
The service is not available in requested region.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = 'Service {service_name} not available in region {region_name}'
class BaseEndpointResolverError(BotoCoreError):
"""Base error for endpoint resolving errors.
Should never be raised directly, but clients can catch
this exception if they want to generically handle any errors
during the endpoint resolution process.
"""
class NoRegionError(BaseEndpointResolverError):
"""No region was specified."""
fmt = 'You must specify a region.'
class UnknownEndpointError(BaseEndpointResolverError, ValueError):
"""
Could not construct an endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'Unable to construct an endpoint for '
'{service_name} in region {region_name}')
class ProfileNotFound(BotoCoreError):
"""
The specified configuration profile was not found in the
configuration file.
:ivar profile: The name of the profile the user attempted to load.
"""
fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
"""
The configuration file could not be parsed.
:ivar path: The path to the configuration file.
"""
fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
"""
The specified configuration file could not be found.
:ivar path: The path to the configuration file.
"""
fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
"""
One or more required parameters were not supplied.
:ivar object: The object that has missing parameters.
This can be an operation or a parameter (in the
case of inner params). The str() of this object
will be used so it doesn't need to implement anything
other than str().
:ivar missing: The names of the missing parameters.
"""
fmt = ('The following required parameters are missing for '
'{object_name}: {missing}')
class ValidationError(BotoCoreError):
"""
An exception occurred validating parameters.
Subclasses must accept a ``value`` and ``param``
argument in their ``__init__``.
:ivar value: The value that was being validated.
:ivar param: The parameter that failed validation.
:ivar type_name: The name of the underlying type.
"""
fmt = ("Invalid value ('{value}') for param {param} "
"of type {type_name} ")
class ParamValidationError(BotoCoreError):
fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possible validation
# error.
class UnknownKeyError(ValidationError):
"""
Unknown key in a struct parameter.
:ivar value: The value that was being checked.
:ivar param: The name of the parameter.
:ivar choices: The valid choices the value can be.
"""
fmt = ("Unknown key '{value}' for param '{param}'. Must be one "
"of: {choices}")
class RangeError(ValidationError):
"""
A parameter value was out of the valid range.
:ivar value: The value that was being checked.
:ivar param: The parameter that failed validation.
:ivar min_value: The specified minimum value.
:ivar max_value: The specified maximum value.
"""
fmt = ('Value out of range for param {param}: '
'{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
"""
Unknown top level parameter.
:ivar name: The name of the unknown parameter.
:ivar operation: The name of the operation.
:ivar choices: The valid choices the parameter name can be.
"""
fmt = (
"Unknown parameter '{name}' for operation {operation}. Must be one "
"of: {choices}"
)
class InvalidRegionError(ValidationError, ValueError):
"""
Invalid region_name provided to client or resource.
:ivar region_name: region_name that was being validated.
"""
fmt = (
"Provided region_name '{region_name}' doesn't match a supported format."
)
class AliasConflictParameterError(ValidationError):
"""
Error when an alias is provided for a parameter as well as the original.
:ivar original: The name of the original parameter.
:ivar alias: The name of the alias
:ivar operation: The name of the operation.
"""
fmt = (
"Parameter '{original}' and its alias '{alias}' were provided "
"for operation {operation}. Only one of them may be used."
)
class UnknownServiceStyle(BotoCoreError):
"""
Unknown style of service invocation.
:ivar service_style: The style requested.
"""
fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
fmt = 'Operation cannot be paginated: {operation_name}'
class ChecksumError(BotoCoreError):
"""The expected checksum did not match the calculated checksum.
"""
fmt = ('Checksum {checksum_type} failed, expected checksum '
'{expected_checksum} did not match calculated checksum '
'{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
"""Need to seek a stream, but stream does not support seeking.
"""
fmt = ('Need to rewind the stream {stream_object}, but stream '
'is not seekable.')
class WaiterError(BotoCoreError):
"""Waiter failed to reach desired state."""
fmt = 'Waiter {name} failed: {reason}'
def __init__(self, name, reason, last_response):
super(WaiterError, self).__init__(name=name, reason=reason)
self.last_response = last_response
class IncompleteReadError(BotoCoreError):
"""HTTP response did not return expected number of bytes."""
fmt = ('{actual_bytes} read, but total bytes '
'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
"""Expression is either invalid or too complex."""
fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
"""Tried to insert before/after an unregistered credential type."""
fmt = 'Credential named {name} not found.'
class WaiterConfigError(BotoCoreError):
"""Error when processing waiter configuration."""
fmt = 'Error processing waiter config: {error_msg}'
class UnknownClientMethodError(BotoCoreError):
"""Error when trying to access a method on a client that does not exist."""
fmt = 'Client does not have method: {method_name}'
class UnsupportedSignatureVersionError(BotoCoreError):
"""Error when trying to use an unsupported Signature Version."""
fmt = 'Signature version is not supported: {signature_version}'
class ClientError(Exception):
MSG_TEMPLATE = (
'An error occurred ({error_code}) when calling the {operation_name} '
'operation{retry_info}: {error_message}')
def __init__(self, error_response, operation_name):
retry_info = self._get_retry_info(error_response)
error = error_response.get('Error', {})
msg = self.MSG_TEMPLATE.format(
error_code=error.get('Code', 'Unknown'),
error_message=error.get('Message', 'Unknown'),
operation_name=operation_name,
retry_info=retry_info,
)
super(ClientError, self).__init__(msg)
self.response = error_response
self.operation_name = operation_name
def _get_retry_info(self, response):
retry_info = ''
if 'ResponseMetadata' in response:
metadata = response['ResponseMetadata']
if metadata.get('MaxAttemptsReached', False):
if 'RetryAttempts' in metadata:
retry_info = (' (reached max retries: %s)' %
metadata['RetryAttempts'])
return retry_info
def __reduce__(self):
# Subclasses of ClientError's are dynamically generated and
# cannot be pickled unless they are attributes of a
# module. So at the very least return a ClientError back.
return ClientError, (self.response, self.operation_name)
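# Illustrative sketch (editor-added; the client, bucket, key, and helper names
# below are hypothetical): callers usually catch ClientError and branch on the
# parsed error code, e.g.
#
#   try:
#       s3.get_object(Bucket='my-bucket', Key='missing-key')
#   except ClientError as e:
#       if e.response['Error']['Code'] == 'NoSuchKey':
#           handle_missing_object()
#       else:
#           raise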
class EventStreamError(ClientError):
pass
class UnsupportedTLSVersionWarning(Warning):
"""Warn when an openssl version that uses TLS 1.2 is required"""
pass
class ImminentRemovalWarning(Warning):
pass
class InvalidDNSNameError(BotoCoreError):
"""Error when virtual host path is forced on a non-DNS compatible bucket"""
fmt = (
'Bucket named {bucket_name} is not DNS compatible. Virtual '
'hosted-style addressing cannot be used. The addressing style '
'can be configured by removing the addressing_style value '
'or setting that value to \'path\' or \'auto\' in the AWS Config '
'file or in the botocore.client.Config object.'
)
class InvalidS3AddressingStyleError(BotoCoreError):
"""Error when an invalid path style is specified"""
fmt = (
'S3 addressing style {s3_addressing_style} is invalid. Valid options '
'are: \'auto\', \'virtual\', and \'path\''
)
class UnsupportedS3ArnError(BotoCoreError):
"""Error when S3 ARN provided to Bucket parameter is not supported"""
fmt = (
'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
'ARNs for S3 access-points are supported.'
)
class UnsupportedS3ControlArnError(BotoCoreError):
"""Error when S3 ARN provided to S3 control parameter is not supported"""
fmt = (
'S3 ARN "{arn}" provided is invalid for this operation. {msg}'
)
class InvalidHostLabelError(BotoCoreError):
"""Error when an invalid host label would be bound to an endpoint"""
fmt = (
'Invalid host label to be bound to the hostname of the endpoint: '
'"{label}".'
)
class UnsupportedOutpostResourceError(BotoCoreError):
"""Error when S3 Outpost ARN provided to Bucket parameter is incomplete"""
fmt = (
'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" '
        'parameter is invalid. Only ARNs for S3 Outposts with an '
'access-point sub-resource are supported.'
)
class UnsupportedS3ConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3: {msg}'
)
class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3 access-points: {msg}'
)
class InvalidEndpointDiscoveryConfigurationError(BotoCoreError):
"""Error when invalid value supplied for endpoint_discovery_enabled"""
fmt = (
'Unsupported configuration value for endpoint_discovery_enabled. '
'Expected one of ("true", "false", "auto") but got {config_value}.'
)
class UnsupportedS3ControlConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with S3 Control"""
fmt = (
'Unsupported configuration when using S3 Control: {msg}'
)
class InvalidRetryConfigurationError(BotoCoreError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Cannot provide retry configuration for "{retry_config_option}". '
'Valid retry configuration options are: \'max_attempts\''
)
class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Value provided to "max_attempts": {provided_max_attempts} must '
'be an integer greater than or equal to {min_value}.'
)
class InvalidRetryModeError(InvalidRetryConfigurationError):
"""Error when invalid retry mode configuration is specified"""
fmt = (
'Invalid value provided to "mode": "{provided_retry_mode}" must '
'be one of: "legacy", "standard", "adaptive"'
)
class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
"""Error for invalid s3 us-east-1 regional endpoints configuration"""
fmt = (
'S3 us-east-1 regional endpoint option '
'{s3_us_east_1_regional_endpoint_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
"""Error when invalid sts regional endpoints configuration is specified"""
fmt = (
'STS regional endpoints option {sts_regional_endpoints_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class StubResponseError(BotoCoreError):
fmt = 'Error getting response stub for operation {operation_name}: {reason}'
class StubAssertionError(StubResponseError, AssertionError):
pass
class UnStubbedResponseError(StubResponseError):
pass
class InvalidConfigError(BotoCoreError):
fmt = '{error_msg}'
class InfiniteLoopConfigError(InvalidConfigError):
fmt = (
'Infinite loop in credential configuration detected. Attempting to '
'load from profile {source_profile} which has already been visited. '
'Visited profiles: {visited_profiles}'
)
class RefreshWithMFAUnsupportedError(BotoCoreError):
fmt = 'Cannot refresh credentials: MFA token required.'
class MD5UnavailableError(BotoCoreError):
fmt = "This system does not support MD5 generation."
class MetadataRetrievalError(BotoCoreError):
fmt = "Error retrieving metadata: {error_msg}"
class UndefinedModelAttributeError(Exception):
pass
class MissingServiceIdError(UndefinedModelAttributeError):
fmt = (
"The model being used for the service {service_name} is missing the "
"serviceId metadata property, which is required."
)
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
class SSOError(BotoCoreError):
fmt = "An unspecified error happened when resolving SSO credentials"
class SSOTokenLoadError(SSOError):
fmt = "Error loading SSO Token: {error_msg}"
class UnauthorizedSSOTokenError(SSOError):
fmt = (
"The SSO session associated with this profile has expired or is "
"otherwise invalid. To refresh this SSO session run aws sso login "
"with the corresponding profile."
)
class CapacityNotAvailableError(BotoCoreError):
fmt = (
'Insufficient request capacity available.'
)
class InvalidProxiesConfigError(BotoCoreError):
fmt = (
'Invalid configuration value(s) provided for proxies_config.'
)
| 20,137 | Python | 29.933948 | 80 | 0.684015 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/signers.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import weakref
import json
import base64
import botocore
import botocore.auth
from botocore.compat import six, OrderedDict
from botocore.awsrequest import create_request_object, prepare_request_dict
from botocore.exceptions import UnknownSignatureVersionError
from botocore.exceptions import UnknownClientMethodError
from botocore.exceptions import UnsupportedSignatureVersionError
from botocore.utils import fix_s3_host, datetime2timestamp
class RequestSigner(object):
"""
An object to sign requests before they go out over the wire using
one of the authentication mechanisms defined in ``auth.py``. This
class fires two events scoped to a service and operation name:
* choose-signer: Allows overriding the auth signer name.
* before-sign: Allows mutating the request before signing.
Together these events allow for customization of the request
signing pipeline, including overrides, request path manipulation,
and disabling signing per operation.
:type service_id: botocore.model.ServiceId
:param service_id: The service id for the service, e.g. ``S3``
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:type credentials: :py:class:`~botocore.credentials.Credentials`
:param credentials: User credentials with which to sign requests.
:type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks`
:param event_emitter: Extension mechanism to fire events.
"""
def __init__(self, service_id, region_name, signing_name,
signature_version, credentials, event_emitter):
self._region_name = region_name
self._signing_name = signing_name
self._signature_version = signature_version
self._credentials = credentials
self._service_id = service_id
# We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6
self._event_emitter = weakref.proxy(event_emitter)
@property
def region_name(self):
return self._region_name
@property
def signature_version(self):
return self._signature_version
@property
def signing_name(self):
return self._signing_name
def handler(self, operation_name=None, request=None, **kwargs):
# This is typically hooked up to the "request-created" event
# from a client's event emitter. When a new request is created
# this method is invoked to sign the request.
# Don't call this method directly.
return self.sign(operation_name, request)
def sign(self, operation_name, request, region_name=None,
signing_type='standard', expires_in=None, signing_name=None):
"""Sign a request before it goes out over the wire.
:type operation_name: string
:param operation_name: The name of the current operation, e.g.
``ListBuckets``.
:type request: AWSRequest
:param request: The request object to be sent over the wire.
:type region_name: str
:param region_name: The region to sign the request for.
:type signing_type: str
:param signing_type: The type of signing to perform. This can be one of
three possible values:
* 'standard' - This should be used for most requests.
* 'presign-url' - This should be used when pre-signing a request.
* 'presign-post' - This should be used when pre-signing an S3 post.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. This parameter is only valid for signing type 'presign-url'.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
"""
explicit_region_name = region_name
if region_name is None:
region_name = self._region_name
if signing_name is None:
signing_name = self._signing_name
signature_version = self._choose_signer(
operation_name, signing_type, request.context)
# Allow mutating request before signing
self._event_emitter.emit(
'before-sign.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
request=request, signing_name=signing_name,
region_name=self._region_name,
signature_version=signature_version, request_signer=self,
operation_name=operation_name
)
if signature_version != botocore.UNSIGNED:
kwargs = {
'signing_name': signing_name,
'region_name': region_name,
'signature_version': signature_version
}
if expires_in is not None:
kwargs['expires'] = expires_in
signing_context = request.context.get('signing', {})
if not explicit_region_name and signing_context.get('region'):
kwargs['region_name'] = signing_context['region']
if signing_context.get('signing_name'):
kwargs['signing_name'] = signing_context['signing_name']
try:
auth = self.get_auth_instance(**kwargs)
except UnknownSignatureVersionError as e:
if signing_type != 'standard':
raise UnsupportedSignatureVersionError(
signature_version=signature_version)
else:
raise e
auth.add_auth(request)
def _choose_signer(self, operation_name, signing_type, context):
"""
Allow setting the signature version via the choose-signer event.
A value of `botocore.UNSIGNED` means no signing will be performed.
:param operation_name: The operation to sign.
:param signing_type: The type of signing that the signer is to be used
for.
:return: The signature version to sign with.
"""
signing_type_suffix_map = {
'presign-post': '-presign-post',
'presign-url': '-query'
}
suffix = signing_type_suffix_map.get(signing_type, '')
signature_version = self._signature_version
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
handler, response = self._event_emitter.emit_until_response(
'choose-signer.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
signing_name=self._signing_name, region_name=self._region_name,
signature_version=signature_version, context=context)
if response is not None:
signature_version = response
# The suffix needs to be checked again in case we get an improper
# signature version from choose-signer.
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
return signature_version
def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
"""
Get an auth instance which can be used to sign a request
using the given signature version.
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:rtype: :py:class:`~botocore.auth.BaseSigner`
:return: Auth instance to sign a request.
"""
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
# If there's no credentials provided (i.e credentials is None),
# then we'll pass a value of "None" over to the auth classes,
# which already handle the cases where no credentials have
# been provided.
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
# Alias get_auth for backwards compatibility.
get_auth = get_auth_instance
def generate_presigned_url(self, request_dict, operation_name,
expires_in=3600, region_name=None,
signing_name=None):
"""Generates a presigned url
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type operation_name: str
:param operation_name: The operation being signed.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type region_name: string
:param region_name: The region name to sign the presigned url.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
:returns: The presigned url
"""
request = create_request_object(request_dict)
self.sign(operation_name, request, region_name,
'presign-url', expires_in, signing_name)
request.prepare()
return request.url
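# Usage sketch (not part of botocore): how a RequestSigner is typically
# driven once a client has built a request dict. The constructor arguments
# and the ``request_dict`` below are hypothetical placeholders; in practice
# the client wires this up through ``prepare_request_dict()`` and its own
# event emitter.
#
#   signer = RequestSigner(service_id, 'us-east-1', 's3', 'v4',
#                          credentials, event_emitter)
#   url = signer.generate_presigned_url(
#       request_dict, operation_name='GetObject', expires_in=300)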
class CloudFrontSigner(object):
'''A signer to create a signed CloudFront URL.
First you create a cloudfront signer based on a normalized RSA signer::
import rsa
def rsa_signer(message):
private_key = open('private_key.pem', 'r').read()
return rsa.sign(
message,
rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')),
'SHA-1') # CloudFront requires SHA-1 hash
cf_signer = CloudFrontSigner(key_id, rsa_signer)
To sign with a canned policy::
signed_url = cf_signer.generate_signed_url(
url, date_less_than=datetime(2015, 12, 1))
To sign with a custom policy::
signed_url = cf_signer.generate_signed_url(url, policy=my_policy)
'''
def __init__(self, key_id, rsa_signer):
"""Create a CloudFrontSigner.
:type key_id: str
:param key_id: The CloudFront Key Pair ID
:type rsa_signer: callable
:param rsa_signer: An RSA signer.
Its only input parameter will be the message to be signed,
and its output will be the signed content as a binary string.
The hash algorithm needed by CloudFront is SHA-1.
"""
self.key_id = key_id
self.rsa_signer = rsa_signer
def generate_presigned_url(self, url, date_less_than=None, policy=None):
"""Creates a signed CloudFront URL based on given parameters.
:type url: str
:param url: The URL of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after that date and time
:type policy: str
:param policy: The custom policy, possibly built by self.build_policy()
:rtype: str
:return: The signed URL.
"""
if (date_less_than is not None and policy is not None or
date_less_than is None and policy is None):
e = 'Need to provide either date_less_than or policy, but not both'
raise ValueError(e)
if date_less_than is not None:
# We still need to build a canned policy for signing purpose
policy = self.build_policy(url, date_less_than)
if isinstance(policy, six.text_type):
policy = policy.encode('utf8')
if date_less_than is not None:
params = ['Expires=%s' % int(datetime2timestamp(date_less_than))]
else:
params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')]
signature = self.rsa_signer(policy)
params.extend([
'Signature=%s' % self._url_b64encode(signature).decode('utf8'),
'Key-Pair-Id=%s' % self.key_id,
])
return self._build_url(url, params)
def _build_url(self, base_url, extra_params):
separator = '&' if '?' in base_url else '?'
return base_url + separator + '&'.join(extra_params)
def build_policy(self, resource, date_less_than,
date_greater_than=None, ip_address=None):
"""A helper to build policy.
:type resource: str
:param resource: The URL or the stream filename of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after the time has passed
:type date_greater_than: datetime
:param date_greater_than: The URL will not be valid until this time
:type ip_address: str
:param ip_address: Use 'x.x.x.x' for an IP, or 'x.x.x.x/x' for a subnet
:rtype: str
:return: The policy in a compact string.
"""
# Note:
# 1. Order in canned policy is significant. Special care has been taken
# to ensure the output will match the order defined by the document.
# There is also a test case to ensure that order.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-policy-statement
# 2. Albeit the order in custom policy is not required by CloudFront,
# we still use OrderedDict internally to ensure the result is stable
# and also matches canned policy requirement.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html
moment = int(datetime2timestamp(date_less_than))
condition = OrderedDict({"DateLessThan": {"AWS:EpochTime": moment}})
if ip_address:
if '/' not in ip_address:
ip_address += '/32'
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
if date_greater_than:
moment = int(datetime2timestamp(date_greater_than))
condition["DateGreaterThan"] = {"AWS:EpochTime": moment}
ordered_payload = [('Resource', resource), ('Condition', condition)]
custom_policy = {"Statement": [OrderedDict(ordered_payload)]}
return json.dumps(custom_policy, separators=(',', ':'))
def _url_b64encode(self, data):
# Required by CloudFront. See also:
# http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html
return base64.b64encode(
data).replace(b'+', b'-').replace(b'=', b'_').replace(b'/', b'~')
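# Usage sketch (illustrative only): signing with a custom policy built by
# build_policy(). ``key_id`` and ``rsa_signer`` are assumed to be set up as
# in the class docstring above, and the distribution URL is made up.
#
#   signer = CloudFrontSigner(key_id, rsa_signer)
#   policy = signer.build_policy(
#       'https://d111111abcdef8.cloudfront.net/image.jpg',
#       date_less_than=datetime(2015, 12, 1),
#       ip_address='192.0.2.0/24')
#   signed_url = signer.generate_presigned_url(
#       'https://d111111abcdef8.cloudfront.net/image.jpg', policy=policy)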
def add_generate_db_auth_token(class_attributes, **kwargs):
class_attributes['generate_db_auth_token'] = generate_db_auth_token
def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
"""Generates an auth token used to connect to a db with IAM credentials.
:type DBHostname: str
:param DBHostname: The hostname of the database to connect to.
:type Port: int
:param Port: The port number the database is listening on.
:type DBUsername: str
:param DBUsername: The username to log in as.
:type Region: str
:param Region: The region the database is in. If None, the client
region will be used.
:return: A presigned url which can be used as an auth token.
"""
region = Region
if region is None:
region = self.meta.region_name
params = {
'Action': 'connect',
'DBUser': DBUsername,
}
request_dict = {
'url_path': '/',
'query_string': '',
'headers': {},
'body': params,
'method': 'GET'
}
# RDS requires that the scheme not be set when sent over. This can cause
# issues when signing because the Python url parsing libraries follow
# RFC 1808 closely, which states that a netloc must be introduced by `//`.
# Otherwise the url is presumed to be relative, and thus the whole
# netloc would be treated as a path component. To work around this we
# introduce https here and remove it once we're done processing it.
scheme = 'https://'
endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port)
prepare_request_dict(request_dict, endpoint_url)
presigned_url = self._request_signer.generate_presigned_url(
operation_name='connect', request_dict=request_dict,
region_name=region, expires_in=900, signing_name='rds-db'
)
return presigned_url[len(scheme):]
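# Usage sketch (illustrative only): the returned token is used as the
# password when opening an SSL connection to the database. The
# ``rds_client`` and the driver call below are hypothetical and not part of
# this module.
#
#   hostname = 'mydb.123456789012.us-east-1.rds.amazonaws.com'
#   token = rds_client.generate_db_auth_token(
#       DBHostname=hostname, Port=3306, DBUsername='iam_user')
#   connection = driver.connect(host=hostname, user='iam_user',
#                               password=token, ssl=ssl_options)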
class S3PostPresigner(object):
def __init__(self, request_signer):
self._request_signer = request_signer
def generate_presigned_post(self, request_dict, fields=None,
conditions=None, expires_in=3600,
region_name=None):
"""Generates the url and the form fields used for a presigned s3 post
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type fields: dict
:param fields: A dictionary of prefilled form fields to build on top
of.
:type conditions: list
:param conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
{"bucket": "mybucket"},
["starts-with", "$key", "mykey"]
]
:type expires_in: int
:param expires_in: The number of seconds the presigned post is valid
for.
:type region_name: string
:param region_name: The region name to sign the presigned post to.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
            {'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
if fields is None:
fields = {}
if conditions is None:
conditions = []
# Create the policy for the post.
policy = {}
# Create an expiration date for the policy
datetime_now = datetime.datetime.utcnow()
expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
# Append all of the conditions that the user supplied.
policy['conditions'] = []
for condition in conditions:
policy['conditions'].append(condition)
# Store the policy and the fields in the request for signing
request = create_request_object(request_dict)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
self._request_signer.sign(
'PutObject', request, region_name, 'presign-post')
        # Return the url and the fields for the form to post.
return {'url': request.url, 'fields': fields}
def add_generate_presigned_url(class_attributes, **kwargs):
class_attributes['generate_presigned_url'] = generate_presigned_url
def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600,
HttpMethod=None):
"""Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
:returns: The presigned url
"""
client_method = ClientMethod
params = Params
if params is None:
params = {}
expires_in = ExpiresIn
http_method = HttpMethod
context = {
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
}
request_signer = self._request_signer
serializer = self._serializer
try:
operation_name = self._PY_TO_OP_NAME[client_method]
except KeyError:
raise UnknownClientMethodError(method_name=client_method)
operation_model = self.meta.service_model.operation_model(
operation_name)
params = self._emit_api_params(params, operation_model, context)
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
params, operation_model)
# Switch out the http method if user specified it.
if http_method is not None:
request_dict['method'] = http_method
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url, context=context)
# Generate the presigned url.
return request_signer.generate_presigned_url(
request_dict=request_dict, expires_in=expires_in,
operation_name=operation_name)
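# Usage sketch (illustrative only): once attached to a client class via
# ``add_generate_presigned_url``, this is called like any other client
# method. The S3 client, bucket, and key below are hypothetical.
#
#   url = s3_client.generate_presigned_url(
#       'get_object',
#       Params={'Bucket': 'mybucket', 'Key': 'mykey'},
#       ExpiresIn=300)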
def add_generate_presigned_post(class_attributes, **kwargs):
class_attributes['generate_presigned_post'] = generate_presigned_post
def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None,
ExpiresIn=3600):
"""Builds the url and the form fields used for a presigned s3 post
:type Bucket: string
:param Bucket: The name of the bucket to presign the post to. Note that
bucket related conditions should not be included in the
``conditions`` parameter.
:type Key: string
:param Key: Key name, optionally add ${filename} to the end to
attach the submitted filename. Note that key related conditions and
fields are filled out for you and should not be included in the
``Fields`` or ``Conditions`` parameter.
:type Fields: dict
:param Fields: A dictionary of prefilled form fields to build on top
of. Elements that may be included are acl, Cache-Control,
Content-Type, Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and x-amz-meta-.
Note that if a particular element is included in the fields
dictionary it will not be automatically added to the conditions
list. You must specify a condition for the element as well.
:type Conditions: list
:param Conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
["content-length-range", 2, 5],
["starts-with", "$success_action_redirect", ""]
]
Conditions that are included may pertain to acl,
content-length-range, Cache-Control, Content-Type,
Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and/or x-amz-meta-.
Note that if you include a condition, you must specify
            a valid value in the fields dictionary as well. A value will
not be added automatically to the fields dictionary based on the
conditions.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned post
is valid for.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
            {'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
bucket = Bucket
key = Key
fields = Fields
conditions = Conditions
expires_in = ExpiresIn
if fields is None:
fields = {}
else:
fields = fields.copy()
if conditions is None:
conditions = []
post_presigner = S3PostPresigner(self._request_signer)
serializer = self._serializer
# We choose the CreateBucket operation model because its url gets
# serialized to what a presign post requires.
operation_model = self.meta.service_model.operation_model(
'CreateBucket')
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
{'Bucket': bucket}, operation_model)
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url,
context={
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
},
)
        # Append the bucket name to the list of conditions.
conditions.append({'bucket': bucket})
# If the key ends with filename, the only constraint that can be
# imposed is if it starts with the specified prefix.
if key.endswith('${filename}'):
conditions.append(["starts-with", '$key', key[:-len('${filename}')]])
else:
conditions.append({'key': key})
# Add the key to the fields.
fields['key'] = key
return post_presigner.generate_presigned_post(
request_dict=request_dict, fields=fields, conditions=conditions,
expires_in=expires_in)
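# Usage sketch (illustrative only): posting a file with the returned url and
# form fields. The ``requests`` call is an assumption about the caller's
# HTTP library; it is not something this module provides.
#
#   post = s3_client.generate_presigned_post(
#       Bucket='mybucket', Key='uploads/${filename}',
#       Conditions=[['content-length-range', 1, 1024 * 1024]])
#   requests.post(post['url'], data=post['fields'],
#                 files={'file': open('local.txt', 'rb')})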
def _should_use_global_endpoint(client):
if client.meta.partition != 'aws':
return False
s3_config = client.meta.config.s3
if s3_config:
if s3_config.get('use_dualstack_endpoint', False):
return False
if s3_config.get('us_east_1_regional_endpoint') == 'regional' and \
client.meta.config.region_name == 'us-east-1':
return False
return True
| 28,234 | Python | 37.414966 | 194 | 0.629985 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/hooks.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import logging
from collections import defaultdict, deque, namedtuple
from botocore.compat import accepts_kwargs, six
from botocore.utils import EVENT_ALIASES
logger = logging.getLogger(__name__)
_NodeList = namedtuple('NodeList', ['first', 'middle', 'last'])
_FIRST = 0
_MIDDLE = 1
_LAST = 2
class NodeList(_NodeList):
def __copy__(self):
first_copy = copy.copy(self.first)
middle_copy = copy.copy(self.middle)
last_copy = copy.copy(self.last)
copied = NodeList(first_copy, middle_copy, last_copy)
return copied
def first_non_none_response(responses, default=None):
"""Find first non None response in a list of tuples.
This function can be used to find the first non None response from
handlers connected to an event. This is useful if you are interested
in the returned responses from event handlers. Example usage::
print(first_non_none_response([(func1, None), (func2, 'foo'),
(func3, 'bar')]))
# This will print 'foo'
:type responses: list of tuples
:param responses: The responses from the ``EventHooks.emit`` method.
This is a list of tuples, and each tuple is
(handler, handler_response).
:param default: If no non-None responses are found, then this default
value will be returned.
:return: The first non-None response in the list of tuples.
"""
for response in responses:
if response[1] is not None:
return response[1]
return default
class BaseEventHooks(object):
def emit(self, event_name, **kwargs):
"""Call all handlers subscribed to an event.
:type event_name: str
:param event_name: The name of the event to emit.
:type **kwargs: dict
:param **kwargs: Arbitrary kwargs to pass through to the
subscribed handlers. The ``event_name`` will be injected
            into the kwargs so it's not necessary to add this to **kwargs.
:rtype: list of tuples
:return: A list of ``(handler_func, handler_func_return_value)``
"""
return []
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler for a given event.
If a ``unique_id`` is given, the handler will not be registered
if a handler with the ``unique_id`` has already been registered.
Handlers are called in the order they have been registered.
Note handlers can also be registered with ``register_first()``
and ``register_last()``. All handlers registered with
``register_first()`` are called before handlers registered
with ``register()`` which are called before handlers registered
with ``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register,
unique_id_uses_count=unique_id_uses_count)
def register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called first for an event.
All event handlers registered with ``register_first()`` will
be called before handlers registered with ``register()`` and
``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_first,
unique_id_uses_count=unique_id_uses_count)
def register_last(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called last for an event.
All event handlers registered with ``register_last()`` will be called
after handlers registered with ``register_first()`` and ``register()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_last,
unique_id_uses_count=unique_id_uses_count)
def _verify_and_register(self, event_name, handler, unique_id,
register_method, unique_id_uses_count):
self._verify_is_callable(handler)
self._verify_accept_kwargs(handler)
register_method(event_name, handler, unique_id, unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister an event handler for a given event.
If no ``unique_id`` was given during registration, then the
first instance of the event handler is removed (if the event
handler has been registered multiple times).
"""
pass
def _verify_is_callable(self, func):
if not six.callable(func):
raise ValueError("Event handler %s must be callable." % func)
def _verify_accept_kwargs(self, func):
"""Verifies a callable accepts kwargs
:type func: callable
:param func: A callable object.
:returns: True, if ``func`` accepts kwargs, otherwise False.
"""
try:
if not accepts_kwargs(func):
raise ValueError("Event handler %s must accept keyword "
"arguments (**kwargs)" % func)
except TypeError:
return False
class HierarchicalEmitter(BaseEventHooks):
def __init__(self):
# We keep a reference to the handlers for quick
# read only access (we never modify self._handlers).
# A cache of event name to handler list.
self._lookup_cache = {}
self._handlers = _PrefixTrie()
# This is used to ensure that unique_id's are only
# registered once.
self._unique_id_handlers = {}
def _emit(self, event_name, kwargs, stop_on_response=False):
"""
Emit an event with optional keyword arguments.
:type event_name: string
:param event_name: Name of the event
:type kwargs: dict
:param kwargs: Arguments to be passed to the handler functions.
:type stop_on_response: boolean
:param stop_on_response: Whether to stop on the first non-None
response. If False, then all handlers
will be called. This is especially useful
to handlers which mutate data and then
want to stop propagation of the event.
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
            # Short circuit and return an empty response if we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
response = handler(**kwargs)
responses.append((handler, response))
if stop_on_response and response is not None:
return responses
return responses
def emit(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args.
>>> responses = emitter.emit(
... 'my-event.service.operation', arg1='one', arg2='two')
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
return self._emit(event_name, kwargs)
def emit_until_response(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args,
until the first non-``None`` response is received. This
method prevents subsequent handlers from being invoked.
>>> handler, response = emitter.emit_until_response(
'my-event.service.operation', arg1='one', arg2='two')
:rtype: tuple
:return: The first (handler, response) tuple where the response
is not ``None``, otherwise (``None``, ``None``).
"""
responses = self._emit(event_name, kwargs, stop_on_response=True)
if responses:
return responses[-1]
else:
return (None, None)
def _register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_MIDDLE)
def _register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_FIRST)
def _register_last(self, event_name, handler, unique_id,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_LAST)
def _register_section(self, event_name, handler, unique_id,
unique_id_uses_count, section):
if unique_id is not None:
if unique_id in self._unique_id_handlers:
# We've already registered a handler using this unique_id
# so we don't need to register it again.
count = self._unique_id_handlers[unique_id].get('count', None)
if unique_id_uses_count:
if not count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to use a counter. Subsequent register "
"calls to unique id must specify use of a counter "
"as well." % unique_id)
else:
self._unique_id_handlers[unique_id]['count'] += 1
else:
if count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to not use a counter. Subsequent "
"register calls to unique id must specify not to "
"use a counter as well." % unique_id)
return
else:
# Note that the trie knows nothing about the unique
# id. We track uniqueness in this class via the
# _unique_id_handlers.
self._handlers.append_item(event_name, handler,
section=section)
unique_id_handler_item = {'handler': handler}
if unique_id_uses_count:
unique_id_handler_item['count'] = 1
self._unique_id_handlers[unique_id] = unique_id_handler_item
else:
self._handlers.append_item(event_name, handler, section=section)
# Super simple caching strategy for now, if we change the registrations
# clear the cache. This has the opportunity for smarter invalidations.
self._lookup_cache = {}
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
if unique_id is not None:
try:
count = self._unique_id_handlers[unique_id].get('count', None)
except KeyError:
# There's no handler matching that unique_id so we have
# nothing to unregister.
return
if unique_id_uses_count:
if count is None:
raise ValueError(
"Initial registration of unique id %s was specified to "
"use a counter. Subsequent unregister calls to unique "
"id must specify use of a counter as well." % unique_id)
elif count == 1:
handler = self._unique_id_handlers.pop(unique_id)['handler']
else:
self._unique_id_handlers[unique_id]['count'] -= 1
return
else:
if count:
raise ValueError(
"Initial registration of unique id %s was specified "
"to not use a counter. Subsequent unregister calls "
"to unique id must specify not to use a counter as "
"well." % unique_id)
handler = self._unique_id_handlers.pop(unique_id)['handler']
try:
self._handlers.remove_item(event_name, handler)
self._lookup_cache = {}
except ValueError:
pass
def __copy__(self):
new_instance = self.__class__()
new_state = self.__dict__.copy()
new_state['_handlers'] = copy.copy(self._handlers)
new_state['_unique_id_handlers'] = copy.copy(self._unique_id_handlers)
new_instance.__dict__ = new_state
return new_instance
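# Usage sketch (illustrative only): handlers registered on an event prefix
# also fire for more specific event names, and '*' acts as a wildcard. The
# handler callables and the ``request`` object below are hypothetical;
# handlers must accept **kwargs.
#
#   emitter = HierarchicalEmitter()
#   emitter.register('before-send.s3', add_headers)          # prefix match
#   emitter.register('before-send.*.PutObject', audit_call)  # wildcard match
#   responses = emitter.emit('before-send.s3.PutObject', request=request)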
class EventAliaser(BaseEventHooks):
def __init__(self, event_emitter, event_aliases=None):
self._event_aliases = event_aliases
if event_aliases is None:
self._event_aliases = EVENT_ALIASES
self._emitter = event_emitter
def emit(self, event_name, **kwargs):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.emit(aliased_event_name, **kwargs)
def emit_until_response(self, event_name, **kwargs):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.emit_until_response(aliased_event_name, **kwargs)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register_first(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def register_last(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register_last(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.unregister(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def _alias_event_name(self, event_name):
for old_part, new_part in self._event_aliases.items():
# We can't simply do a string replace for everything, otherwise we
# might end up translating substrings that we never intended to
# translate. When there aren't any dots in the old event name
# part, then we can quickly replace the item in the list if it's
# there.
event_parts = event_name.split('.')
if '.' not in old_part:
try:
# Theoretically a given event name could have the same part
# repeated, but in practice this doesn't happen
event_parts[event_parts.index(old_part)] = new_part
except ValueError:
continue
# If there's dots in the name, it gets more complicated. Now we
# have to replace multiple sections of the original event.
elif old_part in event_name:
old_parts = old_part.split('.')
self._replace_subsection(event_parts, old_parts, new_part)
else:
continue
new_name = '.'.join(event_parts)
logger.debug("Changing event name from %s to %s" % (
event_name, new_name
))
return new_name
return event_name
def _replace_subsection(self, sections, old_parts, new_part):
for i in range(len(sections)):
if sections[i] == old_parts[0] and \
sections[i:i+len(old_parts)] == old_parts:
sections[i:i+len(old_parts)] = [new_part]
return
def __copy__(self):
return self.__class__(
copy.copy(self._emitter),
copy.copy(self._event_aliases)
)
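# Usage sketch (illustrative only): the aliaser rewrites event names before
# delegating to the wrapped emitter. The alias mapping here is made up and
# not taken from botocore.utils.EVENT_ALIASES.
#
#   emitter = HierarchicalEmitter()
#   aliased = EventAliaser(emitter, event_aliases={'old-name': 'new-name'})
#   aliased.register('before-call.old-name', handler)
#   aliased.emit('before-call.old-name')  # handler sees 'before-call.new-name'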
class _PrefixTrie(object):
"""Specialized prefix trie that handles wildcards.
The prefixes in this case are based on dot separated
names so 'foo.bar.baz' is::
foo -> bar -> baz
Wildcard support just means that having a key such as 'foo.bar.*.baz' will
be matched with a call to ``get_items(key='foo.bar.ANYTHING.baz')``.
    You can think of this prefix trie as the equivalent of defaultdict(list),
except that it can do prefix searches:
foo.bar.baz -> A
foo.bar -> B
foo -> C
Calling ``get_items('foo.bar.baz')`` will return [A + B + C], from
most specific to least specific.
"""
def __init__(self):
        # Each dictionary can be thought of as a node, where a node
# has values associated with the node, and children is a link
# to more nodes. So 'foo.bar' would have a 'foo' node with
# a 'bar' node as a child of foo.
# {'foo': {'children': {'bar': {...}}}}.
self._root = {'chunk': None, 'children': {}, 'values': None}
def append_item(self, key, value, section=_MIDDLE):
"""Add an item to a key.
If a value is already associated with that key, the new
value is appended to the list for the key.
"""
key_parts = key.split('.')
current = self._root
for part in key_parts:
if part not in current['children']:
new_child = {'chunk': part, 'values': None, 'children': {}}
current['children'][part] = new_child
current = new_child
else:
current = current['children'][part]
if current['values'] is None:
current['values'] = NodeList([], [], [])
current['values'][section].append(value)
def prefix_search(self, key):
"""Collect all items that are prefixes of key.
        Prefixes in this case are delineated by '.' characters, so
        'foo.bar.baz' is a 3-chunk sequence of prefixes
        ("foo", "bar", and "baz").
"""
collected = deque()
key_parts = key.split('.')
current = self._root
self._get_items(current, key_parts, collected, 0)
return collected
def _get_items(self, starting_node, key_parts, collected, starting_index):
stack = [(starting_node, starting_index)]
key_parts_len = len(key_parts)
# Traverse down the nodes, where at each level we add the
# next part from key_parts as well as the wildcard element '*'.
# This means for each node we see we potentially add two more
# elements to our stack.
while stack:
current_node, index = stack.pop()
if current_node['values']:
# We're using extendleft because we want
# the values associated with the node furthest
# from the root to come before nodes closer
# to the root. extendleft() also adds its items
# in right-left order so .extendleft([1, 2, 3])
# will result in final_list = [3, 2, 1], which is
# why we reverse the lists.
node_list = current_node['values']
complete_order = (node_list.first + node_list.middle +
node_list.last)
collected.extendleft(reversed(complete_order))
if not index == key_parts_len:
children = current_node['children']
directs = children.get(key_parts[index])
wildcard = children.get('*')
next_index = index + 1
if wildcard is not None:
stack.append((wildcard, next_index))
if directs is not None:
stack.append((directs, next_index))
def remove_item(self, key, value):
"""Remove an item associated with a key.
If the value is not associated with the key a ``ValueError``
will be raised. If the key does not exist in the trie, a
``ValueError`` will be raised.
"""
key_parts = key.split('.')
current = self._root
self._remove_item(current, key_parts, value, index=0)
def _remove_item(self, current_node, key_parts, value, index):
if current_node is None:
return
elif index < len(key_parts):
next_node = current_node['children'].get(key_parts[index])
if next_node is not None:
self._remove_item(next_node, key_parts, value, index + 1)
if index == len(key_parts) - 1:
node_list = next_node['values']
if value in node_list.first:
node_list.first.remove(value)
elif value in node_list.middle:
node_list.middle.remove(value)
elif value in node_list.last:
node_list.last.remove(value)
if not next_node['children'] and not next_node['values']:
# Then this is a leaf node with no values so
# we can just delete this link from the parent node.
# This makes subsequent search faster in the case
# where a key does not exist.
del current_node['children'][key_parts[index]]
else:
raise ValueError(
"key is not in trie: %s" % '.'.join(key_parts))
def __copy__(self):
# The fact that we're using a nested dict under the covers
# is an implementation detail, and the user shouldn't have
# to know that they'd normally need a deepcopy so we expose
# __copy__ instead of __deepcopy__.
new_copy = self.__class__()
copied_attrs = self._recursive_copy(self.__dict__)
new_copy.__dict__ = copied_attrs
return new_copy
def _recursive_copy(self, node):
# We can't use copy.deepcopy because we actually only want to copy
# the structure of the trie, not the handlers themselves.
# Each node has a chunk, children, and values.
copied_node = {}
for key, value in node.items():
if isinstance(value, NodeList):
copied_node[key] = copy.copy(value)
elif isinstance(value, dict):
copied_node[key] = self._recursive_copy(value)
else:
copied_node[key] = value
return copied_node
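# Usage sketch (illustrative only): the trie collects the values of every
# matching prefix, with deeper (more specific) matches coming out first.
#
#   trie = _PrefixTrie()
#   trie.append_item('foo.bar.baz', 'A')
#   trie.append_item('foo.*.baz', 'B')
#   trie.append_item('foo', 'C')
#   list(trie.prefix_search('foo.bar.baz'))  # 'A' and 'B' come before 'C'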
| 24,573 | Python | 40.650847 | 80 | 0.568836 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/awsrequest.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import io
import sys
import logging
import functools
import socket
import urllib3.util
from urllib3.connection import VerifiedHTTPSConnection
from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool
from urllib3.connectionpool import HTTPSConnectionPool
import botocore.utils
from botocore.compat import six
from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, \
urlencode, MutableMapping
from botocore.exceptions import UnseekableStreamError
logger = logging.getLogger(__name__)
class AWSHTTPResponse(HTTPResponse):
# The *args, **kwargs is used because the args are slightly
# different in py2.6 than in py2.7/py3.
def __init__(self, *args, **kwargs):
self._status_tuple = kwargs.pop('status_tuple')
HTTPResponse.__init__(self, *args, **kwargs)
def _read_status(self):
if self._status_tuple is not None:
status_tuple = self._status_tuple
self._status_tuple = None
return status_tuple
else:
return HTTPResponse._read_status(self)
class AWSConnection(object):
"""Mixin for HTTPConnection that supports Expect 100-continue.
    When mixed into a subclass of httplib.HTTPConnection (though
    technically we subclass from urllib3, which itself subclasses
    httplib.HTTPConnection), this adds support for Expect
    100-continue, which we need for S3.  As far as I can tell, this is
general purpose enough to not be specific to S3, but I'm being
tentative and keeping it in botocore because I've only tested
this against AWS services.
"""
def __init__(self, *args, **kwargs):
super(AWSConnection, self).__init__(*args, **kwargs)
self._original_response_cls = self.response_class
# We'd ideally hook into httplib's states, but they're all
# __mangled_vars so we use our own state var. This variable is set
# when we receive an early response from the server. If this value is
# set to True, any calls to send() are noops. This value is reset to
# false every time _send_request is called. This is to workaround the
# fact that py2.6 (and only py2.6) has a separate send() call for the
# body in _send_request, as opposed to endheaders(), which is where the
# body is sent in all versions > 2.6.
self._response_received = False
self._expect_header_set = False
def close(self):
super(AWSConnection, self).close()
# Reset all of our instance state we were tracking.
self._response_received = False
self._expect_header_set = False
self.response_class = self._original_response_cls
def _send_request(self, method, url, body, headers, *args, **kwargs):
self._response_received = False
if headers.get('Expect', b'') == b'100-continue':
self._expect_header_set = True
else:
self._expect_header_set = False
self.response_class = self._original_response_cls
rval = super(AWSConnection, self)._send_request(
method, url, body, headers, *args, **kwargs)
self._expect_header_set = False
return rval
def _convert_to_bytes(self, mixed_buffer):
# Take a list of mixed str/bytes and convert it
# all into a single bytestring.
# Any six.text_types will be encoded as utf-8.
bytes_buffer = []
for chunk in mixed_buffer:
if isinstance(chunk, six.text_type):
bytes_buffer.append(chunk.encode('utf-8'))
else:
bytes_buffer.append(chunk)
msg = b"\r\n".join(bytes_buffer)
return msg
def _send_output(self, message_body=None, *args, **kwargs):
self._buffer.extend((b"", b""))
msg = self._convert_to_bytes(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if self._expect_header_set:
# This is our custom behavior. If the Expect header was
# set, it will trigger this custom behavior.
logger.debug("Waiting for 100 Continue response.")
# Wait for 1 second for the server to send a response.
if urllib3.util.wait_for_read(self.sock, 1):
self._handle_expect_response(message_body)
return
else:
# From the RFC:
# Because of the presence of older implementations, the
# protocol allows ambiguous situations in which a client may
# send "Expect: 100-continue" without receiving either a 417
# (Expectation Failed) status or a 100 (Continue) status.
# Therefore, when a client sends this header field to an origin
# server (possibly via a proxy) from which it has never seen a
# 100 (Continue) status, the client SHOULD NOT wait for an
# indefinite period before sending the request body.
logger.debug("No response seen from server, continuing to "
"send the response body.")
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
def _consume_headers(self, fp):
# Most servers (including S3) will just return
# the CLRF after the 100 continue response. However,
# some servers (I've specifically seen this for squid when
# used as a straight HTTP proxy) will also inject a
# Connection: keep-alive header. To account for this
# we'll read until we read '\r\n', and ignore any headers
# that come immediately after the 100 continue response.
current = None
while current != b'\r\n':
current = fp.readline()
def _handle_expect_response(self, message_body):
# This is called when we sent the request headers containing
# an Expect: 100-continue header and received a response.
# We now need to figure out what to do.
fp = self.sock.makefile('rb', 0)
try:
maybe_status_line = fp.readline()
parts = maybe_status_line.split(None, 2)
if self._is_100_continue_status(maybe_status_line):
self._consume_headers(fp)
logger.debug("100 Continue response seen, "
"now sending request body.")
self._send_message_body(message_body)
elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):
# From the RFC:
# Requirements for HTTP/1.1 origin servers:
#
# - Upon receiving a request which includes an Expect
# request-header field with the "100-continue"
# expectation, an origin server MUST either respond with
# 100 (Continue) status and continue to read from the
# input stream, or respond with a final status code.
#
# So if we don't get a 100 Continue response, then
# whatever the server has sent back is the final response
# and don't send the message_body.
logger.debug("Received a non 100 Continue response "
"from the server, NOT sending request body.")
status_tuple = (parts[0].decode('ascii'),
int(parts[1]), parts[2].decode('ascii'))
response_class = functools.partial(
AWSHTTPResponse, status_tuple=status_tuple)
self.response_class = response_class
self._response_received = True
finally:
fp.close()
def _send_message_body(self, message_body):
if message_body is not None:
self.send(message_body)
def send(self, str):
if self._response_received:
            logger.debug("send() called, but response already received. "
"Not sending data.")
return
return super(AWSConnection, self).send(str)
def _is_100_continue_status(self, maybe_status_line):
parts = maybe_status_line.split(None, 2)
# Check for HTTP/<version> 100 Continue\r\n
return (
len(parts) >= 3 and parts[0].startswith(b'HTTP/') and
parts[1] == b'100')
class AWSHTTPConnection(AWSConnection, HTTPConnection):
""" An HTTPConnection that supports 100 Continue behavior. """
class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection):
""" An HTTPSConnection that supports 100 Continue behavior. """
class AWSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = AWSHTTPConnection
class AWSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = AWSHTTPSConnection
def prepare_request_dict(request_dict, endpoint_url, context=None,
user_agent=None):
"""
This method prepares a request dict to be created into an
AWSRequestObject. This prepares the request dict by adding the
url and the user agent to the request dict.
:type request_dict: dict
:param request_dict: The request dict (created from the
``serialize`` module).
:type user_agent: string
:param user_agent: The user agent to use for this request.
:type endpoint_url: string
:param endpoint_url: The full endpoint url, which contains at least
the scheme, the hostname, and optionally any path components.
"""
r = request_dict
if user_agent is not None:
headers = r['headers']
headers['User-Agent'] = user_agent
host_prefix = r.get('host_prefix')
url = _urljoin(endpoint_url, r['url_path'], host_prefix)
if r['query_string']:
# NOTE: This is to avoid circular import with utils. This is being
        # done to avoid moving classes to different modules so as not to cause
        # breaking changes.
percent_encode_sequence = botocore.utils.percent_encode_sequence
encoded_query_string = percent_encode_sequence(r['query_string'])
if '?' not in url:
url += '?%s' % encoded_query_string
else:
url += '&%s' % encoded_query_string
r['url'] = url
r['context'] = context
if context is None:
r['context'] = {}
def create_request_object(request_dict):
"""
This method takes a request dict and creates an AWSRequest object
from it.
:type request_dict: dict
:param request_dict: The request dict (created from the
``prepare_request_dict`` method).
:rtype: ``botocore.awsrequest.AWSRequest``
:return: An AWSRequest object based on the request_dict.
"""
r = request_dict
request_object = AWSRequest(
method=r['method'], url=r['url'], data=r['body'], headers=r['headers'])
request_object.context = r['context']
return request_object
def _urljoin(endpoint_url, url_path, host_prefix):
p = urlsplit(endpoint_url)
# <part> - <index>
# scheme - p[0]
# netloc - p[1]
# path - p[2]
# query - p[3]
# fragment - p[4]
if not url_path or url_path == '/':
# If there's no path component, ensure the URL ends with
# a '/' for backwards compatibility.
if not p[2]:
new_path = '/'
else:
new_path = p[2]
elif p[2].endswith('/') and url_path.startswith('/'):
new_path = p[2][:-1] + url_path
else:
new_path = p[2] + url_path
new_netloc = p[1]
if host_prefix is not None:
new_netloc = host_prefix + new_netloc
reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4]))
return reconstructed
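# Illustrative examples (assumptions, not taken from a test suite) of how
# _urljoin combines the endpoint, path, and optional host prefix:
#
#   _urljoin('https://s3.amazonaws.com', '/mybucket/mykey', None)
#       -> 'https://s3.amazonaws.com/mybucket/mykey'
#   _urljoin('https://example.com/base/', '/path', 'prefix.')
#       -> 'https://prefix.example.com/base/path'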
class AWSRequestPreparer(object):
"""
This class performs preparation on AWSRequest objects similar to that of
the PreparedRequest class does in the requests library. However, the logic
has been boiled down to meet the specific use cases in botocore. Of note
there are the following differences:
This class does not heavily prepare the URL. Requests performed many
validations and corrections to ensure the URL is properly formatted.
Botocore either performs these validations elsewhere or otherwise
consistently provides well formatted URLs.
    This class does not heavily prepare the body. Body preparation is
simple and supports only the cases that we document: bytes and
file-like objects to determine the content-length. This will also
additionally prepare a body that is a dict to be url encoded params
string as some signers rely on this. Finally, this class does not
support multipart file uploads.
This class does not prepare the method, auth or cookies.
"""
def prepare(self, original):
method = original.method
url = self._prepare_url(original)
body = self._prepare_body(original)
headers = self._prepare_headers(original, body)
stream_output = original.stream_output
return AWSPreparedRequest(method, url, headers, body, stream_output)
def _prepare_url(self, original):
url = original.url
if original.params:
params = urlencode(list(original.params.items()), doseq=True)
url = '%s?%s' % (url, params)
return url
def _prepare_headers(self, original, prepared_body=None):
headers = HeadersDict(original.headers.items())
# If the transfer encoding or content length is already set, use that
if 'Transfer-Encoding' in headers or 'Content-Length' in headers:
return headers
# Ensure we set the content length when it is expected
if original.method not in ('GET', 'HEAD', 'OPTIONS'):
length = self._determine_content_length(prepared_body)
if length is not None:
headers['Content-Length'] = str(length)
else:
# Failed to determine content length, using chunked
# NOTE: This shouldn't ever happen in practice
body_type = type(prepared_body)
logger.debug('Failed to determine length of %s', body_type)
headers['Transfer-Encoding'] = 'chunked'
return headers
def _to_utf8(self, item):
key, value = item
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return key, value
def _prepare_body(self, original):
"""Prepares the given HTTP body data."""
body = original.data
if body == b'':
body = None
if isinstance(body, dict):
params = [self._to_utf8(item) for item in body.items()]
body = urlencode(params, doseq=True)
return body
def _determine_content_length(self, body):
# No body, content length of 0
if not body:
return 0
        # Try asking the body for its length
try:
return len(body)
except (AttributeError, TypeError) as e:
pass
# Try getting the length from a seekable stream
if hasattr(body, 'seek') and hasattr(body, 'tell'):
try:
orig_pos = body.tell()
body.seek(0, 2)
end_file_pos = body.tell()
body.seek(orig_pos)
return end_file_pos - orig_pos
except io.UnsupportedOperation:
                # In cases where the body is, for example, an io.BufferedIOBase
                # object, its "seek" method raises "UnsupportedOperation";
                # in that case we want to fall back to "chunked" encoding.
pass
# Failed to determine the length
return None
class AWSRequest(object):
"""Represents the elements of an HTTP request.
This class was originally inspired by requests.models.Request, but has been
boiled down to meet the specific use cases in botocore. That being said this
class (even in requests) is effectively a named-tuple.
"""
_REQUEST_PREPARER_CLS = AWSRequestPreparer
def __init__(self,
method=None,
url=None,
headers=None,
data=None,
params=None,
auth_path=None,
stream_output=False):
self._request_preparer = self._REQUEST_PREPARER_CLS()
# Default empty dicts for dict params.
params = {} if params is None else params
self.method = method
self.url = url
self.headers = HTTPHeaders()
self.data = data
self.params = params
self.auth_path = auth_path
self.stream_output = stream_output
if headers is not None:
for key, value in headers.items():
self.headers[key] = value
# This is a dictionary to hold information that is used when
# processing the request. What is inside of ``context`` is open-ended.
# For example, it may have a timestamp key that is used for holding
# what the timestamp is when signing the request. Note that none
# of the information that is inside of ``context`` is directly
# sent over the wire; the information is only used to assist in
# creating what is sent over the wire.
self.context = {}
def prepare(self):
"""Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
return self._request_preparer.prepare(self)
@property
def body(self):
body = self.prepare().body
if isinstance(body, six.text_type):
body = body.encode('utf-8')
return body
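# Usage sketch (illustrative only): building a request by hand and preparing
# it. Real requests are normally built from a serialized request dict via
# ``create_request_object`` above; the URL and body here are made up.
#
#   request = AWSRequest(
#       method='PUT', url='https://example.amazonaws.com/key',
#       data=b'hello', headers={'Content-Type': 'text/plain'})
#   prepared = request.prepare()
#   # prepared.headers now includes a computed 'Content-Length' of '5'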
class AWSPreparedRequest(object):
"""A data class representing a finalized request to be sent over the wire.
Requests at this stage should be treated as final, and the properties of
the request should not be modified.
:ivar method: The HTTP Method
:ivar url: The full url
:ivar headers: The HTTP headers to send.
:ivar body: The HTTP body.
:ivar stream_output: If the response for this request should be streamed.
"""
def __init__(self, method, url, headers, body, stream_output):
self.method = method
self.url = url
self.headers = headers
self.body = body
self.stream_output = stream_output
def __repr__(self):
fmt = (
'<AWSPreparedRequest stream_output=%s, method=%s, url=%s, '
'headers=%s>'
)
return fmt % (self.stream_output, self.method, self.url, self.headers)
    def reset_stream(self):
        """Resets the streaming body to its initial position.
If the request contains a streaming body (a streamable file-like object)
seek to the object's initial position to ensure the entire contents of
the object is sent. This is a no-op for static bytes-like body types.
"""
# Trying to reset a stream when there is a no stream will
# just immediately return. It's not an error, it will produce
# the same result as if we had actually reset the stream (we'll send
# the entire body contents again if we need to).
# Same case if the body is a string/bytes/bytearray type.
non_seekable_types = (six.binary_type, six.text_type, bytearray)
if self.body is None or isinstance(self.body, non_seekable_types):
return
try:
logger.debug("Rewinding stream: %s", self.body)
self.body.seek(0)
except Exception as e:
logger.debug("Unable to rewind stream: %s", e)
raise UnseekableStreamError(stream_object=self.body)
class AWSResponse(object):
"""A data class representing an HTTP response.
This class was originally inspired by requests.models.Response, but has
been boiled down to meet the specific use cases in botocore. This has
effectively been reduced to a named tuple.
:ivar url: The full url.
:ivar status_code: The status code of the HTTP response.
:ivar headers: The HTTP headers received.
:ivar body: The HTTP response body.
"""
def __init__(self, url, status_code, headers, raw):
self.url = url
self.status_code = status_code
self.headers = HeadersDict(headers)
self.raw = raw
self._content = None
@property
def content(self):
"""Content of the response as bytes."""
if self._content is None:
# Read the contents.
# NOTE: requests would attempt to call stream and fall back
# to a custom generator that would call read in a loop, but
# we don't rely on this behavior
self._content = bytes().join(self.raw.stream()) or bytes()
return self._content
@property
def text(self):
"""Content of the response as a proper text type.
        Uses the encoding type provided in the response headers to decode the
response content into a proper text type. If the encoding is not
present in the headers, UTF-8 is used as a default.
"""
encoding = botocore.utils.get_encoding_from_headers(self.headers)
if encoding:
return self.content.decode(encoding)
else:
return self.content.decode('utf-8')
class _HeaderKey(object):
def __init__(self, key):
self._key = key
self._lower = key.lower()
def __hash__(self):
return hash(self._lower)
def __eq__(self, other):
return isinstance(other, _HeaderKey) and self._lower == other._lower
def __str__(self):
return self._key
def __repr__(self):
return repr(self._key)
class HeadersDict(MutableMapping):
    """A case-insensitive dictionary to represent HTTP headers."""
def __init__(self, *args, **kwargs):
self._dict = {}
self.update(*args, **kwargs)
def __setitem__(self, key, value):
self._dict[_HeaderKey(key)] = value
def __getitem__(self, key):
return self._dict[_HeaderKey(key)]
def __delitem__(self, key):
del self._dict[_HeaderKey(key)]
def __iter__(self):
return (str(key) for key in self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return repr(self._dict)
def copy(self):
return HeadersDict(self.items())
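# Usage sketch (illustrative only): lookups are case-insensitive while the
# original key casing is preserved when iterating.
#
#   headers = HeadersDict({'Content-Type': 'application/json'})
#   headers['content-type']  # -> 'application/json'
#   list(headers)            # -> ['Content-Type']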
| 23,613 | Python | 36.842949 | 80 | 0.615678 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/history.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
HISTORY_RECORDER = None
logger = logging.getLogger(__name__)
class BaseHistoryHandler(object):
def emit(self, event_type, payload, source):
raise NotImplementedError('emit()')
class HistoryRecorder(object):
def __init__(self):
self._enabled = False
self._handlers = []
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
def add_handler(self, handler):
self._handlers.append(handler)
def record(self, event_type, payload, source='BOTOCORE'):
if self._enabled and self._handlers:
for handler in self._handlers:
try:
handler.emit(event_type, payload, source)
except Exception:
# Never let the process die because we had a failure in
# a record collection handler.
logger.debug("Exception raised in %s.", handler,
exc_info=True)
def get_global_history_recorder():
global HISTORY_RECORDER
if HISTORY_RECORDER is None:
HISTORY_RECORDER = HistoryRecorder()
return HISTORY_RECORDER
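if __name__ == '__main__': # pragma: no cover
 # Minimal illustrative sketch (not part of the original module): attach a
 # handler and record an event. The event name, payload, and source below
 # are made up.
 class _PrintingHandler(BaseHistoryHandler):
 def emit(self, event_type, payload, source):
 print(event_type, payload, source)
 recorder = get_global_history_recorder()
 recorder.add_handler(_PrintingHandler())
 recorder.enable()
 recorder.record('EXAMPLE_EVENT', {'detail': 'value'}, source='EXAMPLE')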
| 1,748 | Python | 30.232142 | 75 | 0.644737 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/errorfactory.py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import ClientError
from botocore.utils import get_service_module_name
class BaseClientExceptions(object):
ClientError = ClientError
def __init__(self, code_to_exception):
"""Base class for exceptions object on a client
:type code_to_exception: dict
:param code_to_exception: Mapping of error codes (strings) to exception
class that should be raised when encountering a particular
error code.
"""
self._code_to_exception = code_to_exception
def from_code(self, error_code):
"""Retrieves the error class based on the error code
This is helpful for identifying the exception class needing to be
 caught based on the ClientError.response['Error']['Code'] value
:type error_code: string
:param error_code: The error code associated to a ClientError exception
:rtype: ClientError or a subclass of ClientError
:returns: The appropriate modeled exception class for that error
code. If the error code does not match any of the known
modeled exceptions then return a generic ClientError.
"""
return self._code_to_exception.get(error_code, self.ClientError)
def __getattr__(self, name):
exception_cls_names = [
exception_cls.__name__ for exception_cls
in self._code_to_exception.values()
]
raise AttributeError(
'%r object has no attribute %r. Valid exceptions are: %s' % (
self, name, ', '.join(exception_cls_names)))
class ClientExceptionsFactory(object):
def __init__(self):
self._client_exceptions_cache = {}
def create_client_exceptions(self, service_model):
"""Creates a ClientExceptions object for the particular service client
:type service_model: botocore.model.ServiceModel
:param service_model: The service model for the client
:rtype: object that subclasses from BaseClientExceptions
:returns: The exceptions object of a client that can be used
to grab the various different modeled exceptions.
"""
service_name = service_model.service_name
if service_name not in self._client_exceptions_cache:
client_exceptions = self._create_client_exceptions(service_model)
self._client_exceptions_cache[service_name] = client_exceptions
return self._client_exceptions_cache[service_name]
def _create_client_exceptions(self, service_model):
cls_props = {}
code_to_exception = {}
for error_shape in service_model.error_shapes:
exception_name = str(error_shape.name)
exception_cls = type(exception_name, (ClientError,), {})
cls_props[exception_name] = exception_cls
code = str(error_shape.error_code)
code_to_exception[code] = exception_cls
cls_name = str(get_service_module_name(service_model) + 'Exceptions')
client_exceptions_cls = type(
cls_name, (BaseClientExceptions,), cls_props)
return client_exceptions_cls(code_to_exception)
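if __name__ == '__main__': # pragma: no cover
 # Minimal illustrative sketch (not part of the original module): map error
 # codes to exception classes and resolve them with from_code(). The class
 # and code names below are made up.
 class NoSuchWidgetError(ClientError):
 pass
 demo_exceptions = BaseClientExceptions({'NoSuchWidget': NoSuchWidgetError})
 assert demo_exceptions.from_code('NoSuchWidget') is NoSuchWidgetError
 assert demo_exceptions.from_code('SomeUnknownCode') is ClientError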
| 3,727 | Python | 40.88764 | 79 | 0.669708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/args.py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Internal module to help with normalizing botocore client args.
This module (and all function/classes within this module) should be
considered internal, and *not* a public API.
"""
import copy
import logging
import socket
import botocore.exceptions
import botocore.serialize
import botocore.utils
from botocore.signers import RequestSigner
from botocore.config import Config
from botocore.endpoint import EndpointCreator
logger = logging.getLogger(__name__)
VALID_REGIONAL_ENDPOINTS_CONFIG = [
'legacy',
'regional',
]
LEGACY_GLOBAL_STS_REGIONS = [
'ap-northeast-1',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'aws-global',
'ca-central-1',
'eu-central-1',
'eu-north-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
]
class ClientArgsCreator(object):
def __init__(self, event_emitter, user_agent, response_parser_factory,
loader, exceptions_factory, config_store):
self._event_emitter = event_emitter
self._user_agent = user_agent
self._response_parser_factory = response_parser_factory
self._loader = loader
self._exceptions_factory = exceptions_factory
self._config_store = config_store
def get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials, scoped_config,
client_config, endpoint_bridge):
final_args = self.compute_client_args(
service_model, client_config, endpoint_bridge, region_name,
endpoint_url, is_secure, scoped_config)
service_name = final_args['service_name']
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = RequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
new_config = Config(**config_kwargs)
endpoint_creator = EndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
proxies_config=new_config.proxies_config)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory
}
def compute_client_args(self, service_model, client_config,
endpoint_bridge, region_name, endpoint_url,
is_secure, scoped_config):
service_name = service_model.endpoint_prefix
protocol = service_model.metadata['protocol']
parameter_validation = True
if client_config and not client_config.parameter_validation:
parameter_validation = False
elif scoped_config:
raw_value = scoped_config.get('parameter_validation')
if raw_value is not None:
parameter_validation = botocore.utils.ensure_boolean(raw_value)
# Override the user agent if specified in the client config.
user_agent = self._user_agent
if client_config is not None:
if client_config.user_agent is not None:
user_agent = client_config.user_agent
if client_config.user_agent_extra is not None:
user_agent += ' %s' % client_config.user_agent_extra
s3_config = self.compute_s3_config(client_config)
endpoint_config = self._compute_endpoint_config(
service_name=service_name,
region_name=region_name,
endpoint_url=endpoint_url,
is_secure=is_secure,
endpoint_bridge=endpoint_bridge,
s3_config=s3_config,
)
# Create a new client config to be passed to the client based
# on the final values. We do not want the user to be able
# to try to modify an existing client with a client config.
config_kwargs = dict(
region_name=endpoint_config['region_name'],
signature_version=endpoint_config['signature_version'],
user_agent=user_agent)
if client_config is not None:
config_kwargs.update(
connect_timeout=client_config.connect_timeout,
read_timeout=client_config.read_timeout,
max_pool_connections=client_config.max_pool_connections,
proxies=client_config.proxies,
proxies_config=client_config.proxies_config,
retries=client_config.retries,
client_cert=client_config.client_cert,
inject_host_prefix=client_config.inject_host_prefix,
)
self._compute_retry_config(config_kwargs)
s3_config = self.compute_s3_config(client_config)
return {
'service_name': service_name,
'parameter_validation': parameter_validation,
'user_agent': user_agent,
'endpoint_config': endpoint_config,
'protocol': protocol,
'config_kwargs': config_kwargs,
's3_config': s3_config,
'socket_options': self._compute_socket_options(scoped_config)
}
def compute_s3_config(self, client_config):
s3_configuration = self._config_store.get_config_variable('s3')
 # Next, specific client config values take precedence over
 # specific values in the scoped config.
if client_config is not None:
if client_config.s3 is not None:
if s3_configuration is None:
s3_configuration = client_config.s3
else:
# The current s3_configuration dictionary may be
# from a source that only should be read from so
# we want to be safe and just make a copy of it to modify
# before it actually gets updated.
s3_configuration = s3_configuration.copy()
s3_configuration.update(client_config.s3)
return s3_configuration
def _compute_endpoint_config(self, service_name, region_name, endpoint_url,
is_secure, endpoint_bridge, s3_config):
resolve_endpoint_kwargs = {
'service_name': service_name,
'region_name': region_name,
'endpoint_url': endpoint_url,
'is_secure': is_secure,
'endpoint_bridge': endpoint_bridge,
}
if service_name == 's3':
return self._compute_s3_endpoint_config(
s3_config=s3_config, **resolve_endpoint_kwargs)
if service_name == 'sts':
return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
return self._resolve_endpoint(**resolve_endpoint_kwargs)
def _compute_s3_endpoint_config(self, s3_config,
**resolve_endpoint_kwargs):
force_s3_global = self._should_force_s3_global(
resolve_endpoint_kwargs['region_name'], s3_config)
if force_s3_global:
resolve_endpoint_kwargs['region_name'] = None
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
self._set_region_if_custom_s3_endpoint(
endpoint_config, resolve_endpoint_kwargs['endpoint_bridge'])
# For backwards compatibility reasons, we want to make sure the
# client.meta.region_name will remain us-east-1 if we forced the
# endpoint to be the global region. Specifically, if this value
# changes to aws-global, it breaks logic where a user is checking
# for us-east-1 as the global endpoint such as in creating buckets.
if force_s3_global and endpoint_config['region_name'] == 'aws-global':
endpoint_config['region_name'] = 'us-east-1'
return endpoint_config
def _should_force_s3_global(self, region_name, s3_config):
s3_regional_config = 'legacy'
if s3_config and 'us_east_1_regional_endpoint' in s3_config:
s3_regional_config = s3_config['us_east_1_regional_endpoint']
self._validate_s3_regional_config(s3_regional_config)
return (
s3_regional_config == 'legacy' and
region_name in ['us-east-1', None]
)
def _validate_s3_regional_config(self, config_val):
if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG:
raise botocore.exceptions.\
InvalidS3UsEast1RegionalEndpointConfigError(
s3_us_east_1_regional_endpoint_config=config_val)
def _set_region_if_custom_s3_endpoint(self, endpoint_config,
endpoint_bridge):
# If a user is providing a custom URL, the endpoint resolver will
# refuse to infer a signing region. If we want to default to s3v4,
# we have to account for this.
if endpoint_config['signing_region'] is None \
and endpoint_config['region_name'] is None:
endpoint = endpoint_bridge.resolve('s3')
endpoint_config['signing_region'] = endpoint['signing_region']
endpoint_config['region_name'] = endpoint['region_name']
def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
if self._should_set_global_sts_endpoint(
resolve_endpoint_kwargs['region_name'],
resolve_endpoint_kwargs['endpoint_url']):
self._set_global_sts_endpoint(
endpoint_config, resolve_endpoint_kwargs['is_secure'])
return endpoint_config
def _should_set_global_sts_endpoint(self, region_name, endpoint_url):
if endpoint_url:
return False
return (
self._get_sts_regional_endpoints_config() == 'legacy' and
region_name in LEGACY_GLOBAL_STS_REGIONS
)
def _get_sts_regional_endpoints_config(self):
sts_regional_endpoints_config = self._config_store.get_config_variable(
'sts_regional_endpoints')
if not sts_regional_endpoints_config:
sts_regional_endpoints_config = 'legacy'
if sts_regional_endpoints_config not in \
VALID_REGIONAL_ENDPOINTS_CONFIG:
raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
sts_regional_endpoints_config=sts_regional_endpoints_config)
return sts_regional_endpoints_config
def _set_global_sts_endpoint(self, endpoint_config, is_secure):
scheme = 'https' if is_secure else 'http'
endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme
endpoint_config['signing_region'] = 'us-east-1'
def _resolve_endpoint(self, service_name, region_name,
endpoint_url, is_secure, endpoint_bridge):
return endpoint_bridge.resolve(
service_name, region_name, endpoint_url, is_secure)
def _compute_socket_options(self, scoped_config):
# This disables Nagle's algorithm and is the default socket options
# in urllib3.
socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
if scoped_config:
# Enables TCP Keepalive if specified in shared config file.
if self._ensure_boolean(scoped_config.get('tcp_keepalive', False)):
socket_options.append(
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
return socket_options
def _compute_retry_config(self, config_kwargs):
self._compute_retry_max_attempts(config_kwargs)
self._compute_retry_mode(config_kwargs)
def _compute_retry_max_attempts(self, config_kwargs):
# There's a pre-existing max_attempts client config value that actually
# means max *retry* attempts. There's also a `max_attempts` we pull
# from the config store that means *total attempts*, which includes the
 # initial request. We can't change what `max_attempts` means in
# client config so we try to normalize everything to a new
# "total_max_attempts" variable. We ensure that after this, the only
# configuration for "max attempts" is the 'total_max_attempts' key.
# An explicitly provided max_attempts in the client config
# overrides everything.
retries = config_kwargs.get('retries')
if retries is not None:
if 'total_max_attempts' in retries:
retries.pop('max_attempts', None)
return
if 'max_attempts' in retries:
value = retries.pop('max_attempts')
# client config max_attempts means total retries so we
# have to add one for 'total_max_attempts' to account
# for the initial request.
retries['total_max_attempts'] = value + 1
return
# Otherwise we'll check the config store which checks env vars,
# config files, etc. There is no default value for max_attempts
 # so if this returns None, we don't set a default value here.
max_attempts = self._config_store.get_config_variable('max_attempts')
if max_attempts is not None:
if retries is None:
retries = {}
config_kwargs['retries'] = retries
retries['total_max_attempts'] = max_attempts
def _compute_retry_mode(self, config_kwargs):
retries = config_kwargs.get('retries')
if retries is None:
retries = {}
config_kwargs['retries'] = retries
elif 'mode' in retries:
# If there's a retry mode explicitly set in the client config
# that overrides everything.
return
retry_mode = self._config_store.get_config_variable('retry_mode')
if retry_mode is None:
retry_mode = 'legacy'
retries['mode'] = retry_mode
def _ensure_boolean(self, val):
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
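if __name__ == '__main__': # pragma: no cover
 # Minimal illustrative sketch (not part of the original module): the retry
 # normalization turns a client-config 'max_attempts' (number of retries)
 # into 'total_max_attempts' (retries plus the initial request). Calling the
 # private method directly here is for illustration only.
 demo_creator = ClientArgsCreator(None, None, None, None, None, None)
 demo_kwargs = {'retries': {'max_attempts': 2}}
 demo_creator._compute_retry_max_attempts(demo_kwargs)
 assert demo_kwargs['retries'] == {'total_max_attempts': 3}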
| 16,199 | Python | 42.665768 | 79 | 0.618063 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/__init__.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.20.49'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
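# Illustrative usage only (assumes botocore.config is importable): passing
# UNSIGNED as the signature_version makes a client send unauthenticated
# requests, e.g.
#
# from botocore.config import Config
# unsigned_config = Config(signature_version=UNSIGNED)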
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
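 Illustrative conversions (added examples; the second one is covered by the
 prepopulated special-case cache above)::

 >>> xform_name('CreateBucket')
 'create_bucket'
 >>> xform_name('DescribeCachediSCSIVolumes')
 'describe_cached_iscsi_volumes'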
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
| 3,881 | Python | 38.212121 | 78 | 0.681783 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/waiter.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
import logging
import time
from botocore.utils import get_service_module_name
from botocore.docs.docstring import WaiterDocstring
from .exceptions import WaiterError, ClientError, WaiterConfigError
from . import xform_name
logger = logging.getLogger(__name__)
def create_waiter_with_client(waiter_name, waiter_model, client):
"""
:type waiter_name: str
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiter_model: botocore.waiter.WaiterModel
:param waiter_model: The model for the waiter configuration.
:type client: botocore.client.BaseClient
:param client: The botocore client associated with the service.
:rtype: botocore.waiter.Waiter
:return: The waiter object.
"""
single_waiter_config = waiter_model.get_waiter(waiter_name)
operation_name = xform_name(single_waiter_config.operation)
operation_method = NormalizedOperationMethod(
getattr(client, operation_name))
# Create a new wait method that will serve as a proxy to the underlying
# Waiter.wait method. This is needed to attach a docstring to the
# method.
def wait(self, **kwargs):
Waiter.wait(self, **kwargs)
wait.__doc__ = WaiterDocstring(
waiter_name=waiter_name,
event_emitter=client.meta.events,
service_model=client.meta.service_model,
service_waiter_model=waiter_model,
include_signature=False
)
# Rename the waiter class based on the type of waiter.
waiter_class_name = str('%s.Waiter.%s' % (
get_service_module_name(client.meta.service_model),
waiter_name))
# Create the new waiter class
documented_waiter_cls = type(
waiter_class_name, (Waiter,), {'wait': wait})
# Return an instance of the new waiter class.
return documented_waiter_cls(
waiter_name, single_waiter_config, operation_method
)
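# Illustrative usage only (requires a real botocore client; the waiter, bucket,
# and client names below are examples):
#
# waiter = create_waiter_with_client('BucketExists', waiter_model, s3_client)
# waiter.wait(Bucket='my-bucket',
# WaiterConfig={'Delay': 5, 'MaxAttempts': 20})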
def is_valid_waiter_error(response):
error = response.get('Error')
if isinstance(error, dict) and 'Code' in error:
return True
return False
class NormalizedOperationMethod(object):
def __init__(self, client_method):
self._client_method = client_method
def __call__(self, **kwargs):
try:
return self._client_method(**kwargs)
except ClientError as e:
return e.response
class WaiterModel(object):
SUPPORTED_VERSION = 2
def __init__(self, waiter_config):
"""
Note that the WaiterModel takes ownership of the waiter_config.
It may or may not mutate the waiter_config. If this is a concern,
it is best to make a copy of the waiter config before passing it to
the WaiterModel.
:type waiter_config: dict
:param waiter_config: The loaded waiter config
from the <service>*.waiters.json file. This can be
obtained from a botocore Loader object as well.
"""
self._waiter_config = waiter_config['waiters']
# These are part of the public API. Changing these
# will result in having to update the consuming code,
# so don't change unless you really need to.
version = waiter_config.get('version', 'unknown')
self._verify_supported_version(version)
self.version = version
self.waiter_names = list(sorted(waiter_config['waiters'].keys()))
def _verify_supported_version(self, version):
if version != self.SUPPORTED_VERSION:
raise WaiterConfigError(
error_msg=("Unsupported waiter version, supported version "
"must be: %s, but version of waiter config "
"is: %s" % (self.SUPPORTED_VERSION,
version)))
def get_waiter(self, waiter_name):
try:
single_waiter_config = self._waiter_config[waiter_name]
except KeyError:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return SingleWaiterConfig(single_waiter_config)
class SingleWaiterConfig(object):
"""Represents the waiter configuration for a single waiter.
A single waiter is considered the configuration for a single
value associated with a named waiter (i.e TableExists).
"""
def __init__(self, single_waiter_config):
self._config = single_waiter_config
# These attributes are part of the public API.
self.description = single_waiter_config.get('description', '')
# Per the spec, these three fields are required.
self.operation = single_waiter_config['operation']
self.delay = single_waiter_config['delay']
self.max_attempts = single_waiter_config['maxAttempts']
@property
def acceptors(self):
acceptors = []
for acceptor_config in self._config['acceptors']:
acceptor = AcceptorConfig(acceptor_config)
acceptors.append(acceptor)
return acceptors
class AcceptorConfig(object):
def __init__(self, config):
self.state = config['state']
self.matcher = config['matcher']
self.expected = config['expected']
self.argument = config.get('argument')
self.matcher_func = self._create_matcher_func()
@property
def explanation(self):
if self.matcher == 'path':
return 'For expression "%s" we matched expected path: "%s"' % (self.argument, self.expected)
elif self.matcher == 'pathAll':
 return 'For expression "%s" all members matched expected path: "%s"' % (self.argument, self.expected)
elif self.matcher == 'pathAny':
return 'For expression "%s" we matched expected path: "%s" at least once' % (self.argument, self.expected)
elif self.matcher == 'status':
return 'Matched expected HTTP status code: %s' % self.expected
elif self.matcher == 'error':
return 'Matched expected service error code: %s' % self.expected
else:
return 'No explanation for unknown waiter type: "%s"' % self.matcher
def _create_matcher_func(self):
# An acceptor function is a callable that takes a single value. The
# parsed AWS response. Note that the parsed error response is also
# provided in the case of errors, so it's entirely possible to
# handle all the available matcher capabilities in the future.
# There's only three supported matchers, so for now, this is all
# contained to a single method. If this grows, we can expand this
# out to separate methods or even objects.
if self.matcher == 'path':
return self._create_path_matcher()
elif self.matcher == 'pathAll':
return self._create_path_all_matcher()
elif self.matcher == 'pathAny':
return self._create_path_any_matcher()
elif self.matcher == 'status':
return self._create_status_matcher()
elif self.matcher == 'error':
return self._create_error_matcher()
else:
raise WaiterConfigError(
error_msg="Unknown acceptor: %s" % self.matcher)
def _create_path_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
return expression.search(response) == expected
return acceptor_matches
def _create_path_all_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
result = expression.search(response)
if not isinstance(result, list) or not result:
# pathAll matcher must result in a list.
# Also we require at least one element in the list,
# that is, an empty list should not result in this
# acceptor match.
return False
for element in result:
if element != expected:
return False
return True
return acceptor_matches
def _create_path_any_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
result = expression.search(response)
if not isinstance(result, list) or not result:
# pathAny matcher must result in a list.
# Also we require at least one element in the list,
# that is, an empty list should not result in this
# acceptor match.
return False
for element in result:
if element == expected:
return True
return False
return acceptor_matches
def _create_status_matcher(self):
expected = self.expected
def acceptor_matches(response):
# We don't have any requirements on the expected incoming data
# other than it is a dict, so we don't assume there's
# a ResponseMetadata.HTTPStatusCode.
status_code = response.get('ResponseMetadata', {}).get(
'HTTPStatusCode')
return status_code == expected
return acceptor_matches
def _create_error_matcher(self):
expected = self.expected
def acceptor_matches(response):
# When the client encounters an error, it will normally raise
# an exception. However, the waiter implementation will catch
# this exception, and instead send us the parsed error
# response. So response is still a dictionary, and in the case
# of an error response will contain the "Error" and
# "ResponseMetadata" key.
return response.get("Error", {}).get("Code", "") == expected
return acceptor_matches
class Waiter(object):
def __init__(self, name, config, operation_method):
"""
:type name: string
:param name: The name of the waiter
:type config: botocore.waiter.SingleWaiterConfig
:param config: The configuration for the waiter.
:type operation_method: callable
:param operation_method: A callable that accepts **kwargs
and returns a response. For example, this can be
a method from a botocore client.
"""
self._operation_method = operation_method
# The two attributes are exposed to allow for introspection
# and documentation.
self.name = name
self.config = config
def wait(self, **kwargs):
acceptors = list(self.config.acceptors)
current_state = 'waiting'
# pop the invocation specific config
config = kwargs.pop('WaiterConfig', {})
sleep_amount = config.get('Delay', self.config.delay)
max_attempts = config.get('MaxAttempts', self.config.max_attempts)
last_matched_acceptor = None
num_attempts = 0
while True:
response = self._operation_method(**kwargs)
num_attempts += 1
for acceptor in acceptors:
if acceptor.matcher_func(response):
last_matched_acceptor = acceptor
current_state = acceptor.state
break
else:
# If none of the acceptors matched, we should
# transition to the failure state if an error
# response was received.
if is_valid_waiter_error(response):
# Transition to a failure state, which we
# can just handle here by raising an exception.
raise WaiterError(
name=self.name,
reason='An error occurred (%s): %s' % (
response['Error'].get('Code', 'Unknown'),
response['Error'].get('Message', 'Unknown'),
),
last_response=response,
)
if current_state == 'success':
logger.debug("Waiting complete, waiter matched the "
"success state.")
return
if current_state == 'failure':
reason = 'Waiter encountered a terminal failure state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
if num_attempts >= max_attempts:
if last_matched_acceptor is None:
reason = 'Max attempts exceeded'
else:
reason = 'Max attempts exceeded. Previously accepted state: %s' %(
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
time.sleep(sleep_amount)
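if __name__ == '__main__': # pragma: no cover
 # Minimal illustrative sketch (not part of the original module): build a
 # WaiterModel from an inline config and inspect its acceptors. The waiter,
 # operation, and field names below are made up.
 demo_config = {
 'version': 2,
 'waiters': {
 'ThingExists': {
 'operation': 'DescribeThing',
 'delay': 5,
 'maxAttempts': 3,
 'acceptors': [
 {'state': 'success', 'matcher': 'path',
 'argument': 'Thing.Status', 'expected': 'READY'},
 {'state': 'failure', 'matcher': 'error',
 'expected': 'ThingNotFound'},
 ],
 },
 },
 }
 demo_model = WaiterModel(demo_config)
 demo_waiter_config = demo_model.get_waiter('ThingExists')
 assert demo_waiter_config.operation == 'DescribeThing'
 path_acceptor = demo_waiter_config.acceptors[0]
 assert path_acceptor.matcher_func({'Thing': {'Status': 'READY'}})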
| 14,070 | Python | 37.236413 | 118 | 0.599005 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/configprovider.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains the inteface for controlling how configuration
is loaded.
"""
import logging
import os
from botocore import utils
logger = logging.getLogger(__name__)
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: These form the keys of the dictionary. The values in the dictionary
#: are tuples of (<config_name>, <environment variable>, <default value>,
#: <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
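#: For example (an illustrative reading of one entry below), the mapping
#: ``'region': ('region', 'AWS_DEFAULT_REGION', None, None)`` means the value
#: is looked up under the ``region`` config-file key or the
#: ``AWS_DEFAULT_REGION`` environment variable, with no default value and no
#: type conversion.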
BOTOCORE_DEFAUT_SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
'api_versions': ('api_versions', None, {}, None),
# This is the shared credentials file amongst sdks.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': (
'metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': (
'metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
'ec2_metadata_service_endpoint': (
'ec2_metadata_service_endpoint',
'AWS_EC2_METADATA_SERVICE_ENDPOINT',
None, None),
'imds_use_ipv6': (
'imds_use_ipv6',
'AWS_IMDS_USE_IPV6',
False, None),
'parameter_validation': ('parameter_validation', None, True, None),
# Client side monitoring configurations.
# Note: These configurations are considered internal to botocore.
# Do not use them until publicly documented.
'csm_enabled': (
'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean),
'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None),
'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int),
'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None),
# Endpoint discovery configuration
'endpoint_discovery_enabled': (
'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED',
'auto', None),
'sts_regional_endpoints': (
'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy',
None
),
'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None),
# We can't have a default here for v1 because we need to defer to
# whatever the defaults are in _retry.json.
'max_attempts': ('max_attempts', 'AWS_MAX_ATTEMPTS', None, int),
}
# A mapping for the s3 specific configuration vars. These are the configuration
# vars that typically go in the s3 section of the config file. This mapping
# follows the same schema as the previous session variable mapping.
DEFAULT_S3_CONFIG_VARS = {
'addressing_style': (
('s3', 'addressing_style'), None, None, None),
'use_accelerate_endpoint': (
('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean
),
'use_dualstack_endpoint': (
('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean
),
'payload_signing_enabled': (
('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean
),
'use_arn_region': (
['s3_use_arn_region',
('s3', 'use_arn_region')],
'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean
),
'us_east_1_regional_endpoint': (
['s3_us_east_1_regional_endpoint',
('s3', 'us_east_1_regional_endpoint')],
'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None
)
}
# A mapping for the proxy specific configuration vars. These are
# used to configure how botocore interacts with proxy setups while
# sending requests.
DEFAULT_PROXIES_CONFIG_VARS = {
'proxy_ca_bundle': ('proxy_ca_bundle', None, None, None),
'proxy_client_cert': ('proxy_client_cert', None, None, None),
'proxy_use_forwarding_for_https': (
'proxy_use_forwarding_for_https', None, None, utils.normalize_boolean),
}
def create_botocore_default_config_mapping(session):
chain_builder = ConfigChainFactory(session=session)
config_mapping = _create_config_chain_mapping(
chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES)
config_mapping['s3'] = SectionConfigProvider(
's3', session, _create_config_chain_mapping(
chain_builder, DEFAULT_S3_CONFIG_VARS)
)
config_mapping['proxies_config'] = SectionConfigProvider(
'proxies_config', session, _create_config_chain_mapping(
chain_builder, DEFAULT_PROXIES_CONFIG_VARS)
)
return config_mapping
def _create_config_chain_mapping(chain_builder, config_variables):
mapping = {}
for logical_name, config in config_variables.items():
mapping[logical_name] = chain_builder.create_config_chain(
instance_name=logical_name,
env_var_names=config[1],
config_property_names=config[0],
default=config[2],
conversion_func=config[3]
)
return mapping
class ConfigChainFactory(object):
"""Factory class to create our most common configuration chain case.
This is a convenience class to construct configuration chains that follow
our most common pattern. This is to prevent ordering them incorrectly,
and to make the config chain construction more readable.
"""
def __init__(self, session, environ=None):
"""Initialize a ConfigChainFactory.
:type session: :class:`botocore.session.Session`
:param session: This is the session that should be used to look up
values from the config file.
:type environ: dict
:param environ: A mapping to use for environment variables. If this
is not provided it will default to use os.environ.
"""
self._session = session
if environ is None:
environ = os.environ
self._environ = environ
def create_config_chain(self, instance_name=None, env_var_names=None,
config_property_names=None, default=None,
conversion_func=None):
"""Build a config chain following the standard botocore pattern.
 In botocore most of our config chains follow the precedence:
session_instance_variables, environment, config_file, default_value.
 This is a convenience function for creating a chain that follows
 that precedence.
:type instance_name: str
:param instance_name: This indicates what session instance variable
corresponds to this config value. If it is None it will not be
added to the chain.
:type env_var_names: str or list of str or None
:param env_var_names: One or more environment variable names to
search for this value. They are searched in order. If it is None
it will not be added to the chain.
:type config_property_names: str/tuple or list of str/tuple or None
 :param config_property_names: One or more strings or tuples
representing the name of the key in the config file for this
config option. They are searched in order. If it is None it will
not be added to the chain.
:type default: Any
:param default: Any constant value to be returned.
:type conversion_func: None or callable
:param conversion_func: If this value is None then it has no effect on
the return type. Otherwise, it is treated as a function that will
 be applied to convert the provided value.
 :rtype: ChainProvider
 :returns: A ChainProvider that resolves in the order env_var_names ->
 config_property_names -> default. Any values that were None are
 omitted from the chain.
"""
providers = []
if instance_name is not None:
providers.append(
InstanceVarProvider(
instance_var=instance_name,
session=self._session
)
)
if env_var_names is not None:
providers.extend(self._get_env_providers(env_var_names))
if config_property_names is not None:
providers.extend(
self._get_scoped_config_providers(config_property_names)
)
if default is not None:
providers.append(ConstantProvider(value=default))
return ChainProvider(
providers=providers,
conversion_func=conversion_func,
)
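 # Illustrative usage only (requires a real botocore session; the chain and
 # variable names below are examples):
 #
 # factory = ConfigChainFactory(session=session)
 # region_chain = factory.create_config_chain(
 # instance_name='region',
 # env_var_names='AWS_DEFAULT_REGION',
 # config_property_names='region',
 # )
 # region = region_chain.provide()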
def _get_env_providers(self, env_var_names):
env_var_providers = []
if not isinstance(env_var_names, list):
env_var_names = [env_var_names]
for env_var_name in env_var_names:
env_var_providers.append(
EnvironmentProvider(name=env_var_name, env=self._environ)
)
return env_var_providers
def _get_scoped_config_providers(self, config_property_names):
scoped_config_providers = []
if not isinstance(config_property_names, list):
config_property_names = [config_property_names]
for config_property_name in config_property_names:
scoped_config_providers.append(
ScopedConfigProvider(
config_var_name=config_property_name,
session=self._session,
)
)
return scoped_config_providers
class ConfigValueStore(object):
"""The ConfigValueStore object stores configuration values."""
def __init__(self, mapping=None):
"""Initialize a ConfigValueStore.
:type mapping: dict
:param mapping: The mapping parameter is a map of string to a subclass
of BaseProvider. When a config variable is asked for via the
get_config_variable method, the corresponding provider will be
invoked to load the value.
"""
self._overrides = {}
self._mapping = {}
if mapping is not None:
for logical_name, provider in mapping.items():
self.set_config_provider(logical_name, provider)
def get_config_variable(self, logical_name):
"""
 Retrieve the value associated with the specified logical_name
from the corresponding provider. If no value is found None will
be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
:returns: value of variable or None if not defined.
"""
if logical_name in self._overrides:
return self._overrides[logical_name]
if logical_name not in self._mapping:
return None
provider = self._mapping[logical_name]
return provider.provide()
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._overrides[logical_name] = value
def clear_config_variable(self, logical_name):
"""Remove an override config variable from the session.
:type logical_name: str
:param logical_name: The name of the parameter to clear the override
value from.
"""
self._overrides.pop(logical_name, None)
def set_config_provider(self, logical_name, provider):
"""Set the provider for a config value.
This provides control over how a particular configuration value is
loaded. This replaces the provider for ``logical_name`` with the new
``provider``.
:type logical_name: str
:param logical_name: The name of the config value to change the config
provider for.
:type provider: :class:`botocore.configprovider.BaseProvider`
:param provider: The new provider that should be responsible for
providing a value for the config named ``logical_name``.
"""
self._mapping[logical_name] = provider
class BaseProvider(object):
"""Base class for configuration value providers.
A configuration provider has some method of providing a configuration
value.
"""
def provide(self):
"""Provide a config value."""
raise NotImplementedError('provide')
class ChainProvider(BaseProvider):
"""This provider wraps one or more other providers.
 Each provider in the chain is called in order; the first non-None value
 returned is used.
"""
def __init__(self, providers=None, conversion_func=None):
"""Initalize a ChainProvider.
:type providers: list
:param providers: The initial list of providers to check for values
when invoked.
:type conversion_func: None or callable
 :param conversion_func: If this value is None then it has no effect on
the return type. Otherwise, it is treated as a function that will
transform provided value.
"""
if providers is None:
providers = []
self._providers = providers
self._conversion_func = conversion_func
def provide(self):
"""Provide the value from the first provider to return non-None.
Each provider in the chain has its provide method called. The first
 one in the chain to return a non-None value is returned from the
ChainProvider. When no non-None value is found, None is returned.
"""
for provider in self._providers:
value = provider.provide()
if value is not None:
return self._convert_type(value)
return None
def _convert_type(self, value):
if self._conversion_func is not None:
return self._conversion_func(value)
return value
def __repr__(self):
return '[%s]' % ', '.join([str(p) for p in self._providers])
class InstanceVarProvider(BaseProvider):
"""This class loads config values from the session instance vars."""
def __init__(self, instance_var, session):
"""Initialize InstanceVarProvider.
:type instance_var: str
:param instance_var: The instance variable to load from the session.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._instance_var = instance_var
self._session = session
def provide(self):
"""Provide a config value from the session instance vars."""
instance_vars = self._session.instance_variables()
value = instance_vars.get(self._instance_var)
return value
def __repr__(self):
return 'InstanceVarProvider(instance_var=%s, session=%s)' % (
self._instance_var,
self._session,
)
class ScopedConfigProvider(BaseProvider):
def __init__(self, config_var_name, session):
"""Initialize ScopedConfigProvider.
:type config_var_name: str or tuple
:param config_var_name: The name of the config variable to load from
the configuration file. If the value is a tuple, it must only
consist of two items, where the first item represents the section
and the second item represents the config var name in the section.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._config_var_name = config_var_name
self._session = session
def provide(self):
"""Provide a value from a config file property."""
scoped_config = self._session.get_scoped_config()
if isinstance(self._config_var_name, tuple):
section_config = scoped_config.get(self._config_var_name[0])
if not isinstance(section_config, dict):
return None
return section_config.get(self._config_var_name[1])
return scoped_config.get(self._config_var_name)
def __repr__(self):
return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % (
self._config_var_name,
self._session,
)
class EnvironmentProvider(BaseProvider):
"""This class loads config values from environment variables."""
def __init__(self, name, env):
"""Initialize with the keys in the dictionary to check.
:type name: str
:param name: The key with that name will be loaded and returned.
:type env: dict
:param env: Environment variables dictionary to get variables from.
"""
self._name = name
self._env = env
def provide(self):
"""Provide a config value from a source dictionary."""
if self._name in self._env:
return self._env[self._name]
return None
def __repr__(self):
return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env)
class SectionConfigProvider(BaseProvider):
"""Provides a dictionary from a section in the scoped config
This is useful for retrieving scoped config variables (i.e. s3) that have
their own set of config variables and resolving logic.
"""
def __init__(self, section_name, session, override_providers=None):
self._section_name = section_name
self._session = session
self._scoped_config_provider = ScopedConfigProvider(
self._section_name, self._session)
self._override_providers = override_providers
if self._override_providers is None:
self._override_providers = {}
def provide(self):
section_config = self._scoped_config_provider.provide()
if section_config and not isinstance(section_config, dict):
logger.debug("The %s config key is not a dictionary type, "
"ignoring its value of: %s", self._section_name,
section_config)
return None
for section_config_var, provider in self._override_providers.items():
provider_val = provider.provide()
if provider_val is not None:
if section_config is None:
section_config = {}
section_config[section_config_var] = provider_val
return section_config
def __repr__(self):
return (
'SectionConfigProvider(section_name=%s, '
'session=%s, override_providers=%s)' % (
self._section_name, self._session,
self._override_providers,
)
)
class ConstantProvider(BaseProvider):
"""This provider provides a constant value."""
def __init__(self, value):
self._value = value
def provide(self):
"""Provide the constant value given during initialization."""
return self._value
def __repr__(self):
return 'ConstantProvider(value=%s)' % self._value
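if __name__ == '__main__': # pragma: no cover
 # Minimal illustrative sketch (not part of the original module): resolve a
 # value through a provider chain and a ConfigValueStore. The variable and
 # environment names below are made up.
 demo_chain = ChainProvider(
 providers=[
 EnvironmentProvider(name='MY_APP_TIMEOUT', env={}),
 ConstantProvider(value='10'),
 ],
 conversion_func=int,
 )
 demo_store = ConfigValueStore(mapping={'timeout': demo_chain})
 assert demo_store.get_config_variable('timeout') == 10
 demo_store.set_config_variable('timeout', 5)
 assert demo_store.get_config_variable('timeout') == 5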
| 22,235 | Python | 38.636364 | 79 | 0.636609 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/endpoint.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
            # for the specified number of seconds.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None,
proxies_config=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
proxies_config=proxies_config
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
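# Behavior sketch (illustrative comments only; ``creator`` stands for a
# hypothetical EndpointCreator instance):
#
#     creator._get_verify_value(False)  # -> False, an explicit value always wins
#     creator._get_verify_value(None)   # -> REQUESTS_CA_BUNDLE value, or True if unset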
| 13,811 | Python | 41.109756 | 81 | 0.627543 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/validate.py | """User input parameter validation.
This module handles user input parameter validation
against a provided input model.
Note that the objects in this module do *not* mutate any
arguments. No type conversion happens here. It is up to another
layer to properly convert arguments to any required types.
Validation Errors
-----------------
"""
from botocore.compat import six
import decimal
import json
from datetime import datetime
from botocore.utils import parse_to_aware_datetime
from botocore.utils import is_json_value_header
from botocore.exceptions import ParamValidationError
def validate_parameters(params, shape):
"""Validates input parameters against a schema.
This is a convenience function that validates parameters against a schema.
You can also instantiate and use the ParamValidator class directly if you
want more control.
If there are any validation errors then a ParamValidationError
    will be raised. If there are no validation errors then no exception
is raised and a value of None is returned.
:param params: The user provided input parameters.
:type shape: botocore.model.Shape
:param shape: The schema which the input parameters should
adhere to.
:raise: ParamValidationError
"""
validator = ParamValidator()
report = validator.validate(params, shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
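# A minimal usage sketch (illustrative comments only). The session/model access
# mirrors examples found elsewhere in botocore; the partial input below is
# hypothetical and is expected to fail validation:
#
#     import botocore.session
#     session = botocore.session.get_session()
#     ddb = session.get_service_model('dynamodb')
#     input_shape = ddb.operation_model('CreateTable').input_shape
#     try:
#         validate_parameters({'TableName': 'my-table'}, input_shape)
#     except ParamValidationError as e:
#         print(e)  # reports the missing required members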
def type_check(valid_types):
def _create_type_check_guard(func):
def _on_passes_type_check(self, param, shape, errors, name):
if _type_check(param, errors, name):
return func(self, param, shape, errors, name)
def _type_check(param, errors, name):
if not isinstance(param, valid_types):
valid_type_names = [six.text_type(t) for t in valid_types]
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
return False
return True
return _on_passes_type_check
return _create_type_check_guard
def range_check(name, value, shape, error_type, errors):
failed = False
min_allowed = float('-inf')
if 'min' in shape.metadata:
min_allowed = shape.metadata['min']
if value < min_allowed:
failed = True
elif hasattr(shape, 'serialization'):
# Members that can be bound to the host have an implicit min of 1
if shape.serialization.get('hostLabel'):
min_allowed = 1
if value < min_allowed:
failed = True
if failed:
errors.report(name, error_type, param=value, min_allowed=min_allowed)
class ValidationErrors(object):
def __init__(self):
self._errors = []
def has_errors(self):
if self._errors:
return True
return False
def generate_report(self):
error_messages = []
for error in self._errors:
error_messages.append(self._format_error(error))
return '\n'.join(error_messages)
def _format_error(self, error):
error_type, name, additional = error
name = self._get_name(name)
if error_type == 'missing required field':
return 'Missing required parameter in %s: "%s"' % (
name, additional['required_name'])
elif error_type == 'unknown field':
return 'Unknown parameter in %s: "%s", must be one of: %s' % (
name, additional['unknown_param'],
', '.join(additional['valid_names']))
elif error_type == 'invalid type':
return 'Invalid type for parameter %s, value: %s, type: %s, ' \
'valid types: %s' % (name, additional['param'],
str(type(additional['param'])),
', '.join(additional['valid_types']))
elif error_type == 'invalid range':
min_allowed = additional['min_allowed']
return ('Invalid value for parameter %s, value: %s, '
'valid min value: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'invalid length':
min_allowed = additional['min_allowed']
return ('Invalid length for parameter %s, value: %s, '
'valid min length: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'unable to encode to json':
return 'Invalid parameter %s must be json serializable: %s' \
% (name, additional['type_error'])
def _get_name(self, name):
if not name:
return 'input'
elif name.startswith('.'):
return name[1:]
else:
return name
def report(self, name, reason, **kwargs):
self._errors.append((reason, name, kwargs))
class ParamValidator(object):
"""Validates parameters against a shape model."""
def validate(self, params, shape):
"""Validate parameters against a shape model.
This method will validate the parameters against a provided shape model.
All errors will be collected before returning to the caller. This means
        that this method will not stop at the first error; it will return all
possible errors.
:param params: User provided dict of parameters
:param shape: A shape model describing the expected input.
        :return: A ``ValidationErrors`` object with any collected errors.
"""
errors = ValidationErrors()
self._validate(params, shape, errors, name='')
return errors
def _check_special_validation_cases(self, shape):
if is_json_value_header(shape):
return self._validate_jsonvalue_string
def _validate(self, params, shape, errors, name):
special_validator = self._check_special_validation_cases(shape)
if special_validator:
special_validator(params, shape, errors, name)
else:
getattr(self, '_validate_%s' % shape.type_name)(
params, shape, errors, name)
def _validate_jsonvalue_string(self, params, shape, errors, name):
# Check to see if a value marked as a jsonvalue can be dumped to
# a json string.
try:
json.dumps(params)
except (ValueError, TypeError) as e:
errors.report(name, 'unable to encode to json', type_error=e)
@type_check(valid_types=(dict,))
def _validate_structure(self, params, shape, errors, name):
# Validate required fields.
for required_member in shape.metadata.get('required', []):
if required_member not in params:
errors.report(name, 'missing required field',
required_name=required_member, user_params=params)
members = shape.members
known_params = []
# Validate known params.
for param in params:
if param not in members:
errors.report(name, 'unknown field', unknown_param=param,
valid_names=list(members))
else:
known_params.append(param)
# Validate structure members.
for param in known_params:
self._validate(params[param], shape.members[param],
errors, '%s.%s' % (name, param))
@type_check(valid_types=six.string_types)
def _validate_string(self, param, shape, errors, name):
        # Validate range. For a string, the min/max constraints
        # apply to the length of the string.
# Looks like:
# "WorkflowId":{
# "type":"string",
# "min":1,
# "max":256
# }
range_check(name, len(param), shape, 'invalid length', errors)
@type_check(valid_types=(list, tuple))
def _validate_list(self, param, shape, errors, name):
member_shape = shape.member
range_check(name, len(param), shape, 'invalid length', errors)
for i, item in enumerate(param):
self._validate(item, member_shape, errors, '%s[%s]' % (name, i))
@type_check(valid_types=(dict,))
def _validate_map(self, param, shape, errors, name):
key_shape = shape.key
value_shape = shape.value
for key, value in param.items():
self._validate(key, key_shape, errors, "%s (key: %s)"
% (name, key))
self._validate(value, value_shape, errors, '%s.%s' % (name, key))
@type_check(valid_types=six.integer_types)
def _validate_integer(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_blob(self, param, shape, errors, name):
if isinstance(param, (bytes, bytearray, six.text_type)):
return
elif hasattr(param, 'read'):
# File like objects are also allowed for blob types.
return
else:
errors.report(name, 'invalid type', param=param,
valid_types=[str(bytes), str(bytearray),
'file-like object'])
@type_check(valid_types=(bool,))
def _validate_boolean(self, param, shape, errors, name):
pass
@type_check(valid_types=(float, decimal.Decimal) + six.integer_types)
def _validate_double(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
_validate_float = _validate_double
@type_check(valid_types=six.integer_types)
def _validate_long(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_timestamp(self, param, shape, errors, name):
# We don't use @type_check because datetimes are a bit
# more flexible. You can either provide a datetime
# object, or a string that parses to a datetime.
is_valid_type = self._type_check_datetime(param)
if not is_valid_type:
valid_type_names = [six.text_type(datetime), 'timestamp-string']
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
def _type_check_datetime(self, value):
try:
parse_to_aware_datetime(value)
return True
except (TypeError, ValueError, AttributeError):
# Yes, dateutil can sometimes raise an AttributeError
# when parsing timestamps.
return False
class ParamValidationDecorator(object):
def __init__(self, param_validator, serializer):
self._param_validator = param_validator
self._serializer = serializer
def serialize_to_request(self, parameters, operation_model):
input_shape = operation_model.input_shape
if input_shape is not None:
report = self._param_validator.validate(parameters,
operation_model.input_shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
return self._serializer.serialize_to_request(parameters,
operation_model)
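# Composition sketch (illustrative comments only). In practice
# ``botocore.serialize.create_serializer`` applies this decorator for you when
# ``include_validation=True``; wiring it manually would look roughly like:
#
#     from botocore import serialize
#     raw = serialize.create_serializer('json', include_validation=False)
#     validating = ParamValidationDecorator(ParamValidator(), raw)
#     request_dict = validating.serialize_to_request(params, operation_model)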
| 11,323 | Python | 37.256757 | 80 | 0.59463 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/utils.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import socket
import cgi
import warnings
import dateutil.parser
from dateutil.tz import tzutc
import botocore
import botocore.awsrequest
import botocore.httpsession
from botocore.compat import (
json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict,
six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE
)
from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass
from botocore.exceptions import (
InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError,
MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError,
ConnectionClosedError, ConnectTimeoutError, UnsupportedS3ArnError,
UnsupportedS3AccesspointConfigurationError, SSOTokenLoadError,
InvalidRegionError, InvalidIMDSEndpointError, UnsupportedOutpostResourceError,
UnsupportedS3ControlConfigurationError, UnsupportedS3ControlArnError,
InvalidHostLabelError, HTTPClientError, UnsupportedS3ConfigurationError,
)
from urllib3.exceptions import LocationParseError
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_BASE_URL = 'http://169.254.169.254/'
METADATA_BASE_URL_IPv6 = 'http://[fe80:ec2::254%eth0]/'
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RETRYABLE_HTTP_ERRORS = (
ReadTimeoutError, EndpointConnectionError, ConnectionClosedError,
ConnectTimeoutError,
)
S3_ACCELERATE_WHITELIST = ['dualstack']
# In switching events from using service name / endpoint prefix to service
# id, we have to preserve compatibility. This maps the instances where either
# is different than the transformed service id.
EVENT_ALIASES = {
"a4b": "alexa-for-business",
"alexaforbusiness": "alexa-for-business",
"api.mediatailor": "mediatailor",
"api.pricing": "pricing",
"api.sagemaker": "sagemaker",
"apigateway": "api-gateway",
"application-autoscaling": "application-auto-scaling",
"appstream2": "appstream",
"autoscaling": "auto-scaling",
"autoscaling-plans": "auto-scaling-plans",
"ce": "cost-explorer",
"cloudhsmv2": "cloudhsm-v2",
"cloudsearchdomain": "cloudsearch-domain",
"cognito-idp": "cognito-identity-provider",
"config": "config-service",
"cur": "cost-and-usage-report-service",
"data.iot": "iot-data-plane",
"data.jobs.iot": "iot-jobs-data-plane",
"data.mediastore": "mediastore-data",
"datapipeline": "data-pipeline",
"devicefarm": "device-farm",
"devices.iot1click": "iot-1click-devices-service",
"directconnect": "direct-connect",
"discovery": "application-discovery-service",
"dms": "database-migration-service",
"ds": "directory-service",
"dynamodbstreams": "dynamodb-streams",
"elasticbeanstalk": "elastic-beanstalk",
"elasticfilesystem": "efs",
"elasticloadbalancing": "elastic-load-balancing",
"elasticmapreduce": "emr",
"elastictranscoder": "elastic-transcoder",
"elb": "elastic-load-balancing",
"elbv2": "elastic-load-balancing-v2",
"email": "ses",
"entitlement.marketplace": "marketplace-entitlement-service",
"es": "elasticsearch-service",
"events": "eventbridge",
"cloudwatch-events": "eventbridge",
"iot-data": "iot-data-plane",
"iot-jobs-data": "iot-jobs-data-plane",
"iot1click-devices": "iot-1click-devices-service",
"iot1click-projects": "iot-1click-projects",
"kinesisanalytics": "kinesis-analytics",
"kinesisvideo": "kinesis-video",
"lex-models": "lex-model-building-service",
"lex-runtime": "lex-runtime-service",
"logs": "cloudwatch-logs",
"machinelearning": "machine-learning",
"marketplace-entitlement": "marketplace-entitlement-service",
"marketplacecommerceanalytics": "marketplace-commerce-analytics",
"metering.marketplace": "marketplace-metering",
"meteringmarketplace": "marketplace-metering",
"mgh": "migration-hub",
"models.lex": "lex-model-building-service",
"monitoring": "cloudwatch",
"mturk-requester": "mturk",
"opsworks-cm": "opsworkscm",
"projects.iot1click": "iot-1click-projects",
"resourcegroupstaggingapi": "resource-groups-tagging-api",
"route53": "route-53",
"route53domains": "route-53-domains",
"runtime.lex": "lex-runtime-service",
"runtime.sagemaker": "sagemaker-runtime",
"sdb": "simpledb",
"secretsmanager": "secrets-manager",
"serverlessrepo": "serverlessapplicationrepository",
"servicecatalog": "service-catalog",
"states": "sfn",
"stepfunctions": "sfn",
"storagegateway": "storage-gateway",
"streams.dynamodb": "dynamodb-streams",
"tagging": "resource-groups-tagging-api"
}
# Vendoring IPv6 validation regex patterns from urllib3
# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
def ensure_boolean(val):
"""Ensures a boolean value if a string or boolean is provided
For strings, the value for True/False is case insensitive
"""
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
def is_json_value_header(shape):
"""Determines if the provided shape is the special header type jsonvalue.
:type shape: botocore.shape
:param shape: Shape to be inspected for the jsonvalue trait.
:return: True if this type is a jsonvalue, False otherwise
:rtype: Bool
"""
return (hasattr(shape, 'serialization') and
shape.serialization.get('jsonvalue', False) and
shape.serialization.get('location') == 'header' and
shape.type_name == 'string')
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub(r'\W+', '', name)
return name
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def normalize_boolean(val):
"""Returns None if val is None, otherwise ensure value
converted to boolean"""
if val is None:
return val
else:
return ensure_boolean(val)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
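# Illustrative expectations (comments only; dot segments and duplicate
# slashes are collapsed):
#
#     remove_dot_segments('/a/b/../c')    # -> '/a/c'
#     remove_dot_segments('/a//b/./c/')   # -> '/a/b/c/'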
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
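# Illustrative sketch (comments only):
#
#     d = {}
#     set_value_from_jmespath(d, 'foo.bar.baz', 42)
#     # d is now {'foo': {'bar': {'baz': 42}}}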
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
class BadIMDSRequestError(Exception):
def __init__(self, request):
self.request = request
class IMDSFetcher(object):
_RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError
_TOKEN_PATH = 'latest/api/token'
_TOKEN_TTL = '21600'
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, base_url=METADATA_BASE_URL,
env=None, user_agent=None, config=None):
self._timeout = timeout
self._num_attempts = num_attempts
self._base_url = self._select_base_url(base_url, config)
if env is None:
env = os.environ.copy()
self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
self._disabled = self._disabled == 'true'
self._user_agent = user_agent
self._session = botocore.httpsession.URLLib3Session(
timeout=self._timeout,
proxies=get_environ_proxies(self._base_url),
)
def get_base_url(self):
return self._base_url
def _select_base_url(self, base_url, config):
if config is None:
config = {}
requires_ipv6 = ensure_boolean(config.get('imds_use_ipv6', False))
custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint')
if requires_ipv6 and custom_metadata_endpoint:
logger.warn("Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint.")
chosen_base_url = None
if base_url != METADATA_BASE_URL:
chosen_base_url = base_url
elif custom_metadata_endpoint:
chosen_base_url = custom_metadata_endpoint
elif requires_ipv6:
chosen_base_url = METADATA_BASE_URL_IPv6
else:
chosen_base_url = METADATA_BASE_URL
logger.debug("IMDS ENDPOINT: %s" % chosen_base_url)
if not is_valid_uri(chosen_base_url):
raise InvalidIMDSEndpointError(endpoint=chosen_base_url)
return chosen_base_url
def _fetch_metadata_token(self):
self._assert_enabled()
url = self._base_url + self._TOKEN_PATH
headers = {
'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
}
self._add_user_agent(headers)
request = botocore.awsrequest.AWSRequest(
method='PUT', url=url, headers=headers)
for i in range(self._num_attempts):
try:
response = self._session.send(request.prepare())
if response.status_code == 200:
return response.text
elif response.status_code in (404, 403, 405):
return None
elif response.status_code in (400,):
raise BadIMDSRequestError(request)
except ReadTimeoutError:
return None
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
except HTTPClientError as e:
if isinstance(e.kwargs.get('error'), LocationParseError):
raise InvalidIMDSEndpointError(endpoint=url, error=e)
else:
raise
return None
def _get_request(self, url_path, retry_func, token=None):
"""Make a get request to the Instance Metadata Service.
:type url_path: str
:param url_path: The path component of the URL to make a get request.
This arg is appended to the base_url that was provided in the
initializer.
:type retry_func: callable
:param retry_func: A function that takes the response as an argument
and determines if it needs to retry. By default empty and non
200 OK responses are retried.
:type token: str
:param token: Metadata token to send along with GET requests to IMDS.
"""
self._assert_enabled()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if token is not None:
headers['x-aws-ec2-metadata-token'] = token
self._add_user_agent(headers)
for i in range(self._num_attempts):
try:
request = botocore.awsrequest.AWSRequest(
method='GET', url=url, headers=headers)
response = self._session.send(request.prepare())
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _add_user_agent(self, headers):
if self._user_agent is not None:
headers['User-Agent'] = self._user_agent
def _assert_enabled(self):
if self._disabled:
logger.debug("Access to EC2 metadata has been disabled.")
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _default_retry(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _is_non_ok_response(self, response):
if response.status_code != 200:
self._log_imds_response(response, 'non-200', log_body=True)
return True
return False
def _is_empty(self, response):
if not response.content:
self._log_imds_response(response, 'no body', log_body=True)
return True
return False
def _log_imds_response(self, response, reason_to_log, log_body=False):
statement = (
"Metadata service returned %s response "
"with status code of %s for url: %s"
)
logger_args = [
reason_to_log, response.status_code, response.url
]
if log_body:
statement += ", content body: %s"
logger_args.append(response.content)
logger.debug(statement, *logger_args)
class InstanceMetadataFetcher(IMDSFetcher):
_URL_PATH = 'latest/meta-data/iam/security-credentials/'
_REQUIRED_CREDENTIAL_FIELDS = [
'AccessKeyId', 'SecretAccessKey', 'Token', 'Expiration'
]
def retrieve_iam_role_credentials(self):
try:
token = self._fetch_metadata_token()
role_name = self._get_iam_role(token)
credentials = self._get_credentials(role_name, token)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
# IMDS can return a 200 response that has a JSON formatted
# error message (i.e. if ec2 is not trusted entity for the
# attached role). We do not necessarily want to retry for
# these and we also do not necessarily want to raise a key
# error. So at least log the problematic response and return
# an empty dictionary to signal that it was not able to
# retrieve credentials. These error will contain both a
# Code and Message key.
if 'Code' in credentials and 'Message' in credentials:
                    logger.debug('Error response received when retrieving '
'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
except BadIMDSRequestError as e:
logger.debug("Bad IMDS request: %s", e.request)
return {}
def _get_iam_role(self, token=None):
return self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name,
token=token,
).text
def _get_credentials(self, role_name, token=None):
r = self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials,
token=token,
)
return json.loads(r.text)
def _is_invalid_json(self, response):
try:
json.loads(response.text)
return False
except ValueError:
self._log_imds_response(response, 'invalid json')
return True
def _needs_retry_for_role_name(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _needs_retry_for_credentials(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response) or
self._is_invalid_json(response)
)
def _contains_all_credential_fields(self, credentials):
for field in self._REQUIRED_CREDENTIAL_FIELDS:
if field not in credentials:
logger.debug(
'Retrieved credentials is missing required field: %s',
field)
return False
return True
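# Minimal usage sketch (comments only; meaningful only on an EC2 instance with
# an instance profile attached -- an empty dict is returned otherwise):
#
#     fetcher = InstanceMetadataFetcher(timeout=2, num_attempts=3)
#     creds = fetcher.retrieve_iam_role_credentials()
#     if creds:
#         print(creds['role_name'], creds['expiry_time'])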
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
            # For any other type, the value from the second dict
            # simply replaces the value in the first.
dict1[key] = dict2[key]
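# Illustrative sketch (comments only):
#
#     base = {'a': {'x': 1}, 'b': [1]}
#     merge_dicts(base, {'a': {'y': 2}, 'b': [2]}, append_lists=True)
#     # base is now {'a': {'x': 1, 'y': 2}, 'b': [1, 2]}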
def lowercase_dict(original):
"""Copies the given dictionary ensuring all keys are lowercase strings. """
copy = {}
for key in original:
copy[key.lower()] = original[key]
return copy
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
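# Illustrative sketch (comments only; lines without '=' are ignored):
#
#     parse_key_val_file_contents('AWSAccessKeyId = foo\nAWSSecretKey=bar\nnote')
#     # -> {'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'}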
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is the equivalent
to ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
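# Illustrative sketch (comments only; list values expand like urlencode's
# ``doseq=True``):
#
#     percent_encode_sequence({'k': 'a b', 'tags': ['x', 'y']})
#     # -> 'k=a%20b&tags=x&tags=y'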
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
If given the binary type, will simply URL encode it. If given the
text type, will produce the binary type by UTF-8 encoding the
text. If given something else, will convert it to the text type
first.
"""
    # If it's not a binary or text string, make it a text string.
if not isinstance(input_str, (six.binary_type, six.text_type)):
input_str = six.text_type(input_str)
# If it's not bytes, make it bytes by UTF-8 encoding it.
if not isinstance(input_str, six.binary_type):
input_str = input_str.encode('utf-8')
return quote(input_str, safe=safe)
def _parse_timestamp_with_tzinfo(value, tzinfo):
"""Parse timestamp with pluggable tzinfo options."""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzinfo())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzinfo())
except (TypeError, ValueError):
pass
try:
# In certain cases, a timestamp marked with GMT can be parsed into a
# different time zone, so here we provide a context which will
# enforce that GMT == UTC.
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
for tzinfo in get_tzinfo_options():
try:
return _parse_timestamp_with_tzinfo(value, tzinfo)
except OSError as e:
logger.debug('Unable to parse timestamp with "%s" timezone info.',
tzinfo.__name__, exc_info=e)
raise RuntimeError('Unable to calculate correct timezone offset for '
'"%s"' % value)
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
        # I think a case could be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
:param default_timezone: If it is provided as None, we treat it as tzutc().
But it is only used when dt is a naive datetime.
:returns: The timestamp
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
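# Illustrative sketch (comments only; naive datetimes are treated as UTC):
#
#     datetime2timestamp(datetime.datetime(1970, 1, 1, 0, 1))  # -> 60.0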
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
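# Illustrative sketch (comments only):
#
#     import io
#     calculate_sha256(io.BytesIO(b'hello'), as_hex=True)
#     # -> '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'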
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
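# Illustrative sketch (comments only; ``Example`` is a hypothetical class):
#
#     class Example(object):
#         @CachedProperty
#         def value(self):
#             print('computing')
#             return 42
#
#     e = Example()
#     e.value  # prints 'computing' and returns 42
#     e.value  # served from the instance __dict__, fget is not called again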
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
The specific values used are place holder values. For strings either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
if shape.enum:
return random.choice(shape.enum)
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name in ['float', 'double']:
return 0.0
elif shape.type_name == 'boolean':
return True
elif shape.type_name == 'timestamp':
return datetime.datetime(1970, 1, 1, 0, 0, 0)
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
        # For list elements we've arbitrarily decided to
        # return a single element for the skeleton list.
name = ''
if self._use_member_names:
name = shape.member.name
return [
self._generate_skeleton(shape.member, stack, name),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_ipv6_endpoint_url(endpoint_url):
netloc = urlparse(endpoint_url).netloc
return IPV6_ADDRZ_RE.match(netloc) is not None
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
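# Illustrative expectations (comments only; a regex match object is returned
# for valid URLs, which is truthy):
#
#     is_valid_endpoint_url('https://example.com')   # truthy
#     is_valid_endpoint_url('https://')               # False (no hostname)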
def is_valid_uri(endpoint_url):
return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(endpoint_url)
def validate_region_name(region_name):
"""Provided region_name must be a valid host label."""
if region_name is None:
return
valid_host_label = re.compile(r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$')
valid = valid_host_label.match(region_name)
if not valid:
raise InvalidRegionError(region_name=region_name)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
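# Illustrative expectations (comments only):
#
#     check_dns_name('my-bucket-01')  # -> True
#     check_dns_name('my.bucket')     # -> False, dots punt to path-style
#     check_dns_name('ab')            # -> False, too short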
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url=None, **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing.
"""
if request.context.get('use_global_endpoint', False):
default_endpoint_url = 's3.amazonaws.com'
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
    the signature version (which is taken into consideration for the default
    case). If the bucket is not DNS compatible an InvalidDNSNameError is raised.
    :param request: An AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
    # Retrieve the endpoint we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
This decorator is used to cache method calls. The cache is only
scoped to a single instance though such that multiple instances
will maintain their own cache. In order to keep things simple,
this decorator requires that you provide an ``_instance_cache``
attribute on your instance.
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
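# Illustrative sketch (comments only; ``expensive_lookup`` is a hypothetical
# helper and the instance must provide ``_instance_cache``):
#
#     class Client(object):
#         def __init__(self):
#             self._instance_cache = {}
#
#         @instance_cache
#         def describe(self, name):
#             return expensive_lookup(name)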
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
# Note that when registered the switching of the s3 host happens
# before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
def deep_merge(base, extra):
"""Deeply two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
for key in extra:
# If the key represents a dict on both given dicts, merge the sub-dicts
if key in base and isinstance(base[key], dict)\
and isinstance(extra[key], dict):
deep_merge(base[key], extra[key])
continue
# Otherwise, set the key on the base to be the value of the extra.
base[key] = extra[key]
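# Illustrative sketch (comments only):
#
#     base = {'retries': {'max_attempts': 3}, 'region': 'us-east-1'}
#     deep_merge(base, {'retries': {'mode': 'standard'}})
#     # base -> {'retries': {'max_attempts': 3, 'mode': 'standard'},
#     #          'region': 'us-east-1'}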
def hyphenize_service_id(service_id):
"""Translate the form used for event emitters.
:param service_id: The service_id to convert.
"""
return service_id.replace(' ', '-').lower()
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if self._is_s3_accesspoint(request_dict.get('context', {})):
logger.debug(
'S3 request was previously to an accesspoint, not redirecting.'
)
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
        find the region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
if self._is_s3_accesspoint(context):
return
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
def _is_s3_accesspoint(self, context):
return 's3_accesspoint' in context
class InvalidArnException(ValueError):
pass
class ArnParser(object):
def parse_arn(self, arn):
arn_parts = arn.split(':', 5)
if len(arn_parts) < 6:
raise InvalidArnException(
'Provided ARN: %s must be of the format: '
'arn:partition:service:region:account:resource' % arn
)
return {
'partition': arn_parts[1],
'service': arn_parts[2],
'region': arn_parts[3],
'account': arn_parts[4],
'resource': arn_parts[5],
}
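# Illustrative sketch (added commentary, not original source): given a
# hypothetical access point ARN, parse_arn splits on ':' at most five times,
# so the resource segment keeps any embedded colons or slashes.
#
#   >>> ArnParser().parse_arn(
#   ...     'arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint')
#   {'partition': 'aws', 'service': 's3', 'region': 'us-west-2',
#    'account': '123456789012', 'resource': 'accesspoint/myendpoint'}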
class S3ArnParamHandler(object):
_RESOURCE_REGEX = re.compile(
r'^(?P<resource_type>accesspoint|outpost)[/:](?P<resource_name>.+)$'
)
_OUTPOST_RESOURCE_REGEX = re.compile(
r'^(?P<outpost_name>[a-zA-Z0-9\-]{1,63})[/:]accesspoint[/:]'
r'(?P<accesspoint_name>[a-zA-Z0-9\-]{1,63}$)'
)
_BLACKLISTED_OPERATIONS = [
'CreateBucket'
]
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register('before-parameter-build.s3', self.handle_arn)
def handle_arn(self, params, model, context, **kwargs):
if model.name in self._BLACKLISTED_OPERATIONS:
return
arn_details = self._get_arn_details_from_bucket_param(params)
if arn_details is None:
return
if arn_details['resource_type'] == 'accesspoint':
self._store_accesspoint(params, context, arn_details)
elif arn_details['resource_type'] == 'outpost':
self._store_outpost(params, context, arn_details)
def _get_arn_details_from_bucket_param(self, params):
if 'Bucket' in params:
try:
arn = params['Bucket']
arn_details = self._arn_parser.parse_arn(arn)
self._add_resource_type_and_name(arn, arn_details)
return arn_details
except InvalidArnException:
pass
return None
def _add_resource_type_and_name(self, arn, arn_details):
match = self._RESOURCE_REGEX.match(arn_details['resource'])
if match:
arn_details['resource_type'] = match.group('resource_type')
arn_details['resource_name'] = match.group('resource_name')
else:
raise UnsupportedS3ArnError(arn=arn)
def _store_accesspoint(self, params, context, arn_details):
# Ideally the access-point would be stored as a parameter in the
# request where the serializer would then know how to serialize it,
# but access-points are not modeled in S3 operations so it would fail
# validation. Instead, we set the access-point to the bucket parameter
# to have some value set when serializing the request and additional
# information on the context from the arn to use in forming the
# access-point endpoint.
params['Bucket'] = arn_details['resource_name']
context['s3_accesspoint'] = {
'name': arn_details['resource_name'],
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
def _store_outpost(self, params, context, arn_details):
resource_name = arn_details['resource_name']
match = self._OUTPOST_RESOURCE_REGEX.match(resource_name)
if not match:
raise UnsupportedOutpostResourceError(resource_name=resource_name)
# Because we need to set the bucket name to something to pass
# validation we're going to use the access point name to be consistent
# with normal access point arns.
accesspoint_name = match.group('accesspoint_name')
params['Bucket'] = accesspoint_name
context['s3_accesspoint'] = {
'outpost_name': match.group('outpost_name'),
'name': accesspoint_name,
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
class S3EndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3', self.set_endpoint)
event_emitter.register(
'before-call.s3.WriteGetObjectResponse',
self.update_endpoint_to_s3_object_lambda
)
def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs):
if self._use_accelerate_endpoint:
raise UnsupportedS3ConfigurationError(
msg='S3 client does not support accelerate endpoints for S3 Object Lambda operations',
)
self._override_signing_name(context, 's3-object-lambda')
if self._endpoint_url:
# Only update the url if an explicit url was not provided
return
resolver = self._endpoint_resolver
resolved = resolver.construct_endpoint('s3-object-lambda', self._region)
# Ideally we would be able to replace the endpoint before
# serialization but there's no event to do that currently
new_endpoint = 'https://{host_prefix}{hostname}'.format(
host_prefix=params['host_prefix'],
hostname=resolved['hostname'],
)
params['url'] = _get_new_endpoint(params['url'], new_endpoint, False)
def set_endpoint(self, request, **kwargs):
if self._use_accesspoint_endpoint(request):
self._validate_accesspoint_supported(request)
region_name = self._resolve_region_for_accesspoint_endpoint(
request)
self._resolve_signing_name_for_accesspoint_endpoint(
request)
self._switch_to_accesspoint_endpoint(request, region_name)
return
if self._use_accelerate_endpoint:
switch_host_s3_accelerate(request=request, **kwargs)
if self._s3_addressing_handler:
self._s3_addressing_handler(request=request, **kwargs)
def _use_accesspoint_endpoint(self, request):
return 's3_accesspoint' in request.context
def _validate_accesspoint_supported(self, request):
if self._use_accelerate_endpoint:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 accelerate configuration '
'when an access-point ARN is specified.'
)
)
request_partition = request.context['s3_accesspoint']['partition']
if request_partition != self._partition:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client is configured for "%s" partition, but access-point'
' ARN provided is for "%s" partition. The client and '
                    'access-point partition must be the same.' % (
self._partition, request_partition)
)
)
s3_service = request.context['s3_accesspoint'].get('service')
if s3_service == 's3-object-lambda' and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an S3 Object Lambda access point ARN is specified.'
)
)
outpost_name = request.context['s3_accesspoint'].get('outpost_name')
if outpost_name and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost ARN is specified.'
)
)
def _resolve_region_for_accesspoint_endpoint(self, request):
if self._s3_config.get('use_arn_region', True):
accesspoint_region = request.context['s3_accesspoint']['region']
# If we are using the region from the access point,
# we will also want to make sure that we set it as the
# signing region as well
self._override_signing_region(request, accesspoint_region)
return accesspoint_region
return self._region
def _resolve_signing_name_for_accesspoint_endpoint(self, request):
accesspoint_service = request.context['s3_accesspoint']['service']
self._override_signing_name(request.context, accesspoint_service)
def _switch_to_accesspoint_endpoint(self, request, region_name):
original_components = urlsplit(request.url)
accesspoint_endpoint = urlunsplit((
original_components.scheme,
self._get_accesspoint_netloc(request.context, region_name),
self._get_accesspoint_path(
original_components.path, request.context),
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, accesspoint_endpoint))
request.url = accesspoint_endpoint
def _get_accesspoint_netloc(self, request_context, region_name):
s3_accesspoint = request_context['s3_accesspoint']
accesspoint_netloc_components = [
'%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']),
]
outpost_name = s3_accesspoint.get('outpost_name')
if self._endpoint_url:
if outpost_name:
accesspoint_netloc_components.append(outpost_name)
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
accesspoint_netloc_components.append(endpoint_url_netloc)
else:
if outpost_name:
outpost_host = [outpost_name, 's3-outposts']
accesspoint_netloc_components.extend(outpost_host)
elif s3_accesspoint['service'] == 's3-object-lambda':
accesspoint_netloc_components.append('s3-object-lambda')
else:
accesspoint_netloc_components.append('s3-accesspoint')
if self._s3_config.get('use_dualstack_endpoint'):
accesspoint_netloc_components.append('dualstack')
accesspoint_netloc_components.extend(
[
region_name,
self._get_dns_suffix(region_name)
]
)
return '.'.join(accesspoint_netloc_components)
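    # Illustrative sketch (added commentary, not original source): for a plain
    # access point named 'myendpoint' on account 123456789012 in us-west-2,
    # with no custom endpoint_url, outpost, or dualstack configuration, and
    # assuming the default DNS suffix, the components joined above produce:
    #
    #   myendpoint-123456789012.s3-accesspoint.us-west-2.amazonaws.com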
def _get_accesspoint_path(self, original_path, request_context):
# The Bucket parameter was substituted with the access-point name as
# some value was required in serializing the bucket name. Now that
# we are making the request directly to the access point, we will
# want to remove that access-point name from the path.
name = request_context['s3_accesspoint']['name']
# All S3 operations require at least a / in their path.
return original_path.replace('/' + name, '', 1) or '/'
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, context, signing_name):
signing_context = context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
context['signing'] = signing_context
@CachedProperty
def _use_accelerate_endpoint(self):
        # Enable accelerate if the configuration is set to true or the
# endpoint being used matches one of the accelerate endpoints.
# Accelerate has been explicitly configured.
if self._s3_config.get('use_accelerate_endpoint'):
return True
# Accelerate mode is turned on automatically if an endpoint url is
# provided that matches the accelerate scheme.
if self._endpoint_url is None:
return False
# Accelerate is only valid for Amazon endpoints.
netloc = urlsplit(self._endpoint_url).netloc
if not netloc.endswith('amazonaws.com'):
return False
# The first part of the url should always be s3-accelerate.
parts = netloc.split('.')
if parts[0] != 's3-accelerate':
return False
# Url parts between 's3-accelerate' and 'amazonaws.com' which
# represent different url features.
feature_parts = parts[1:-2]
# There should be no duplicate url parts.
if len(feature_parts) != len(set(feature_parts)):
return False
# Remaining parts must all be in the whitelist.
return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts)
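    # Illustrative sketch (added commentary, not original source): an endpoint
    # URL of 'https://s3-accelerate.amazonaws.com' yields parts[0] ==
    # 's3-accelerate' and no feature parts, so this property returns True.
    # 'https://s3-accelerate.dualstack.amazonaws.com' also returns True,
    # assuming 'dualstack' appears in the module-level S3_ACCELERATE_WHITELIST.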
@CachedProperty
def _addressing_style(self):
# Use virtual host style addressing if accelerate is enabled or if
# the given endpoint url is an accelerate endpoint.
if self._use_accelerate_endpoint:
return 'virtual'
# If a particular addressing style is configured, use it.
configured_addressing_style = self._s3_config.get('addressing_style')
if configured_addressing_style:
return configured_addressing_style
@CachedProperty
def _s3_addressing_handler(self):
# If virtual host style was configured, use it regardless of whether
# or not the bucket looks dns compatible.
if self._addressing_style == 'virtual':
logger.debug("Using S3 virtual host style addressing.")
return switch_to_virtual_host_style
# If path style is configured, no additional steps are needed. If
# endpoint_url was specified, don't default to virtual. We could
# potentially default provided endpoint urls to virtual hosted
# style, but for now it is avoided.
if self._addressing_style == 'path' or self._endpoint_url is not None:
logger.debug("Using S3 path style addressing.")
return None
logger.debug("Defaulting to S3 virtual host style addressing with "
"path style addressing fallback.")
# By default, try to use virtual style with path fallback.
return fix_s3_host
class S3ControlEndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
_HOST_LABEL_REGEX = re.compile(r'^[a-zA-Z0-9\-]{1,63}$')
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3-control', self.set_endpoint)
def set_endpoint(self, request, **kwargs):
if self._use_endpoint_from_arn_details(request):
self._validate_endpoint_from_arn_details_supported(request)
region_name = self._resolve_region_from_arn_details(request)
self._resolve_signing_name_from_arn_details(request)
self._resolve_endpoint_from_arn_details(request, region_name)
self._add_headers_from_arn_details(request)
elif self._use_endpoint_from_outpost_id(request):
self._validate_outpost_redirection_valid(request)
outpost_id = request.context['outpost_id']
self._override_signing_name(request, 's3-outposts')
new_netloc = self._construct_outpost_endpoint(self._region)
self._update_request_netloc(request, new_netloc)
def _use_endpoint_from_arn_details(self, request):
return 'arn_details' in request.context
def _use_endpoint_from_outpost_id(self, request):
return 'outpost_id' in request.context
def _validate_endpoint_from_arn_details_supported(self, request):
if not self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
if arn_region != self._region:
error_msg = (
'The use_arn_region configuration is disabled but '
'received arn for "%s" when the client is configured '
'to use "%s"'
) % (arn_region, self._region)
raise UnsupportedS3ControlConfigurationError(msg=error_msg)
        request_partition = request.context['arn_details']['partition']
        if request_partition != self._partition:
            raise UnsupportedS3ControlConfigurationError(
                msg=(
                    'Client is configured for "%s" partition, but arn '
                    'provided is for "%s" partition. The client and '
                    'arn partition must be the same.' % (
                        self._partition, request_partition)
                )
            )
if self._s3_config.get('use_accelerate_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg='S3 control client does not support accelerate endpoints',
)
if 'outpost_name' in request.context['arn_details']:
self._validate_outpost_redirection_valid(request)
def _validate_outpost_redirection_valid(self, request):
if self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost is specified.'
)
)
def _resolve_region_from_arn_details(self, request):
if self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
# If we are using the region from the expanded arn, we will also
# want to make sure that we set it as the signing region as well
self._override_signing_region(request, arn_region)
return arn_region
return self._region
def _resolve_signing_name_from_arn_details(self, request):
arn_service = request.context['arn_details']['service']
self._override_signing_name(request, arn_service)
return arn_service
def _resolve_endpoint_from_arn_details(self, request, region_name):
new_netloc = self._resolve_netloc_from_arn_details(request, region_name)
self._update_request_netloc(request, new_netloc)
def _update_request_netloc(self, request, new_netloc):
original_components = urlsplit(request.url)
arn_details_endpoint = urlunsplit((
original_components.scheme,
new_netloc,
original_components.path,
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, arn_details_endpoint)
)
request.url = arn_details_endpoint
def _resolve_netloc_from_arn_details(self, request, region_name):
arn_details = request.context['arn_details']
if 'outpost_name' in arn_details:
return self._construct_outpost_endpoint(region_name)
account = arn_details['account']
return self._construct_s3_control_endpoint(region_name, account)
def _is_valid_host_label(self, label):
return self._HOST_LABEL_REGEX.match(label)
def _validate_host_labels(self, *labels):
for label in labels:
if not self._is_valid_host_label(label):
raise InvalidHostLabelError(label=label)
def _construct_s3_control_endpoint(self, region_name, account):
self._validate_host_labels(region_name, account)
if self._endpoint_url:
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
netloc = [account, endpoint_url_netloc]
else:
netloc = [
account,
's3-control',
]
self._add_dualstack(netloc)
dns_suffix = self._get_dns_suffix(region_name)
netloc.extend([region_name, dns_suffix])
return self._construct_netloc(netloc)
def _construct_outpost_endpoint(self, region_name):
self._validate_host_labels(region_name)
if self._endpoint_url:
return urlsplit(self._endpoint_url).netloc
else:
netloc = [
's3-outposts',
region_name,
self._get_dns_suffix(region_name),
]
return self._construct_netloc(netloc)
def _construct_netloc(self, netloc):
return '.'.join(netloc)
def _add_dualstack(self, netloc):
if self._s3_config.get('use_dualstack_endpoint'):
netloc.append('dualstack')
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, request, signing_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
request.context['signing'] = signing_context
def _add_headers_from_arn_details(self, request):
arn_details = request.context['arn_details']
outpost_name = arn_details.get('outpost_name')
if outpost_name:
self._add_outpost_id_header(request, outpost_name)
def _add_outpost_id_header(self, request, outpost_name):
request.headers['x-amz-outpost-id'] = outpost_name
class S3ControlArnParamHandler(object):
_RESOURCE_SPLIT_REGEX = re.compile(r'[/:]')
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register(
'before-parameter-build.s3-control',
self.handle_arn,
)
def handle_arn(self, params, model, context, **kwargs):
if model.name in ('CreateBucket', 'ListRegionalBuckets'):
# CreateBucket and ListRegionalBuckets are special cases that do
# not obey ARN based redirection but will redirect based off of the
# presence of the OutpostId parameter
self._handle_outpost_id_param(params, model, context)
else:
self._handle_name_param(params, model, context)
self._handle_bucket_param(params, model, context)
def _get_arn_details_from_param(self, params, param_name):
if param_name not in params:
return None
try:
arn = params[param_name]
arn_details = self._arn_parser.parse_arn(arn)
arn_details['original'] = arn
arn_details['resources'] = self._split_resource(arn_details)
return arn_details
except InvalidArnException:
return None
def _split_resource(self, arn_details):
return self._RESOURCE_SPLIT_REGEX.split(arn_details['resource'])
def _override_account_id_param(self, params, arn_details):
account_id = arn_details['account']
if 'AccountId' in params and params['AccountId'] != account_id:
error_msg = (
'Account ID in arn does not match the AccountId parameter '
'provided: "%s"'
) % params['AccountId']
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
params['AccountId'] = account_id
def _handle_outpost_id_param(self, params, model, context):
if 'OutpostId' not in params:
return
context['outpost_id'] = params['OutpostId']
def _handle_name_param(self, params, model, context):
# CreateAccessPoint is a special case that does not expand Name
if model.name == 'CreateAccessPoint':
return
arn_details = self._get_arn_details_from_param(params, 'Name')
if arn_details is None:
return
if self._is_outpost_accesspoint(arn_details):
self._store_outpost_accesspoint(params, context, arn_details)
else:
error_msg = 'The Name parameter does not support the provided ARN'
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_accesspoint(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/accesspoint/name
return resources[0] == 'outpost' and resources[2] == 'accesspoint'
def _store_outpost_accesspoint(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
accesspoint_name = arn_details['resources'][3]
params['Name'] = accesspoint_name
arn_details['accesspoint_name'] = accesspoint_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
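    # Illustrative sketch (added commentary, not original source): a
    # hypothetical outpost access point ARN such as
    #   arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/accesspoint/myaccesspoint
    # splits into resources ['outpost', 'op-0123456789abcdef0', 'accesspoint',
    # 'myaccesspoint'], so params['Name'] becomes 'myaccesspoint',
    # params['AccountId'] becomes '123456789012', and the outpost name is
    # carried to the endpoint setter via context['arn_details'].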
def _handle_bucket_param(self, params, model, context):
arn_details = self._get_arn_details_from_param(params, 'Bucket')
if arn_details is None:
return
if self._is_outpost_bucket(arn_details):
self._store_outpost_bucket(params, context, arn_details)
else:
error_msg = (
'The Bucket parameter does not support the provided ARN'
)
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_bucket(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/bucket/name
return resources[0] == 'outpost' and resources[2] == 'bucket'
def _store_outpost_bucket(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
bucket_name = arn_details['resources'][3]
params['Bucket'] = bucket_name
arn_details['bucket_name'] = bucket_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
_ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = botocore.httpsession.URLLib3Session(
timeout=self.TIMEOUT_SECONDS
)
self._session = session
self._sleep = sleep
def retrieve_full_uri(self, full_url, headers=None):
"""Retrieve JSON metadata from container metadata.
:type full_url: str
:param full_url: The full URL of the metadata service.
This should include the scheme as well, e.g
"http://localhost:123/foo"
"""
self._validate_allowed_url(full_url)
return self._retrieve_credentials(full_url, headers)
def _validate_allowed_url(self, full_url):
parsed = botocore.compat.urlparse(full_url)
is_whitelisted_host = self._check_if_whitelisted_host(
parsed.hostname)
if not is_whitelisted_host:
raise ValueError(
"Unsupported host '%s'. Can only "
"retrieve metadata from these hosts: %s" %
(parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
def _check_if_whitelisted_host(self, host):
if host in self._ALLOWED_HOSTS:
return True
return False
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return self._retrieve_credentials(full_url)
def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
AWSRequest = botocore.awsrequest.AWSRequest
request = AWSRequest(method='GET', url=full_url, headers=headers)
response = self._session.send(request.prepare())
response_text = response.content.decode('utf-8')
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%s) from ECS metadata: %s"
) % (response.status_code, response_text))
try:
return json.loads(response_text)
except ValueError:
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, response_text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
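    # Illustrative sketch (added commentary, not original source): with the
    # fixed IP_ADDRESS above, a relative URI such as '/v2/credentials/abc'
    # expands to 'http://169.254.170.2/v2/credentials/abc' before the request
    # is sent via retrieve_uri().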
def get_environ_proxies(url):
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
# NOTE: requests allowed for ip/cidr entries in no_proxy env that we don't
    # support currently, as urllib only checks the DNS suffix
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
if proxy_bypass(urlparse(url).netloc):
return True
except (TypeError, socket.gaierror):
pass
return False
def get_encoding_from_headers(headers, default='ISO-8859-1'):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:param default: default encoding if the content-type is text
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return default
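# Illustrative sketch (added commentary, not original source): for headers of
# {'content-type': 'text/html; charset=utf-8'} this returns 'utf-8'; for a
# bare 'text/plain' content type it falls back to the ISO-8859-1 default, and
# it returns None when no content-type header is present.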
def calculate_md5(body, **kwargs):
if isinstance(body, (bytes, bytearray)):
binary_md5 = _calculate_md5_from_bytes(body)
else:
binary_md5 = _calculate_md5_from_file(body)
return base64.b64encode(binary_md5).decode('ascii')
def _calculate_md5_from_bytes(body_bytes):
md5 = get_md5(body_bytes)
return md5.digest()
def _calculate_md5_from_file(fileobj):
start_position = fileobj.tell()
md5 = get_md5()
for chunk in iter(lambda: fileobj.read(1024 * 1024), b''):
md5.update(chunk)
fileobj.seek(start_position)
return md5.digest()
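# Illustrative sketch (added commentary, not original source): calculate_md5
# returns the base64-encoded MD5 digest, so calculate_md5(b'foo') should
# yield 'rL0Y20zC+Fzt72VPzMSk2A=='. File-like bodies are read in 1 MB chunks
# and the stream position is restored afterwards.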
def conditionally_calculate_md5(params, **kwargs):
"""Only add a Content-MD5 if the system supports it."""
headers = params['headers']
body = params['body']
if MD5_AVAILABLE and body is not None and 'Content-MD5' not in headers:
md5_digest = calculate_md5(body, **kwargs)
params['headers']['Content-MD5'] = md5_digest
class FileWebIdentityTokenLoader(object):
def __init__(self, web_identity_token_path, _open=open):
self._web_identity_token_path = web_identity_token_path
self._open = _open
def __call__(self):
with self._open(self._web_identity_token_path) as token_file:
return token_file.read()
class SSOTokenLoader(object):
def __init__(self, cache=None):
if cache is None:
cache = {}
self._cache = cache
def _generate_cache_key(self, start_url):
return hashlib.sha1(start_url.encode('utf-8')).hexdigest()
def __call__(self, start_url):
cache_key = self._generate_cache_key(start_url)
try:
token = self._cache[cache_key]
return token['accessToken']
except KeyError:
logger.debug('Failed to load SSO token:', exc_info=True)
error_msg = (
'The SSO access token has either expired or is otherwise '
'invalid.'
)
raise SSOTokenLoadError(error_msg=error_msg)
| 88,236 | Python | 37.414018 | 102 | 0.612437 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/serialize.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and it returns
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
import calendar
import datetime
from xml.etree import ElementTree
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore.utils import is_json_value_header
from botocore.utils import conditionally_calculate_md5
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about
parts of the HTTP request, it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'host_prefix'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'host_prefix': 'value.',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': {},
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
if isinstance(value, datetime.datetime):
value = self._timestamp_unixtimestamp(value)
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value, timestamp_format=None):
if timestamp_format is None:
timestamp_format = self.TIMESTAMP_FORMAT
timestamp_format = timestamp_format.lower()
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % timestamp_format)
final_value = converter(datetime_obj)
return final_value
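    # Illustrative sketch (added commentary, not original source): for a UTC
    # datetime of 2015-01-01 00:00:00, the supported formats serialize as:
    #   iso8601       -> '2015-01-01T00:00:00Z'
    #   unixtimestamp -> 1420070400
    #   rfc822        -> 'Thu, 01 Jan 2015 00:00:00 GMT'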
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
def _expand_host_prefix(self, parameters, operation_model):
operation_endpoint = operation_model.endpoint
if operation_endpoint is None:
return None
host_prefix_expression = operation_endpoint['hostPrefix']
input_members = operation_model.input_shape.members
host_labels = [
member for member, shape in input_members.items()
if shape.serialization.get('hostLabel')
]
format_kwargs = dict((name, parameters[name]) for name in host_labels)
return host_prefix_expression.format(**format_kwargs)
def _prepare_additional_traits(self, request, operation_model):
"""Determine if additional traits are required for given model"""
if operation_model.http_checksum_required:
conditionally_calculate_md5(request)
return request
class QuerySerializer(Serializer):
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
serialized['body'] = body_params
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (i.e Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
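    # Illustrative sketch (added commentary, not original source): a
    # non-flattened list param named 'Values' with value ['a', 'b'] serializes
    # to {'Values.member.1': 'a', 'Values.member.2': 'b'}, while a flattened
    # list drops the 'member' segment: {'Values.1': 'a', 'Values.2': 'b'}.
    # An empty list serializes to {'Values': ''}.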
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
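    # Illustrative sketch (added commentary, not original source): a
    # non-flattened map param named 'Attributes' with value {'color': 'red'}
    # serializes to {'Attributes.entry.1.key': 'color',
    # 'Attributes.entry.1.value': 'red'}; flattened maps omit the '.entry'
    # segment.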
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
The EC2 model is almost, but not exactly, similar to the query protocol
serializer. This class encapsulates those differences. The model
    will have been marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class JSONSerializer(Serializer):
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
json_version = operation_model.metadata['jsonVersion']
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/x-amz-json-%s' % json_version,
}
body = self.MAP_TYPE()
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same and logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601'
HEADER_TIMESTAMP_FORMAT = 'rfc822'
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` key holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
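    # Illustrative sketch (added commentary, not original source): rendering
    # '/{Bucket}/{Key+}' with {'Bucket': 'mybucket', 'Key': 'photos/cat 1.jpg'}
    # produces '/mybucket/photos/cat%201.jpg' -- the greedy 'Key+' label keeps
    # its forward slashes while other characters are percent-encoded.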
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
            # If there's a payload member, we serialize that
            # member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
# This takes the user provided input parameter (``param``)
# and figures out where they go in the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
elif isinstance(param_value, bool):
partitioned['query_string_kwargs'][
key_name] = str(param_value).lower()
elif member.type_name == 'timestamp':
timestamp_format = member.serialization.get(
'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT)
partitioned['query_string_kwargs'][
key_name] = self._convert_timestamp_to_str(
param_value, timestamp_format
)
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
# 'headers' is a bit of an oddball. The ``key_name``
# is actually really a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
timestamp_format = shape.serialization.get(
'timestampFormat', self.HEADER_TIMESTAMP_FORMAT)
return self._convert_timestamp_to_str(timestamp, timestamp_format)
elif is_json_value_header(shape):
# Serialize with no spaces after separators to save space in
# the header.
return self._get_base64(json.dumps(value, separators=(',', ':')))
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
# For scalar types, the 'params' attr is actually just a scalar
# value representing the data we need to serialize as a boolean.
# It will either be 'true' or 'false'
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(
params, shape.serialization.get('timestampFormat'))
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
SERIALIZERS = {
'ec2': EC2Serializer,
'query': QuerySerializer,
'json': JSONSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
}
| 30,430 | Python | 42.164539 | 79 | 0.616891 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/model.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore.utils import CachedProperty, instance_cache, hyphenize_service_id
from botocore.compat import OrderedDict
from botocore.exceptions import MissingServiceIdError
from botocore.exceptions import UndefinedModelAttributeError
NOT_SET = object()
class NoShapeFoundError(Exception):
pass
class InvalidShapeError(Exception):
pass
class OperationNotFoundError(Exception):
pass
class InvalidShapeReferenceError(Exception):
pass
class ServiceId(str):
def hyphenize(self):
return hyphenize_service_id(self)
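    # Illustrative sketch (added commentary, not original source): assuming
    # hyphenize_service_id lower-cases and replaces spaces with hyphens,
    # ServiceId('API Gateway').hyphenize() would return 'api-gateway'.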
class Shape(object):
"""Object representing a shape from the service model."""
# To simplify serialization logic, all shape params that are
# related to serialization are moved from the top level hash into
# a 'serialization' hash. This list below contains the names of all
# the attributes that should be moved.
SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
'payload', 'streaming', 'timestampFormat',
'xmlNamespace', 'resultWrapper', 'xmlAttribute',
'eventstream', 'event', 'eventheader', 'eventpayload',
                        'jsonvalue', 'hostLabel']
METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum',
'idempotencyToken', 'error', 'exception',
'endpointdiscoveryid', 'retryable']
MAP_TYPE = OrderedDict
def __init__(self, shape_name, shape_model, shape_resolver=None):
"""
:type shape_name: string
:param shape_name: The name of the shape.
:type shape_model: dict
:param shape_model: The shape model. This would be the value
associated with the key in the "shapes" dict of the
service model (i.e ``model['shapes'][shape_name]``)
:type shape_resolver: botocore.model.ShapeResolver
:param shape_resolver: A shape resolver object. This is used to
resolve references to other shapes. For scalar shape types
(string, integer, boolean, etc.), this argument is not
required. If a shape_resolver is not provided for a complex
type, then a ``ValueError`` will be raised when an attempt
to resolve a shape is made.
"""
self.name = shape_name
self.type_name = shape_model['type']
self.documentation = shape_model.get('documentation', '')
self._shape_model = shape_model
if shape_resolver is None:
# If a shape_resolver is not provided, we create an object
# that will throw errors if you attempt to resolve
# a shape. This is actually ok for scalar shapes
# because they don't need to resolve shapes and shouldn't
# be required to provide an object they won't use.
shape_resolver = UnresolvableShapeMap()
self._shape_resolver = shape_resolver
self._cache = {}
@CachedProperty
def serialization(self):
"""Serialization information about the shape.
This contains information that may be needed for input serialization
or response parsing. This can include:
* name
* queryName
* flattened
* location
* payload
* streaming
* xmlNamespace
* resultWrapper
* xmlAttribute
* jsonvalue
* timestampFormat
:rtype: dict
:return: Serialization information about the shape.
"""
model = self._shape_model
serialization = {}
for attr in self.SERIALIZED_ATTRS:
if attr in self._shape_model:
serialization[attr] = model[attr]
# For consistency, locationName is renamed to just 'name'.
if 'locationName' in serialization:
serialization['name'] = serialization.pop('locationName')
return serialization
@CachedProperty
def metadata(self):
"""Metadata about the shape.
        This contains optional information about the shape, including:
* min
* max
* enum
* sensitive
* required
* idempotencyToken
:rtype: dict
:return: Metadata about the shape.
"""
model = self._shape_model
metadata = {}
for attr in self.METADATA_ATTRS:
if attr in self._shape_model:
metadata[attr] = model[attr]
return metadata
@CachedProperty
def required_members(self):
"""A list of members that are required.
A structure shape can define members that are required.
This value will return a list of required members. If there
are no required members an empty list is returned.
"""
return self.metadata.get('required', [])
def _resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
def __repr__(self):
return "<%s(%s)>" % (self.__class__.__name__,
self.name)
@property
def event_stream_name(self):
return None
class StructureShape(Shape):
@CachedProperty
def members(self):
members = self._shape_model['members']
# The members dict looks like:
# 'members': {
# 'MemberName': {'shape': 'shapeName'},
# 'MemberName2': {'shape': 'shapeName'},
# }
# We return a dict of member name to Shape object.
shape_members = self.MAP_TYPE()
for name, shape_ref in members.items():
shape_members[name] = self._resolve_shape_ref(shape_ref)
return shape_members
@CachedProperty
def event_stream_name(self):
for member_name, member in self.members.items():
if member.serialization.get('eventstream'):
return member_name
return None
@CachedProperty
def error_code(self):
if not self.metadata.get('exception', False):
return None
error_metadata = self.metadata.get("error", {})
code = error_metadata.get("code")
if code:
return code
# Use the exception name if there is no explicit code modeled
return self.name
class ListShape(Shape):
@CachedProperty
def member(self):
return self._resolve_shape_ref(self._shape_model['member'])
class MapShape(Shape):
@CachedProperty
def key(self):
return self._resolve_shape_ref(self._shape_model['key'])
@CachedProperty
def value(self):
return self._resolve_shape_ref(self._shape_model['value'])
class StringShape(Shape):
@CachedProperty
def enum(self):
return self.metadata.get('enum', [])
class ServiceModel(object):
"""
:ivar service_description: The parsed service description dictionary.
"""
def __init__(self, service_description, service_name=None):
"""
:type service_description: dict
:param service_description: The service description model. This value
is obtained from a botocore.loader.Loader, or from directly loading
the file yourself::
service_description = json.load(
open('/path/to/service-description-model.json'))
model = ServiceModel(service_description)
:type service_name: str
:param service_name: The name of the service. Normally this is
the endpoint prefix defined in the service_description. However,
you can override this value to provide a more convenient name.
This is done in a few places in botocore (ses instead of email,
emr instead of elasticmapreduce). If this value is not provided,
it will default to the endpointPrefix defined in the model.
"""
self._service_description = service_description
# We want clients to be able to access metadata directly.
self.metadata = service_description.get('metadata', {})
self._shape_resolver = ShapeResolver(
service_description.get('shapes', {}))
self._signature_version = NOT_SET
self._service_name = service_name
self._instance_cache = {}
def shape_for(self, shape_name, member_traits=None):
return self._shape_resolver.get_shape_by_name(
shape_name, member_traits)
def shape_for_error_code(self, error_code):
return self._error_code_cache.get(error_code, None)
@CachedProperty
def _error_code_cache(self):
error_code_cache = {}
for error_shape in self.error_shapes:
code = error_shape.error_code
error_code_cache[code] = error_shape
return error_code_cache
def resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
@CachedProperty
def shape_names(self):
return list(self._service_description.get('shapes', {}))
@CachedProperty
def error_shapes(self):
error_shapes = []
for shape_name in self.shape_names:
error_shape = self.shape_for(shape_name)
if error_shape.metadata.get('exception', False):
error_shapes.append(error_shape)
return error_shapes
@instance_cache
def operation_model(self, operation_name):
try:
model = self._service_description['operations'][operation_name]
except KeyError:
raise OperationNotFoundError(operation_name)
return OperationModel(model, self, operation_name)
@CachedProperty
def documentation(self):
return self._service_description.get('documentation', '')
@CachedProperty
def operation_names(self):
return list(self._service_description.get('operations', []))
@CachedProperty
def service_name(self):
"""The name of the service.
This defaults to the endpointPrefix defined in the service model.
        However, this value can be overridden when a ``ServiceModel`` is
created. If a service_name was not provided when the ``ServiceModel``
was created and if there is no endpointPrefix defined in the
service model, then an ``UndefinedModelAttributeError`` exception
will be raised.
"""
if self._service_name is not None:
return self._service_name
else:
return self.endpoint_prefix
@CachedProperty
def service_id(self):
try:
return ServiceId(self._get_metadata_property('serviceId'))
except UndefinedModelAttributeError:
raise MissingServiceIdError(
service_name=self._service_name
)
@CachedProperty
def signing_name(self):
"""The name to use when computing signatures.
If the model does not define a signing name, this
value will be the endpoint prefix defined in the model.
"""
signing_name = self.metadata.get('signingName')
if signing_name is None:
signing_name = self.endpoint_prefix
return signing_name
@CachedProperty
def api_version(self):
return self._get_metadata_property('apiVersion')
@CachedProperty
def protocol(self):
return self._get_metadata_property('protocol')
@CachedProperty
def endpoint_prefix(self):
return self._get_metadata_property('endpointPrefix')
@CachedProperty
def endpoint_discovery_operation(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if model.is_endpoint_discovery_operation:
return model
@CachedProperty
def endpoint_discovery_required(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if (model.endpoint_discovery is not None and
model.endpoint_discovery.get('required')):
return True
return False
def _get_metadata_property(self, name):
try:
return self.metadata[name]
except KeyError:
raise UndefinedModelAttributeError(
'"%s" not defined in the metadata of the model: %s' %
(name, self))
# Signature version is one of the rare properties
# than can be modified so a CachedProperty is not used here.
@property
def signature_version(self):
if self._signature_version is NOT_SET:
signature_version = self.metadata.get('signatureVersion')
self._signature_version = signature_version
return self._signature_version
@signature_version.setter
def signature_version(self, value):
self._signature_version = value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.service_name)
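# Illustrative sketch (not part of botocore): constructing a ServiceModel from
# a hand-written, deliberately minimal service description. All values below
# are placeholders.
def _example_minimal_service_model():
    description = {
        'metadata': {'endpointPrefix': 'demo', 'protocol': 'json',
                     'apiVersion': '2020-01-01'},
        'operations': {},
        'shapes': {},
    }
    model = ServiceModel(description, service_name='demo')
    return model.service_name, model.protocol, model.operation_names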
class OperationModel(object):
def __init__(self, operation_model, service_model, name=None):
"""
:type operation_model: dict
:param operation_model: The operation model. This comes from the
service model, and is the value associated with the operation
name in the service model (i.e ``model['operations'][op_name]``).
:type service_model: botocore.model.ServiceModel
:param service_model: The service model associated with the operation.
:type name: string
:param name: The operation name. This is the operation name exposed to
the users of this model. This can potentially be different from
the "wire_name", which is the operation name that *must* by
provided over the wire. For example, given::
"CreateCloudFrontOriginAccessIdentity":{
"name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
...
}
The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
but the ``self.wire_name`` would be
``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
value we must send in the corresponding HTTP request.
"""
self._operation_model = operation_model
self._service_model = service_model
self._api_name = name
# Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
self._wire_name = operation_model.get('name')
self.metadata = service_model.metadata
self.http = operation_model.get('http', {})
@CachedProperty
def name(self):
if self._api_name is not None:
return self._api_name
else:
return self.wire_name
@property
def wire_name(self):
"""The wire name of the operation.
        In many situations this is the same value as the
        ``name`` value, but in some services the operation name
        exposed to the user is different from the operation name
        we send across the wire (e.g. cloudfront).
Any serialization code should use ``wire_name``.
"""
return self._operation_model.get('name')
@property
def service_model(self):
return self._service_model
@CachedProperty
def documentation(self):
return self._operation_model.get('documentation', '')
@CachedProperty
def deprecated(self):
return self._operation_model.get('deprecated', False)
@CachedProperty
def endpoint_discovery(self):
# Explicit None default. An empty dictionary for this trait means it is
# enabled but not required to be used.
return self._operation_model.get('endpointdiscovery', None)
@CachedProperty
def is_endpoint_discovery_operation(self):
return self._operation_model.get('endpointoperation', False)
@CachedProperty
def input_shape(self):
if 'input' not in self._operation_model:
# Some operations do not accept any input and do not define an
# input shape.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['input'])
@CachedProperty
def output_shape(self):
if 'output' not in self._operation_model:
# Some operations do not define an output shape,
# in which case we return None to indicate the
# operation has no expected output.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['output'])
@CachedProperty
def idempotent_members(self):
input_shape = self.input_shape
if not input_shape:
return []
return [name for (name, shape) in input_shape.members.items()
if 'idempotencyToken' in shape.metadata and
shape.metadata['idempotencyToken']]
@CachedProperty
def auth_type(self):
return self._operation_model.get('authtype')
@CachedProperty
def error_shapes(self):
shapes = self._operation_model.get("errors", [])
return list(self._service_model.resolve_shape_ref(s) for s in shapes)
@CachedProperty
def endpoint(self):
return self._operation_model.get('endpoint')
@CachedProperty
def http_checksum_required(self):
return self._operation_model.get('httpChecksumRequired', False)
@CachedProperty
def has_event_stream_input(self):
return self.get_event_stream_input() is not None
@CachedProperty
def has_event_stream_output(self):
return self.get_event_stream_output() is not None
def get_event_stream_input(self):
return self._get_event_stream(self.input_shape)
def get_event_stream_output(self):
return self._get_event_stream(self.output_shape)
def _get_event_stream(self, shape):
"""Returns the event stream member's shape if any or None otherwise."""
if shape is None:
return None
event_name = shape.event_stream_name
if event_name:
return shape.members[event_name]
return None
@CachedProperty
def has_streaming_input(self):
return self.get_streaming_input() is not None
@CachedProperty
def has_streaming_output(self):
return self.get_streaming_output() is not None
def get_streaming_input(self):
return self._get_streaming_body(self.input_shape)
def get_streaming_output(self):
return self._get_streaming_body(self.output_shape)
def _get_streaming_body(self, shape):
"""Returns the streaming member's shape if any; or None otherwise."""
if shape is None:
return None
payload = shape.serialization.get('payload')
if payload is not None:
payload_shape = shape.members[payload]
if payload_shape.type_name == 'blob':
return payload_shape
return None
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
class ShapeResolver(object):
"""Resolves shape references."""
# Any type not in this mapping will default to the Shape class.
SHAPE_CLASSES = {
'structure': StructureShape,
'list': ListShape,
'map': MapShape,
'string': StringShape
}
def __init__(self, shape_map):
self._shape_map = shape_map
self._shape_cache = {}
def get_shape_by_name(self, shape_name, member_traits=None):
try:
shape_model = self._shape_map[shape_name]
except KeyError:
raise NoShapeFoundError(shape_name)
try:
shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
except KeyError:
raise InvalidShapeError("Shape is missing required key 'type': %s"
% shape_model)
if member_traits:
shape_model = shape_model.copy()
shape_model.update(member_traits)
result = shape_cls(shape_name, shape_model, self)
return result
def resolve_shape_ref(self, shape_ref):
# A shape_ref is a dict that has a 'shape' key that
# refers to a shape name as well as any additional
# member traits that are then merged over the shape
# definition. For example:
# {"shape": "StringType", "locationName": "Foobar"}
if len(shape_ref) == 1 and 'shape' in shape_ref:
# It's just a shape ref with no member traits, we can avoid
# a .copy(). This is the common case so it's specifically
# called out here.
return self.get_shape_by_name(shape_ref['shape'])
else:
member_traits = shape_ref.copy()
try:
shape_name = member_traits.pop('shape')
except KeyError:
raise InvalidShapeReferenceError(
"Invalid model, missing shape reference: %s" % shape_ref)
return self.get_shape_by_name(shape_name, member_traits)
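# Illustrative sketch (not part of botocore): resolving a shape reference whose
# member traits are merged over the referenced shape. The tiny shape map below
# is made up for demonstration.
def _example_resolve_shape_ref():
    resolver = ShapeResolver({'StringType': {'type': 'string'}})
    shape = resolver.resolve_shape_ref(
        {'shape': 'StringType', 'locationName': 'Foobar'})
    # 'locationName' is surfaced as serialization['name'] by the Shape class.
    return shape.serialization.get('name')  # -> 'Foobar'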
class UnresolvableShapeMap(object):
"""A ShapeResolver that will throw ValueErrors when shapes are resolved.
"""
def get_shape_by_name(self, shape_name, member_traits=None):
raise ValueError("Attempted to lookup shape '%s', but no shape "
"map was provided.")
def resolve_shape_ref(self, shape_ref):
raise ValueError("Attempted to resolve shape '%s', but no shape "
"map was provided.")
class DenormalizedStructureBuilder(object):
"""Build a StructureShape from a denormalized model.
This is a convenience builder class that makes it easy to construct
``StructureShape``s based on a denormalized model.
It will handle the details of creating unique shape names and creating
the appropriate shape map needed by the ``StructureShape`` class.
Example usage::
builder = DenormalizedStructureBuilder()
shape = builder.with_members({
'A': {
'type': 'structure',
'members': {
'B': {
'type': 'structure',
'members': {
'C': {
'type': 'string',
}
}
}
}
}
}).build_model()
# ``shape`` is now an instance of botocore.model.StructureShape
:type dict_type: class
:param dict_type: The dictionary type to use, allowing you to opt-in
to using OrderedDict or another dict type. This can
be particularly useful for testing when order
matters, such as for documentation.
"""
def __init__(self, name=None):
        self._members = OrderedDict()
        self._name_generator = ShapeNameGenerator()
        # Use the provided name as-is; otherwise generate a unique one.
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name
def with_members(self, members):
"""
:type members: dict
:param members: The denormalized members.
:return: self
"""
self._members = members
return self
def build_model(self):
"""Build the model based on the provided members.
:rtype: botocore.model.StructureShape
:return: The built StructureShape object.
"""
shapes = OrderedDict()
denormalized = {
'type': 'structure',
'members': self._members,
}
self._build_model(denormalized, shapes, self.name)
resolver = ShapeResolver(shape_map=shapes)
return StructureShape(shape_name=self.name,
shape_model=shapes[self.name],
shape_resolver=resolver)
def _build_model(self, model, shapes, shape_name):
if model['type'] == 'structure':
shapes[shape_name] = self._build_structure(model, shapes)
elif model['type'] == 'list':
shapes[shape_name] = self._build_list(model, shapes)
elif model['type'] == 'map':
shapes[shape_name] = self._build_map(model, shapes)
elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
'timestamp', 'long', 'double', 'char']:
shapes[shape_name] = self._build_scalar(model)
else:
raise InvalidShapeError("Unknown shape type: %s" % model['type'])
def _build_structure(self, model, shapes):
members = OrderedDict()
shape = self._build_initial_shape(model)
shape['members'] = members
for name, member_model in model['members'].items():
member_shape_name = self._get_shape_name(member_model)
members[name] = {'shape': member_shape_name}
self._build_model(member_model, shapes, member_shape_name)
return shape
def _build_list(self, model, shapes):
member_shape_name = self._get_shape_name(model)
shape = self._build_initial_shape(model)
shape['member'] = {'shape': member_shape_name}
self._build_model(model['member'], shapes, member_shape_name)
return shape
def _build_map(self, model, shapes):
key_shape_name = self._get_shape_name(model['key'])
value_shape_name = self._get_shape_name(model['value'])
shape = self._build_initial_shape(model)
shape['key'] = {'shape': key_shape_name}
shape['value'] = {'shape': value_shape_name}
self._build_model(model['key'], shapes, key_shape_name)
self._build_model(model['value'], shapes, value_shape_name)
return shape
def _build_initial_shape(self, model):
shape = {
'type': model['type'],
}
if 'documentation' in model:
shape['documentation'] = model['documentation']
for attr in Shape.METADATA_ATTRS:
if attr in model:
shape[attr] = model[attr]
return shape
def _build_scalar(self, model):
return self._build_initial_shape(model)
def _get_shape_name(self, model):
if 'shape_name' in model:
return model['shape_name']
else:
return self._name_generator.new_shape_name(model['type'])
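# Illustrative sketch (not part of botocore): the builder also accepts list and
# map members, generating shape names automatically unless 'shape_name' is
# given in the member model.
def _example_denormalized_list_member():
    shape = DenormalizedStructureBuilder().with_members({
        'Tags': {
            'type': 'list',
            'member': {'type': 'string'},
        },
    }).build_model()
    return shape.members['Tags'].member.type_name  # -> 'string'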
class ShapeNameGenerator(object):
"""Generate unique shape names for a type.
This class can be used in conjunction with the DenormalizedStructureBuilder
to generate unique shape names for a given type.
"""
def __init__(self):
self._name_cache = defaultdict(int)
def new_shape_name(self, type_name):
"""Generate a unique shape name.
This method will guarantee a unique shape name each time it is
called with the same type.
::
>>> s = ShapeNameGenerator()
>>> s.new_shape_name('structure')
'StructureType1'
>>> s.new_shape_name('structure')
'StructureType2'
>>> s.new_shape_name('list')
'ListType1'
>>> s.new_shape_name('list')
'ListType2'
:type type_name: string
:param type_name: The type name (structure, list, map, string, etc.)
:rtype: string
:return: A unique shape name for the given type
"""
self._name_cache[type_name] += 1
current_index = self._name_cache[type_name]
return '%sType%s' % (type_name.capitalize(),
current_index)
| 28,352 | Python | 33.367273 | 79 | 0.60553 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retryhandler.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.exceptions import (
ChecksumError, EndpointConnectionError, ReadTimeoutError,
ConnectionError, ConnectionClosedError,
)
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ConnectionClosedError, ReadTimeoutError,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
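# Illustrative sketch (not part of botocore): how the backoff formula above
# behaves for the first few attempts. With base=1 and growth_factor=2 the
# delays are 1s, 2s and 4s for attempts 1..3.
def _example_exponential_delays():
    return [delay_exponential(base=1, growth_factor=2, attempts=n)
            for n in (1, 2, 3)]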
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_config = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
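# Illustrative sketch (not part of botocore): the shape of a minimal retry
# config that create_retry_handler above understands. This policy retries
# HTTP 500 responses with exponential backoff; the values are made up.
_EXAMPLE_RETRY_CONFIG = {
    '__default__': {
        'max_attempts': 5,
        'delay': {'type': 'exponential', 'base': 'rand', 'growth_factor': 2},
        'policies': {
            'general_server_error': {
                'applies_when': {'response': {'http_status_code': 500}},
            },
        },
    },
}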
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
raise ValueError("Unknown retry policy: %s" % config)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
The retry handler takes two params, ``checker`` object
and an ``action`` object.
The ``checker`` object must be a callable object and based on a response
and an attempt number, determines whether or not sufficient criteria for
a retry has been met. If this is the case then the ``action`` object
(which also is a callable) determines what needs to happen in the event
of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
    Each class is responsible for checking a single criterion that determines
    whether or not a retry should happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
:param caught_exception: Any exception that was caught while trying to
send the HTTP response.
        :return: True, if the retry criteria matches (and therefore a retry
            should occur). False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return self._check_caught_exception(
attempt_number, caught_exception)
else:
raise ValueError("Both response and caught_exception are None.")
def _check_response(self, attempt_number, response):
pass
def _check_caught_exception(self, attempt_number, caught_exception):
pass
class MaxAttemptsDecorator(BaseChecker):
"""Allow retries up to a maximum number of attempts.
This will pass through calls to the decorated retry checker, provided
that the number of attempts does not exceed max_attempts. It will
also catch any retryable_exceptions passed in. Once max_attempts has
been exceeded, then False will be returned or the retryable_exceptions
that was previously being caught will be raised.
"""
def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._checker = checker
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
def __call__(self, attempt_number, response, caught_exception):
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
if attempt_number >= self._max_attempts:
# explicitly set MaxAttemptsReached
if response is not None and 'ResponseMetadata' in response[1]:
response[1]['ResponseMetadata']['MaxAttemptsReached'] = True
logger.debug("Reached the maximum number of retry "
"attempts: %s", attempt_number)
return False
else:
return should_retry
else:
return False
def _should_retry(self, attempt_number, response, caught_exception):
if self._retryable_exceptions and \
attempt_number < self._max_attempts:
try:
return self._checker(attempt_number, response, caught_exception)
except self._retryable_exceptions as e:
logger.debug("retry needed, retryable exception caught: %s",
e, exc_info=True)
return True
else:
# If we've exceeded the max attempts we just let the exception
            # propagate if one has occurred.
return self._checker(attempt_number, response, caught_exception)
class HTTPStatusCodeChecker(BaseChecker):
def __init__(self, status_code):
self._status_code = status_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
logger.debug(
"retry needed: retryable HTTP status code received: %s",
self._status_code)
return True
else:
return False
class ServiceErrorCodeChecker(BaseChecker):
def __init__(self, status_code, error_code):
self._status_code = status_code
self._error_code = error_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
actual_error_code = response[1].get('Error', {}).get('Code')
if actual_error_code == self._error_code:
logger.debug(
"retry needed: matching HTTP status and error code seen: "
"%s, %s", self._status_code, self._error_code)
return True
return False
class MultiChecker(BaseChecker):
def __init__(self, checkers):
self._checkers = checkers
def __call__(self, attempt_number, response, caught_exception):
for checker in self._checkers:
checker_response = checker(attempt_number, response,
caught_exception)
if checker_response:
return checker_response
return False
class CRC32Checker(BaseChecker):
def __init__(self, header):
# The header where the expected crc32 is located.
self._header_name = header
def _check_response(self, attempt_number, response):
http_response = response[0]
expected_crc = http_response.headers.get(self._header_name)
if expected_crc is None:
logger.debug("crc32 check skipped, the %s header is not "
"in the http response.", self._header_name)
else:
actual_crc32 = crc32(response[0].content) & 0xffffffff
            if actual_crc32 != int(expected_crc):
logger.debug(
"retry needed: crc32 check failed, expected != actual: "
"%s != %s", int(expected_crc), actual_crc32)
raise ChecksumError(checksum_type='crc32',
expected_checksum=int(expected_crc),
actual_checksum=actual_crc32)
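# Illustrative sketch (not part of botocore): the unsigned 32-bit comparison
# used by CRC32Checker above. Masking with 0xffffffff keeps crc32() results
# consistent across platforms and Python versions.
def _example_crc32_matches(body, expected_header_value):
    return (crc32(body) & 0xffffffff) == int(expected_header_value)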
class ExceptionRaiser(BaseChecker):
"""Raise any caught exceptions.
This class will raise any non None ``caught_exception``.
"""
def _check_caught_exception(self, attempt_number, caught_exception):
# This is implementation specific, but this class is useful by
# coordinating with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
        # then this exception just propagates out past the retry code.
raise caught_exception
| 13,781 | Python | 37.283333 | 80 | 0.642261 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/credentials.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
import botocore.configloader
import botocore.compat
from botocore import UNSIGNED
from botocore.compat import total_seconds
from botocore.compat import compat_shell_split
from botocore.config import Config
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import InfiniteLoopConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.exceptions import MetadataRetrievalError
from botocore.exceptions import CredentialRetrievalError
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from botocore.utils import ContainerMetadataFetcher
from botocore.utils import FileWebIdentityTokenLoader
from botocore.utils import SSOTokenLoader
logger = logging.getLogger(__name__)
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None, region_name=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
disable_env_vars = session.instance_variables().get('profile') is not None
imds_config = {
'ec2_metadata_service_endpoint': session.get_config_variable(
'ec2_metadata_service_endpoint'),
'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
}
if cache is None:
cache = {}
env_provider = EnvProvider()
container_provider = ContainerProvider()
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent(),
config=imds_config)
)
profile_provider_builder = ProfileProviderBuilder(
session, cache=cache, region_name=region_name)
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=_get_client_creator(session, region_name),
cache=cache,
profile_name=profile_name,
credential_sourcer=CanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
]),
profile_provider_builder=profile_provider_builder,
)
pre_profile = [
env_provider,
assume_role_provider,
]
profile_providers = profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
post_profile = [
OriginalEC2Provider(),
BotoProvider(),
container_provider,
instance_metadata_provider,
]
providers = pre_profile + profile_providers + post_profile
if disable_env_vars:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
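# Illustrative sketch (not part of botocore): the resolver built above walks
# its providers in order (environment, assume role, per-profile providers,
# legacy config files, container, then instance metadata) and returns the
# first credentials it finds.
def _example_resolve(session):
    resolver = create_credential_resolver(session)
    return resolver.load_credentials()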
class ProfileProviderBuilder(object):
"""This class handles the creation of profile based providers.
NOTE: This class is only intended for internal use.
This class handles the creation and ordering of the various credential
    providers that primarily source their configuration from the shared config.
This is needed to enable sharing between the default credential chain and
the source profile chain created by the assume role provider.
"""
def __init__(self, session, cache=None, region_name=None,
sso_token_cache=None):
self._session = session
self._cache = cache
self._region_name = region_name
self._sso_token_cache = sso_token_cache
def providers(self, profile_name, disable_env_vars=False):
return [
self._create_web_identity_provider(
profile_name, disable_env_vars,
),
self._create_sso_provider(profile_name),
self._create_shared_credential_provider(profile_name),
self._create_process_provider(profile_name),
self._create_config_provider(profile_name),
]
def _create_process_provider(self, profile_name):
return ProcessProvider(
profile_name=profile_name,
load_config=lambda: self._session.full_config,
)
def _create_shared_credential_provider(self, profile_name):
credential_file = self._session.get_config_variable('credentials_file')
return SharedCredentialProvider(
profile_name=profile_name,
creds_filename=credential_file,
)
def _create_config_provider(self, profile_name):
config_file = self._session.get_config_variable('config_file')
return ConfigProvider(
profile_name=profile_name,
config_filename=config_file,
)
def _create_web_identity_provider(self, profile_name, disable_env_vars):
return AssumeRoleWithWebIdentityProvider(
load_config=lambda: self._session.full_config,
client_creator=_get_client_creator(
self._session, self._region_name),
cache=self._cache,
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
def _create_sso_provider(self, profile_name):
return SSOProvider(
load_config=lambda: self._session.full_config,
client_creator=self._session.create_client,
profile_name=profile_name,
cache=self._cache,
token_cache=self._sso_token_cache,
)
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def _get_client_creator(session, region_name):
def client_creator(service_name, **kwargs):
create_client_kwargs = {
'region_name': region_name
}
create_client_kwargs.update(**kwargs)
return session.create_client(service_name, **create_client_kwargs)
return client_creator
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
def __call__(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return self._refresh()
return _Refresher(actual_refresh)
class JSONFileCache(object):
"""JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
"""
CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))
def __init__(self, working_dir=CACHE_DIR, dumps_func=None):
self._working_dir = working_dir
if dumps_func is None:
dumps_func = self._default_dumps
self._dumps = dumps_func
def _default_dumps(self, obj):
return json.dumps(obj, default=_serialize_if_needed)
def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = self._dumps(value)
except (TypeError, ValueError):
raise ValueError("Value cannot be cached, must be "
"JSON serializable: %s" % value)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
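# Illustrative sketch (not part of botocore): JSONFileCache behaves like a
# dict keyed by strings, persisting each value as <key>.json under its
# working directory. The directory below is a placeholder.
def _example_json_file_cache(tmp_dir='/tmp/example-boto-cache'):
    cache = JSONFileCache(working_dir=tmp_dir)
    cache['demo-key'] = {'hello': 'world'}
    return 'demo-key' in cache and cache['demo-key']['hello'] == 'world'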
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
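# Illustrative sketch (not part of botocore): static credentials and the
# immutable view handed to signers. The key values are placeholders.
def _example_static_credentials():
    creds = Credentials('AKIDEXAMPLE', 'secret-example', method='explicit')
    # Returns a ReadOnlyCredentials namedtuple of (access_key, secret_key, token).
    return creds.get_frozen_credentials()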
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
# The time at which we'll attempt to refresh, but not
# block if someone else is refreshing.
_advisory_refresh_timeout = 15 * 60
# The time at which all threads will block waiting for
# refreshed credentials.
_mandatory_refresh_timeout = 10 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = ReadOnlyCredentials(
access_key, secret_key, token)
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
        provided ``refresh_in``. If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
:return: True if refresh needed, False otherwise.
"""
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # within the refresh window.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
# Checks if the current credentials are expired.
return self.refresh_needed(refresh_in=0)
def _refresh(self):
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self.refresh_needed(self._advisory_refresh_timeout):
return
# acquire() doesn't accept kwargs, but False is indicating
# that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
# the else clause.
if self._refresh_lock.acquire(False):
try:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
# precondition: this method should only be called if you've acquired
# the self._refresh_lock.
try:
metadata = self._refresh_using()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
# We successfully refreshed credentials but for whatever
# reason, our refreshing function returned credentials
# that are still expired. In this scenario, the only
# thing we can do is let the user know and raise
# an exception.
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
if not data:
missing_keys = expected_keys
else:
missing_keys = [k for k in expected_keys if k not in data]
if missing_keys:
message = "Credential refresh failed, response did not contain: %s"
raise CredentialRetrievalError(
provider=self.method,
error_msg=message % ', '.join(missing_keys),
)
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s",
self._expiry_time)
self._normalize()
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials
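# Illustrative sketch (not part of botocore): building refreshable credentials
# from a metadata dict of the shape produced by a refresh callable (see
# create_assume_role_refresher above). All values are placeholders and the
# lambda below never produces fresh credentials.
def _example_refreshable_credentials():
    metadata = {
        'access_key': 'AKIDEXAMPLE',
        'secret_key': 'secret-example',
        'token': 'token-example',
        'expiry_time': '2099-01-01T00:00:00Z',
    }
    return RefreshableCredentials.create_from_metadata(
        metadata, refresh_using=lambda: metadata, method='example')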
class DeferredRefreshableCredentials(RefreshableCredentials):
"""Refreshable credentials that don't require initial credentials.
refresh_using will be called upon first access.
"""
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(DeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class CachedCredentialFetcher(object):
DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, cache=None, expiry_window_seconds=None):
if cache is None:
cache = {}
self._cache = cache
self._cache_key = self._create_cache_key()
if expiry_window_seconds is None:
expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS
self._expiry_window_seconds = expiry_window_seconds
def _create_cache_key(self):
raise NotImplementedError('_create_cache_key()')
def _make_file_safe(self, filename):
        # Replace ':', the path separator, and '/' to make the string filename-safe.
filename = filename.replace(':', '_').replace(os.path.sep, '_')
return filename.replace('/', '_')
def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
def fetch_credentials(self):
return self._get_cached_credentials()
def _get_cached_credentials(self):
"""Get up-to-date credentials.
This will check the cache for up-to-date credentials, calling assume
role if none are available.
"""
response = self._load_from_cache()
if response is None:
response = self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
def _load_from_cache(self):
if self._cache_key in self._cache:
creds = deepcopy(self._cache[self._cache_key])
if not self._is_expired(creds):
return creds
else:
logger.debug(
"Credentials were found in cache, but they are expired."
)
return None
def _write_to_cache(self, response):
self._cache[self._cache_key] = deepcopy(response)
def _is_expired(self, credentials):
"""Check if credentials are expired."""
end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
seconds = total_seconds(end_time - _local_now())
return seconds < self._expiry_window_seconds
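# Illustrative sketch (not part of botocore): cached credentials are treated
# as expired once they fall within the expiry window of their expiration time,
# mirroring _is_expired above.
def _example_within_expiry_window(expiration, window_seconds=15 * 60):
    remaining = total_seconds(_parse_if_needed(expiration) - _local_now())
    return remaining < window_seconds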
class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher):
def __init__(self, client_creator, role_arn, extra_args=None,
cache=None, expiry_window_seconds=None):
self._client_creator = client_creator
self._role_arn = role_arn
if extra_args is None:
self._assume_kwargs = {}
else:
self._assume_kwargs = deepcopy(extra_args)
self._assume_kwargs['RoleArn'] = self._role_arn
self._role_session_name = self._assume_kwargs.get('RoleSessionName')
self._using_default_session_name = False
if not self._role_session_name:
self._generate_assume_role_name()
super(BaseAssumeRoleCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _generate_assume_role_name(self):
self._role_session_name = 'botocore-session-%s' % (int(time.time()))
self._assume_kwargs['RoleSessionName'] = self._role_session_name
self._using_default_session_name = True
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = deepcopy(self._assume_kwargs)
# The role session name gets randomly generated, so we don't want it
# in the hash.
if self._using_default_session_name:
del args['RoleSessionName']
if 'Policy' in args:
# To have a predictable hash, the keys of the policy must be
# sorted, so we have to load it here to make sure it gets sorted
# later on.
args['Policy'] = json.loads(args['Policy'])
args = json.dumps(args, sort_keys=True)
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
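# Illustrative sketch (not part of botocore): the cache key is the SHA-1 of the
# canonicalized assume-role arguments, so identical configurations share a
# cache entry. The role ARN below is a placeholder.
def _example_assume_role_cache_key():
    args = json.dumps({'RoleArn': 'arn:aws:iam::123456789012:role/demo'},
                      sort_keys=True)
    return sha1(args.encode('utf-8')).hexdigest()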
class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher):
def __init__(self, client_creator, source_credentials, role_arn,
extra_args=None, mfa_prompter=None, cache=None,
expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type source_credentials: Credentials
:param source_credentials: The credentials to use to create the
client for the call to AssumeRole.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type mfa_prompter: callable
:param mfa_prompter: A callable that returns input provided by the
            user (e.g. raw_input, getpass.getpass, etc.).
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds, before
            the actual expiration at which cached credentials are treated as
            expired and refreshed.
"""
self._source_credentials = source_credentials
self._mfa_prompter = mfa_prompter
if self._mfa_prompter is None:
self._mfa_prompter = getpass.getpass
super(AssumeRoleCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = self._create_client()
return client.assume_role(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
mfa_serial = assume_role_kwargs.get('SerialNumber')
if mfa_serial is not None:
prompt = 'Enter MFA code for %s: ' % mfa_serial
token_code = self._mfa_prompter(prompt)
assume_role_kwargs['TokenCode'] = token_code
duration_seconds = assume_role_kwargs.get('DurationSeconds')
if duration_seconds is not None:
assume_role_kwargs['DurationSeconds'] = duration_seconds
return assume_role_kwargs
def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class AssumeRoleWithWebIdentityCredentialFetcher(
BaseAssumeRoleCredentialFetcher
):
def __init__(self, client_creator, web_identity_token_loader, role_arn,
extra_args=None, cache=None, expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type web_identity_token_loader: callable
:param web_identity_token_loader: A callable that takes no arguments
and returns a web identity token str.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and refreshed early.
"""
self._web_identity_token_loader = web_identity_token_loader
super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
# Assume role with web identity does not require credentials other than
        # the token, so explicitly configure the client not to sign requests.
config = Config(signature_version=UNSIGNED)
client = self._client_creator('sts', config=config)
return client.assume_role_with_web_identity(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
identity_token = self._web_identity_token_loader()
assume_role_kwargs['WebIdentityToken'] = identity_token
return assume_role_kwargs
class CredentialProvider(object):
# A short name to identify the provider within botocore.
METHOD = None
# A name to identify the provider for use in cross-sdk features like
# assume role's `credential_source` configuration option. These names
# are to be treated in a case-insensitive way. NOTE: any providers not
# implemented in botocore MUST prefix their canonical names with
# 'custom' or we DO NOT guarantee that it will work with any features
# that this provides.
CANONICAL_NAME = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
        If not found, this method should return ``False``, indicating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
:returns: Whether credentials were found & set
:rtype: Credentials
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=key_name)
return found
class ProcessProvider(CredentialProvider):
METHOD = 'custom-process'
def __init__(self, profile_name, load_config, popen=subprocess.Popen):
self._profile_name = profile_name
self._load_config = load_config
self._loaded_config = None
self._popen = popen
def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return RefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return Credentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = self._popen(process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
@property
def _credential_process(self):
if self._loaded_config is None:
self._loaded_config = self._load_config()
profile_config = self._loaded_config.get(
'profiles', {}).get(self._profile_name, {})
return profile_config.get('credential_process')
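# Illustrative sketch (not part of botocore): the command named by a
# profile's ``credential_process`` setting is expected to print a JSON
# document whose keys match what ``_retrieve_credentials_using`` reads
# above. The values below are placeholders, not real credentials.
def _example_credential_process_payload():
    # Only 'Version' (which must be 1), 'AccessKeyId' and 'SecretAccessKey'
    # are strictly required by the parsing code above; the rest are optional.
    return {
        'Version': 1,
        'AccessKeyId': 'AKIAEXAMPLEKEY',
        'SecretAccessKey': 'example-secret-key',
        'SessionToken': 'example-session-token',
        'Expiration': '2030-01-01T00:00:00Z',
    }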
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
CANONICAL_NAME = 'Environment'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env vars.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can have up to 4 keys: ``access_key``, ``secret_key``,
            ``token``, ``expiry_time``.
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['token'] = self.TOKENS
var_mapping['expiry_time'] = self.EXPIRY_TIME
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
var_mapping['expiry_time'] = mapping.get(
'expiry_time', self.EXPIRY_TIME)
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
access_key = self.environ.get(self._mapping['access_key'], '')
if access_key:
logger.info('Found credentials in environment variables.')
fetcher = self._create_credentials_fetcher()
credentials = fetcher(require_expiry=False)
expiry_time = credentials['expiry_time']
if expiry_time is not None:
expiry_time = parse(expiry_time)
return RefreshableCredentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], expiry_time,
refresh_using=fetcher, method=self.METHOD
)
return Credentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], method=self.METHOD
)
else:
return None
def _create_credentials_fetcher(self):
mapping = self._mapping
method = self.METHOD
environ = self.environ
def fetch_credentials(require_expiry=True):
credentials = {}
access_key = environ.get(mapping['access_key'], '')
if not access_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['access_key'])
credentials['access_key'] = access_key
secret_key = environ.get(mapping['secret_key'], '')
if not secret_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['secret_key'])
credentials['secret_key'] = secret_key
credentials['token'] = None
for token_env_var in mapping['token']:
token = environ.get(token_env_var, '')
if token:
credentials['token'] = token
break
credentials['expiry_time'] = None
expiry_time = environ.get(mapping['expiry_time'], '')
if expiry_time:
credentials['expiry_time'] = expiry_time
if require_expiry and not expiry_time:
raise PartialCredentialsError(
provider=method, cred_var=mapping['expiry_time'])
return credentials
return fetch_credentials
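# Illustrative sketch (not part of botocore): EnvProvider reads the
# variables named by the class attributes above. Supplying a fake environ
# mapping, as below, is a common way to exercise it; the values are
# placeholders.
def _example_env_provider_usage():
    fake_environ = {
        'AWS_ACCESS_KEY_ID': 'AKIAEXAMPLEKEY',
        'AWS_SECRET_ACCESS_KEY': 'example-secret-key',
        'AWS_SESSION_TOKEN': 'example-session-token',
    }
    provider = EnvProvider(environ=fake_environ)
    # Returns a Credentials instance, or None if no access key is present.
    return provider.load()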
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CANONICAL_NAME = 'Ec2Config'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
if 'AWS_CREDENTIAL_FILE' in self._environ:
full_path = os.path.expanduser(
self._environ['AWS_CREDENTIAL_FILE'])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
CANONICAL_NAME = 'SharedCredentials'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._ini_parser = ini_parser
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.ACCESS_KEY in config:
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
CANONICAL_NAME = 'SharedConfig'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
        :param config_filename: The path to the configuration file that
            contains the profile sections to load.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = botocore.configloader.load_config
self._config_parser = config_parser
def load(self):
"""
        If there are credentials in the configuration associated with
the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.ACCESS_KEY in profile_config:
logger.info("Credentials found in config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
CANONICAL_NAME = 'Boto2Config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
# The AssumeRole provider is logically part of the SharedConfig and
# SharedCredentials providers. Since the purpose of the canonical name
# is to provide cross-sdk compatibility, calling code will need to be
# aware that either of those providers should be tied to the AssumeRole
# provider as much as possible.
CANONICAL_NAME = None
ROLE_CONFIG_VAR = 'role_arn'
WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
# Credentials are considered expired (and will be refreshed) once the total
    # remaining time left until the credentials expire is less than the
    # EXPIRY_WINDOW_SECONDS value.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass, credential_sourcer=None,
profile_provider_builder=None):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``botocore.session.Session.create_client``.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class in the CLI.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
            by the user (e.g. raw_input, getpass.getpass, etc.).
:type credential_sourcer: CanonicalNameCredentialSourcer
:param credential_sourcer: A credential provider that takes a
configuration, which is used to provide the source credentials
for the STS call.
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
        # client_creator is a callable that creates a client.
# It's basically session.create_client
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer configuration loading until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
self._credential_sourcer = credential_sourcer
self._profile_provider_builder = profile_provider_builder
self._visited_profiles = [self._profile_name]
def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return self._load_creds_via_assume_role(self._profile_name)
def _has_assume_role_config_vars(self, profile):
return (
self.ROLE_CONFIG_VAR in profile and
# We need to ensure this provider doesn't look at a profile when
# the profile has configuration for web identity. Simply relying on
# the order in the credential chain is insufficient as it doesn't
# prevent the case when we're doing an assume role chain.
self.WEB_IDENTITY_TOKE_FILE_VAR not in profile
)
def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
def _get_role_config(self, profile_name):
"""Retrieves and validates the role configuration for the profile."""
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
source_profile = profile.get('source_profile')
role_arn = profile['role_arn']
credential_source = profile.get('credential_source')
mfa_serial = profile.get('mfa_serial')
external_id = profile.get('external_id')
role_session_name = profile.get('role_session_name')
duration_seconds = profile.get('duration_seconds')
role_config = {
'role_arn': role_arn,
'external_id': external_id,
'mfa_serial': mfa_serial,
'role_session_name': role_session_name,
'source_profile': source_profile,
'credential_source': credential_source
}
if duration_seconds is not None:
try:
role_config['duration_seconds'] = int(duration_seconds)
except ValueError:
pass
# Either the credential source or the source profile must be
# specified, but not both.
if credential_source is not None and source_profile is not None:
raise InvalidConfigError(
error_msg=(
'The profile "%s" contains both source_profile and '
'credential_source.' % profile_name
)
)
elif credential_source is None and source_profile is None:
raise PartialCredentialsError(
provider=self.METHOD,
cred_var='source_profile or credential_source'
)
elif credential_source is not None:
self._validate_credential_source(
profile_name, credential_source)
else:
self._validate_source_profile(profile_name, source_profile)
return role_config
def _validate_credential_source(self, parent_profile, credential_source):
if self._credential_sourcer is None:
raise InvalidConfigError(error_msg=(
'The credential_source "%s" is specified in profile "%s", '
'but no source provider was configured.' % (
credential_source, parent_profile)
))
if not self._credential_sourcer.is_supported(credential_source):
raise InvalidConfigError(error_msg=(
'The credential source "%s" referenced in profile "%s" is not '
'valid.' % (credential_source, parent_profile)
))
def _source_profile_has_credentials(self, profile):
return any([
self._has_static_credentials(profile),
self._has_assume_role_config_vars(profile),
])
def _validate_source_profile(self, parent_profile_name,
source_profile_name):
profiles = self._loaded_config.get('profiles', {})
if source_profile_name not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile_name, parent_profile_name)
)
)
source_profile = profiles[source_profile_name]
# Make sure we aren't going into an infinite loop. If we haven't
# visited the profile yet, we're good.
if source_profile_name not in self._visited_profiles:
return
# If we have visited the profile and the profile isn't simply
# referencing itself, that's an infinite loop.
if source_profile_name != parent_profile_name:
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
# A profile is allowed to reference itself so that it can source
# static credentials and have configuration all in the same
# profile. This will only ever work for the top level assume
# role because the static credentials will otherwise take
# precedence.
if not self._has_static_credentials(source_profile):
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
def _has_static_credentials(self, profile):
static_keys = ['aws_secret_access_key', 'aws_access_key_id']
return any(static_key in profile for static_key in static_keys)
def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return self._resolve_credentials_from_profile(source_profile)
def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile) and \
not self._profile_provider_builder:
# This is only here for backwards compatibility. If this provider
            # isn't given a profile provider builder we still want to be able
            # to handle the basic static credential case as we would before the
            # profile provider builder parameter was added.
return self._resolve_static_credentials_from_profile(profile)
elif self._has_static_credentials(profile) or \
not self._has_assume_role_config_vars(profile):
profile_providers = self._profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=True,
)
profile_chain = CredentialResolver(profile_providers)
credentials = profile_chain.load_credentials()
if credentials is None:
error_message = (
'The source profile "%s" must have credentials.'
)
raise InvalidConfigError(
error_msg=error_message % profile_name,
)
return credentials
return self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return Credentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
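# Illustrative sketch (not part of botocore): the provider above is driven
# entirely by profile configuration. Based on the keys read in
# ``_get_role_config``, a role-chaining setup in the shared config file
# might look like the following; the profile names, ARN and values are
# placeholders. role_session_name, external_id, mfa_serial and
# duration_seconds are optional.
#
#   [profile base]
#   aws_access_key_id = AKIAEXAMPLEKEY
#   aws_secret_access_key = example-secret-key
#
#   [profile admin]
#   role_arn = arn:aws:iam::123456789012:role/example-role
#   source_profile = base
#   role_session_name = example-session
#   duration_seconds = 3600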
class AssumeRoleWithWebIdentityProvider(CredentialProvider):
METHOD = 'assume-role-with-web-identity'
CANONICAL_NAME = None
_CONFIG_TO_ENV_VAR = {
'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
'role_session_name': 'AWS_ROLE_SESSION_NAME',
'role_arn': 'AWS_ROLE_ARN',
}
def __init__(
self,
load_config,
client_creator,
profile_name,
cache=None,
disable_env_vars=False,
token_loader_cls=None,
):
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
self._profile_config = None
self._disable_env_vars = disable_env_vars
if token_loader_cls is None:
token_loader_cls = FileWebIdentityTokenLoader
self._token_loader_cls = token_loader_cls
def load(self):
return self._assume_role_with_web_identity()
def _get_profile_config(self, key):
if self._profile_config is None:
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
self._profile_config = profiles.get(self._profile_name, {})
return self._profile_config.get(key)
def _get_env_config(self, key):
if self._disable_env_vars:
return None
env_key = self._CONFIG_TO_ENV_VAR.get(key)
if env_key and env_key in os.environ:
return os.environ[env_key]
return None
def _get_config(self, key):
env_value = self._get_env_config(key)
if env_value is not None:
return env_value
return self._get_profile_config(key)
def _assume_role_with_web_identity(self):
token_path = self._get_config('web_identity_token_file')
if not token_path:
return None
token_loader = self._token_loader_cls(token_path)
role_arn = self._get_config('role_arn')
if not role_arn:
error_msg = (
'The provided profile or the current environment is '
'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the role_arn '
'configuration set or the AWS_ROLE_ARN env var is set.'
)
raise InvalidConfigError(error_msg=error_msg)
extra_args = {}
role_session_name = self._get_config('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=self._client_creator,
web_identity_token_loader=token_loader,
role_arn=role_arn,
extra_args=extra_args,
cache=self.cache,
)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=fetcher.fetch_credentials,
)
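# Illustrative sketch (not part of botocore): this provider reads either the
# profile keys or the environment variables listed in ``_CONFIG_TO_ENV_VAR``
# above. A minimal environment-only setup might be (the ARN, token path and
# session name are placeholders):
#
#   AWS_ROLE_ARN=arn:aws:iam::123456789012:role/example-role
#   AWS_WEB_IDENTITY_TOKEN_FILE=/path/to/oidc/token
#   AWS_ROLE_SESSION_NAME=example-session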
class CanonicalNameCredentialSourcer(object):
def __init__(self, providers):
self._providers = providers
def is_supported(self, source_name):
"""Validates a given source name.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: bool
:returns: True if the credential provider is supported,
False otherwise.
"""
return source_name in [p.CANONICAL_NAME for p in self._providers]
def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, CredentialResolver):
return source.load_credentials()
return source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return CredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
def _get_provider_by_canonical_name(self, canonical_name):
"""Return a credential provider by its canonical name.
This function is strict, it does not attempt to address
compatibility issues.
"""
for provider in self._providers:
name = provider.CANONICAL_NAME
# Canonical names are case-insensitive
if name and name.lower() == canonical_name.lower():
return provider
def _get_provider_by_method(self, method):
"""Return a credential provider by its METHOD name."""
for provider in self._providers:
if provider.METHOD == method:
return provider
class ContainerProvider(CredentialProvider):
METHOD = 'container-role'
CANONICAL_NAME = 'EcsContainer'
ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
def __init__(self, environ=None, fetcher=None):
if environ is None:
environ = os.environ
if fetcher is None:
fetcher = ContainerMetadataFetcher()
self._environ = environ
self._fetcher = fetcher
def load(self):
# This cred provider is only triggered if the self.ENV_VAR is set,
# which only happens if you opt into this feature.
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return self._retrieve_or_fail()
def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = fetcher()
return RefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
def _build_headers(self):
headers = {}
auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
if auth_token is not None:
return {
'Authorization': auth_token
}
def _create_fetcher(self, full_uri, headers):
def fetch_creds():
try:
response = self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
def _provided_relative_uri(self):
return self.ENV_VAR in self._environ
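# Illustrative sketch (not part of botocore): ContainerProvider only runs
# when one of the URI variables above is present. Two example environments,
# with placeholder URIs and token:
#
#   # Relative URI resolved against the fetcher's base endpoint
#   AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/v2/credentials/example-id
#
#   # Full URI plus an Authorization header value
#   AWS_CONTAINER_CREDENTIALS_FULL_URI=http://localhost:8080/example-creds
#   AWS_CONTAINER_AUTHORIZATION_TOKEN=example-token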
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that
will be tried before an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials before. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
        Inserts a new instance of ``CredentialProvider`` into the chain that will
be tried after an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials after. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
        Removes a given ``CredentialProvider`` instance from the chain.
        :param name: The short name of the credential provider to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if name not in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
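# Illustrative sketch (not part of botocore): a resolver is an ordered list
# of providers, and the insert/remove helpers above key off each provider's
# METHOD string. ``env_provider`` is assumed to be an EnvProvider instance
# (METHOD == 'env') and ``custom_provider`` any CredentialProvider subclass.
def _example_resolver_usage(env_provider, custom_provider):
    resolver = CredentialResolver(providers=[env_provider])
    resolver.insert_after('env', custom_provider)
    # The first provider returning a non-None value wins.
    return resolver.load_credentials()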
class SSOCredentialFetcher(CachedCredentialFetcher):
_UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, start_url, sso_region, role_name, account_id,
client_creator, token_loader=None, cache=None,
expiry_window_seconds=None):
self._client_creator = client_creator
self._sso_region = sso_region
self._role_name = role_name
self._account_id = account_id
self._start_url = start_url
self._token_loader = token_loader
super(SSOCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = {
'startUrl': self._start_url,
'roleName': self._role_name,
'accountId': self._account_id,
}
# NOTE: It would be good to hoist this cache key construction logic
# into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that subclass it don't
        # pass separators, resulting in non-minified JSON. In the long term,
# all fetchers should use the below caching scheme.
args = json.dumps(args, sort_keys=True, separators=(',', ':'))
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _parse_timestamp(self, timestamp_ms):
# fromtimestamp expects seconds so: milliseconds / 1000 = seconds
timestamp_seconds = timestamp_ms / 1000.0
timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
return timestamp.strftime(self._UTC_DATE_FORMAT)
def _get_credentials(self):
"""Get credentials by calling SSO get role credentials."""
config = Config(
signature_version=UNSIGNED,
region_name=self._sso_region,
)
client = self._client_creator('sso', config=config)
kwargs = {
'roleName': self._role_name,
'accountId': self._account_id,
'accessToken': self._token_loader(self._start_url),
}
try:
response = client.get_role_credentials(**kwargs)
except client.exceptions.UnauthorizedException:
raise UnauthorizedSSOTokenError()
credentials = response['roleCredentials']
credentials = {
'ProviderType': 'sso',
'Credentials': {
'AccessKeyId': credentials['accessKeyId'],
'SecretAccessKey': credentials['secretAccessKey'],
'SessionToken': credentials['sessionToken'],
'Expiration': self._parse_timestamp(credentials['expiration']),
}
}
return credentials
class SSOProvider(CredentialProvider):
METHOD = 'sso'
_SSO_TOKEN_CACHE_DIR = os.path.expanduser(
os.path.join('~', '.aws', 'sso', 'cache')
)
_SSO_CONFIG_VARS = [
'sso_start_url',
'sso_region',
'sso_role_name',
'sso_account_id',
]
def __init__(self, load_config, client_creator, profile_name,
cache=None, token_cache=None):
if token_cache is None:
token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
self._token_cache = token_cache
if cache is None:
cache = {}
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
def _load_sso_config(self):
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
profile_name = self._profile_name
profile_config = profiles.get(self._profile_name, {})
if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
return None
config = {}
missing_config_vars = []
for config_var in self._SSO_CONFIG_VARS:
if config_var in profile_config:
config[config_var] = profile_config[config_var]
else:
missing_config_vars.append(config_var)
if missing_config_vars:
missing = ', '.join(missing_config_vars)
raise InvalidConfigError(
error_msg=(
'The profile "%s" is configured to use SSO but is missing '
'required configuration: %s' % (profile_name, missing)
)
)
return config
def load(self):
sso_config = self._load_sso_config()
if not sso_config:
return None
sso_fetcher = SSOCredentialFetcher(
sso_config['sso_start_url'],
sso_config['sso_region'],
sso_config['sso_role_name'],
sso_config['sso_account_id'],
self._client_creator,
token_loader=SSOTokenLoader(cache=self._token_cache),
cache=self.cache,
)
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=sso_fetcher.fetch_credentials,
)
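# Illustrative sketch (not part of botocore): SSOProvider looks for the four
# sso_* keys listed in ``_SSO_CONFIG_VARS``. A profile using it might look
# like the following; the start URL, region, account id and role name are
# placeholders.
#
#   [profile sso-dev]
#   sso_start_url = https://example.awsapps.com/start
#   sso_region = us-east-1
#   sso_account_id = 123456789012
#   sso_role_name = ExampleRole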
| 81,776 | Python | 37.356942 | 79 | 0.610424 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/loaders.py
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for loading various model files.
This module provides the classes that are used to load models used
by botocore. This can include:
* Service models (e.g. the model for EC2, S3, DynamoDB, etc.)
* Service model extras which customize the service models
* Other models associated with a service (pagination, waiters)
* Non service-specific config (Endpoint data, retry config)
Loading a model is broken down into several steps:
* Determining the path to load
* Searching the data_path for files to load
* The mechanics of loading the file
* Searching for extras and applying them to the loaded file
The last item is used so that other, faster loading mechanisms
besides the default JSON loader can be used.
The Search Path
===============
Similar to how the PATH environment variable is to finding executables
and the PYTHONPATH environment variable is to finding python modules
to import, the botocore loaders have the concept of a data path exposed
through AWS_DATA_PATH.
This enables end users to provide additional search paths where we
will attempt to load models outside of the models we ship with
botocore. When you create a ``Loader``, there are two paths
automatically added to the model search path:
* <botocore root>/data/
* ~/.aws/models
The first value is the path where all the model files shipped with
botocore are located.
The second path is so that users can just drop new model files in
``~/.aws/models`` without having to mess around with the AWS_DATA_PATH.
The AWS_DATA_PATH value uses the platform specific path separator to
separate entries (typically ``:`` on linux and ``;`` on windows).
Directory Layout
================
The Loader expects a particular directory layout. In order for any
directory specified in AWS_DATA_PATH to be considered, it must have
this structure for service models::
<root>
|
|-- servicename1
| |-- 2012-10-25
| |-- service-2.json
|-- ec2
| |-- 2014-01-01
| | |-- paginators-1.json
| | |-- service-2.json
| | |-- waiters-2.json
| |-- 2015-03-01
| |-- paginators-1.json
| |-- service-2.json
| |-- waiters-2.json
| |-- service-2.sdk-extras.json
That is:
* The root directory contains sub directories that are the name
of the services.
* Within each service directory, there's a sub directory for each
available API version.
* Within each API version, there are model specific files, including
(but not limited to): service-2.json, waiters-2.json, paginators-1.json
The ``-1`` and ``-2`` suffix at the end of the model files denote which version
schema is used within the model. Even though this information is available in
the ``version`` key within the model, this version is also part of the filename
so that code does not need to load the JSON model in order to determine which
version to use.
The ``sdk-extras`` and similar files represent extra data that needs to be
applied to the model after it is loaded. Data in these files might represent
information that doesn't quite fit in the original models, but is still needed
for the sdk. For instance, additional operation parameters might be added here
which don't represent the actual service api.
"""
import os
import logging
from botocore import BOTOCORE_ROOT
from botocore.compat import json
from botocore.compat import OrderedDict
from botocore.exceptions import DataNotFoundError, UnknownServiceError
from botocore.utils import deep_merge
logger = logging.getLogger(__name__)
def instance_cache(func):
"""Cache the result of a method on a per instance basis.
This is not a general purpose caching decorator. In order
for this to be used, it must be used on methods on an
instance, and that instance *must* provide a
``self._cache`` dictionary.
"""
def _wrapper(self, *args, **kwargs):
key = (func.__name__,) + args
for pair in sorted(kwargs.items()):
key += pair
if key in self._cache:
return self._cache[key]
data = func(self, *args, **kwargs)
self._cache[key] = data
return data
return _wrapper
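# Illustrative sketch (not part of botocore): ``instance_cache`` only works
# on methods of objects that provide a ``self._cache`` dict, as described
# above. The class below is a made-up example of that contract.
class _ExampleCachedLookup(object):
    def __init__(self):
        self._cache = {}
    @instance_cache
    def lookup(self, name):
        # Computed once per distinct ``name``; later calls hit self._cache.
        return name.upper()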
class JSONFileLoader(object):
"""Loader JSON files.
This class can load the default format of models, which is a JSON file.
"""
def exists(self, file_path):
"""Checks if the file exists.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: True if file path exists, False otherwise.
"""
return os.path.isfile(file_path + '.json')
def load_file(self, file_path):
"""Attempt to load the file path.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: The loaded data if it exists, otherwise None.
"""
full_path = file_path + '.json'
if not os.path.isfile(full_path):
return
# By default the file will be opened with locale encoding on Python 3.
# We specify "utf8" here to ensure the correct behavior.
with open(full_path, 'rb') as fp:
payload = fp.read().decode('utf-8')
logger.debug("Loading JSON file: %s", full_path)
return json.loads(payload, object_pairs_hook=OrderedDict)
def create_loader(search_path_string=None):
"""Create a Loader class.
    This factory function creates a loader given a search path string.
    :type search_path_string: str
    :param search_path_string: The AWS_DATA_PATH value. A string
of data path values separated by the ``os.path.pathsep`` value,
which is typically ``:`` on POSIX platforms and ``;`` on
windows.
:return: A ``Loader`` instance.
"""
if search_path_string is None:
return Loader()
paths = []
extra_paths = search_path_string.split(os.pathsep)
for path in extra_paths:
path = os.path.expanduser(os.path.expandvars(path))
paths.append(path)
return Loader(extra_search_paths=paths)
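# Illustrative sketch (not part of botocore): ``create_loader`` accepts the
# raw AWS_DATA_PATH string and splits it on ``os.pathsep``; passing None
# simply builds a default Loader. The environment lookup below is only an
# example of how the value is usually obtained.
def _example_create_loader_from_env():
    loader = create_loader(os.environ.get('AWS_DATA_PATH'))
    # The ~/.aws/models and built-in data paths are always appended.
    return loader.search_paths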
class Loader(object):
"""Find and load data models.
This class will handle searching for and loading data models.
The main method used here is ``load_service_model``, which is a
convenience method over ``load_data`` and ``determine_latest_version``.
"""
FILE_LOADER_CLASS = JSONFileLoader
# The included models in botocore/data/ that we ship with botocore.
BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data')
# For convenience we automatically add ~/.aws/models to the data path.
CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'),
'.aws', 'models')
BUILTIN_EXTRAS_TYPES = ['sdk']
def __init__(self, extra_search_paths=None, file_loader=None,
cache=None, include_default_search_paths=True,
include_default_extras=True):
self._cache = {}
if file_loader is None:
file_loader = self.FILE_LOADER_CLASS()
self.file_loader = file_loader
if extra_search_paths is not None:
self._search_paths = extra_search_paths
else:
self._search_paths = []
if include_default_search_paths:
self._search_paths.extend([self.CUSTOMER_DATA_PATH,
self.BUILTIN_DATA_PATH])
self._extras_types = []
if include_default_extras:
self._extras_types.extend(self.BUILTIN_EXTRAS_TYPES)
self._extras_processor = ExtrasProcessor()
@property
def search_paths(self):
return self._search_paths
@property
def extras_types(self):
return self._extras_types
@instance_cache
def list_available_services(self, type_name):
"""List all known services.
This will traverse the search path and look for all known
services.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the list of available services depends on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
available for a services-2.json file.
:return: A list of all services. The list of services will
be sorted.
"""
services = set()
for possible_path in self._potential_locations():
# Any directory in the search path is potentially a service.
# We'll collect any initial list of potential services,
# but we'll then need to further process these directories
# by searching for the corresponding type_name in each
# potential directory.
possible_services = [
d for d in os.listdir(possible_path)
if os.path.isdir(os.path.join(possible_path, d))]
for service_name in possible_services:
full_dirname = os.path.join(possible_path, service_name)
api_versions = os.listdir(full_dirname)
for api_version in api_versions:
full_load_path = os.path.join(full_dirname,
api_version,
type_name)
if self.file_loader.exists(full_load_path):
services.add(service_name)
break
return sorted(services)
@instance_cache
def determine_latest_version(self, service_name, type_name):
"""Find the latest API version available for a service.
:type service_name: str
:param service_name: The name of the service.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the latest API version available can depend on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
available for a services-2.json file.
:rtype: str
:return: The latest API version. If the service does not exist
or does not have any available API data, then a
``DataNotFoundError`` exception will be raised.
"""
return max(self.list_api_versions(service_name, type_name))
@instance_cache
def list_api_versions(self, service_name, type_name):
"""List all API versions available for a particular service type
:type service_name: str
:param service_name: The name of the service
:type type_name: str
:param type_name: The type name for the service (i.e service-2,
paginators-1, etc.)
:rtype: list
:return: A list of API version strings in sorted order.
"""
known_api_versions = set()
for possible_path in self._potential_locations(service_name,
must_exist=True,
is_dir=True):
for dirname in os.listdir(possible_path):
full_path = os.path.join(possible_path, dirname, type_name)
# Only add to the known_api_versions if the directory
# contains a service-2, paginators-1, etc. file corresponding
# to the type_name passed in.
if self.file_loader.exists(full_path):
known_api_versions.add(dirname)
if not known_api_versions:
raise DataNotFoundError(data_path=service_name)
return sorted(known_api_versions)
@instance_cache
def load_service_model(self, service_name, type_name, api_version=None):
"""Load a botocore service model
This is the main method for loading botocore models (e.g. a service
model, pagination configs, waiter configs, etc.).
:type service_name: str
:param service_name: The name of the service (e.g ``ec2``, ``s3``).
:type type_name: str
:param type_name: The model type. Valid types include, but are not
limited to: ``service-2``, ``paginators-1``, ``waiters-2``.
:type api_version: str
:param api_version: The API version to load. If this is not
provided, then the latest API version will be used.
:type load_extras: bool
:param load_extras: Whether or not to load the tool extras which
contain additional data to be added to the model.
:raises: UnknownServiceError if there is no known service with
the provided service_name.
:raises: DataNotFoundError if no data could be found for the
service_name/type_name/api_version.
:return: The loaded data, as a python type (e.g. dict, list, etc).
"""
# Wrapper around the load_data. This will calculate the path
# to call load_data with.
known_services = self.list_available_services(type_name)
if service_name not in known_services:
raise UnknownServiceError(
service_name=service_name,
known_service_names=', '.join(sorted(known_services)))
if api_version is None:
api_version = self.determine_latest_version(
service_name, type_name)
full_path = os.path.join(service_name, api_version, type_name)
model = self.load_data(full_path)
# Load in all the extras
extras_data = self._find_extras(service_name, type_name, api_version)
self._extras_processor.process(model, extras_data)
return model
def _find_extras(self, service_name, type_name, api_version):
"""Creates an iterator over all the extras data."""
for extras_type in self.extras_types:
extras_name = '%s.%s-extras' % (type_name, extras_type)
full_path = os.path.join(service_name, api_version, extras_name)
try:
yield self.load_data(full_path)
except DataNotFoundError:
pass
@instance_cache
def load_data(self, name):
"""Load data given a data path.
This is a low level method that will search through the various
search paths until it's able to load a value. This is typically
only needed to load *non* model files (such as _endpoints and
_retry). If you need to load model files, you should prefer
``load_service_model``.
:type name: str
        :param name: The data path, e.g. ``ec2/2015-03-01/service-2``.
:return: The loaded data. If no data could be found then
a DataNotFoundError is raised.
"""
for possible_path in self._potential_locations(name):
found = self.file_loader.load_file(possible_path)
if found is not None:
return found
# We didn't find anything that matched on any path.
raise DataNotFoundError(data_path=name)
def _potential_locations(self, name=None, must_exist=False,
is_dir=False):
# Will give an iterator over the full path of potential locations
# according to the search path.
for path in self.search_paths:
if os.path.isdir(path):
full_path = path
if name is not None:
full_path = os.path.join(path, name)
if not must_exist:
yield full_path
else:
if is_dir and os.path.isdir(full_path):
yield full_path
elif os.path.exists(full_path):
yield full_path
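# Illustrative sketch (not part of botocore): the usual entry point is
# ``load_service_model``; 'ec2' and 'service-2' below are just examples of
# the service/type naming scheme described in the docstrings above.
def _example_load_service_model():
    loader = Loader()
    # Omitting api_version resolves the latest available version.
    return loader.load_service_model('ec2', 'service-2')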
class ExtrasProcessor(object):
"""Processes data from extras files into service models."""
def process(self, original_model, extra_models):
"""Processes data from a list of loaded extras files into a model
:type original_model: dict
:param original_model: The service model to load all the extras into.
:type extra_models: iterable of dict
:param extra_models: A list of loaded extras models.
"""
for extras in extra_models:
self._process(original_model, extras)
def _process(self, model, extra_model):
"""Process a single extras model into a service model."""
if 'merge' in extra_model:
deep_merge(model, extra_model['merge'])
| 17,355 | Python | 36.567099 | 79 | 0.623855 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/stub.py
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from collections import deque
from pprint import pformat
from botocore.validate import validate_parameters
from botocore.exceptions import ParamValidationError, \
StubResponseError, StubAssertionError, UnStubbedResponseError
from botocore.awsrequest import AWSResponse
class _ANY(object):
"""
A helper object that compares equal to everything. Copied from
unittest.mock
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
class Stubber(object):
"""
This class will allow you to stub out requests so you don't have to hit
an endpoint to write tests. Responses are returned first in, first out.
If operations are called out of order, or are called with no remaining
queued responses, an error will be raised.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': 'test-bucket'}
stubber.add_response('list_objects', response, expected_params)
stubber.activate()
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
This class can also be called as a context manager, which will handle
activation / deactivation for you.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
response = {
"Owner": {
"ID": "foo",
"DisplayName": "bar"
},
"Buckets": [{
"CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
"Name": "baz"
}]
}
with Stubber(s3) as stubber:
stubber.add_response('list_buckets', response, {})
service_response = s3.list_buckets()
assert service_response == response
If you have an input parameter that is a randomly generated value, or you
otherwise don't care about its value, you can use ``stub.ANY`` to ignore
it in validation.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber, ANY
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': ANY}
stubber.add_response('list_objects', response, expected_params)
with stubber:
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
"""
def __init__(self, client):
"""
:param client: The client to add your stubs to.
"""
self.client = client
self._event_id = 'boto_stubber'
self._expected_params_event_id = 'boto_stubber_expected_params'
self._queue = deque()
def __enter__(self):
self.activate()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deactivate()
def activate(self):
"""
Activates the stubber on the client
"""
self.client.meta.events.register_first(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.register(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def deactivate(self):
"""
Deactivates the stubber on the client
"""
self.client.meta.events.unregister(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.unregister(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def add_response(self, method, service_response, expected_params=None):
"""
Adds a service response to the response queue. This will be validated
against the service model to ensure correctness. It should be noted,
however, that while missing attributes are often considered correct,
your code may not function properly if you leave them out. Therefore
you should always fill in every value you see in a typical response for
your particular request.
:param method: The name of the client method to stub.
:type method: str
:param service_response: A dict response stub. Provided parameters will
be validated against the service model.
:type service_response: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation. stub.ANY is only valid for top level params.
"""
self._add_response(method, service_response, expected_params)
def _add_response(self, method, service_response, expected_params):
if not hasattr(self.client, method):
raise ValueError(
"Client %s does not have method: %s"
% (self.client.meta.service_model.service_name, method))
# Create a successful http response
http_response = AWSResponse(None, 200, {}, None)
operation_name = self.client.meta.method_to_api_mapping.get(method)
self._validate_response(operation_name, service_response)
# Add the service_response to the queue for returning responses
response = {
'operation_name': operation_name,
'response': (http_response, service_response),
'expected_params': expected_params
}
self._queue.append(response)
def add_client_error(self, method, service_error_code='',
service_message='', http_status_code=400,
service_error_meta=None, expected_params=None,
response_meta=None):
"""
Adds a ``ClientError`` to the response queue.
:param method: The name of the service method to return the error on.
:type method: str
:param service_error_code: The service error code to return,
e.g. ``NoSuchBucket``
:type service_error_code: str
:param service_message: The service message to return, e.g.
'The specified bucket does not exist.'
:type service_message: str
:param http_status_code: The HTTP status code to return, e.g. 404, etc
:type http_status_code: int
:param service_error_meta: Additional keys to be added to the
service Error
:type service_error_meta: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation.
:param response_meta: Additional keys to be added to the
response's ResponseMetadata
:type response_meta: dict
"""
http_response = AWSResponse(None, http_status_code, {}, None)
# We don't look to the model to build this because the caller would
# need to know the details of what the HTTP body would need to
# look like.
parsed_response = {
'ResponseMetadata': {'HTTPStatusCode': http_status_code},
'Error': {
'Message': service_message,
'Code': service_error_code
}
}
if service_error_meta is not None:
parsed_response['Error'].update(service_error_meta)
if response_meta is not None:
parsed_response['ResponseMetadata'].update(response_meta)
operation_name = self.client.meta.method_to_api_mapping.get(method)
# Note that we do not allow for expected_params while
# adding errors into the queue yet.
response = {
'operation_name': operation_name,
'response': (http_response, parsed_response),
'expected_params': expected_params,
}
self._queue.append(response)
def assert_no_pending_responses(self):
"""
Asserts that all expected calls were made.
"""
remaining = len(self._queue)
if remaining != 0:
raise AssertionError(
"%d responses remaining in queue." % remaining)
def _assert_expected_call_order(self, model, params):
if not self._queue:
raise UnStubbedResponseError(
operation_name=model.name,
reason=(
'Unexpected API Call: A call was made but no additional calls expected. '
'Either the API Call was not stubbed or it was called multiple times.'
)
)
name = self._queue[0]['operation_name']
if name != model.name:
raise StubResponseError(
operation_name=model.name,
reason='Operation mismatch: found response for %s.' % name)
def _get_response_handler(self, model, params, context, **kwargs):
self._assert_expected_call_order(model, params)
# Pop off the entire response once everything has been validated
return self._queue.popleft()['response']
def _assert_expected_params(self, model, params, context, **kwargs):
if self._should_not_stub(context):
return
self._assert_expected_call_order(model, params)
expected_params = self._queue[0]['expected_params']
if expected_params is None:
return
# Validate the parameters are equal
for param, value in expected_params.items():
if param not in params or expected_params[param] != params[param]:
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
# Ensure there are no extra params hanging around
if sorted(expected_params.keys()) != sorted(params.keys()):
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
def _should_not_stub(self, context):
# Do not include presign requests when processing stubbed client calls
# as a presign request will never have an HTTP request sent over the
# wire for it and therefore not receive a response back.
if context and context.get('is_presign_request'):
return True
def _validate_response(self, operation_name, service_response):
service_model = self.client.meta.service_model
operation_model = service_model.operation_model(operation_name)
output_shape = operation_model.output_shape
# Remove ResponseMetadata so that the validator doesn't attempt to
# perform validation on it.
response = service_response
if 'ResponseMetadata' in response:
response = copy.copy(service_response)
del response['ResponseMetadata']
if output_shape is not None:
validate_parameters(response, output_shape)
elif response:
# If the output shape is None, that means the response should be
# empty apart from ResponseMetadata
raise ParamValidationError(
report=(
"Service response should only contain ResponseMetadata."))
| 14,361 | Python | 35.359494 | 97 | 0.590906 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/paginate.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from itertools import tee
from botocore.compat import six
import jmespath
import json
import base64
import logging
from botocore.exceptions import PaginationError
from botocore.compat import zip
from botocore.utils import set_value_from_jmespath, merge_dicts
log = logging.getLogger(__name__)
class TokenEncoder(object):
"""Encodes dictionaries into opaque strings.
    This is, for the most part, json dumps + base64 encoding, but it also supports
having bytes in the dictionary in addition to the types that json can
handle by default.
This is intended for use in encoding pagination tokens, which in some
cases can be complex structures and / or contain bytes.
"""
def encode(self, token):
"""Encodes a dictionary to an opaque string.
:type token: dict
:param token: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
:rtype: str
:returns: An opaque string
"""
try:
# Try just using json dumps first to avoid having to traverse
# and encode the dict. In 99.9999% of cases this will work.
json_string = json.dumps(token)
except (TypeError, UnicodeDecodeError):
# If normal dumping failed, go through and base64 encode all bytes.
encoded_token, encoded_keys = self._encode(token, [])
# Save the list of all the encoded key paths. We can safely
# assume that no service will ever use this key.
encoded_token['boto_encoded_keys'] = encoded_keys
# Now that the bytes are all encoded, dump the json.
json_string = json.dumps(encoded_token)
# base64 encode the json string to produce an opaque token string.
return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
def _encode(self, data, path):
"""Encode bytes in given data, keeping track of the path traversed."""
if isinstance(data, dict):
return self._encode_dict(data, path)
elif isinstance(data, list):
return self._encode_list(data, path)
elif isinstance(data, six.binary_type):
return self._encode_bytes(data, path)
else:
return data, []
def _encode_list(self, data, path):
"""Encode any bytes in a list, noting the index of what is encoded."""
new_data = []
encoded = []
for i, value in enumerate(data):
new_path = path + [i]
new_value, new_encoded = self._encode(value, new_path)
new_data.append(new_value)
encoded.extend(new_encoded)
return new_data, encoded
def _encode_dict(self, data, path):
"""Encode any bytes in a dict, noting the index of what is encoded."""
new_data = {}
encoded = []
for key, value in data.items():
new_path = path + [key]
new_value, new_encoded = self._encode(value, new_path)
new_data[key] = new_value
encoded.extend(new_encoded)
return new_data, encoded
def _encode_bytes(self, data, path):
"""Base64 encode a byte string."""
return base64.b64encode(data).decode('utf-8'), [path]
class TokenDecoder(object):
"""Decodes token strings back into dictionaries.
This performs the inverse operation to the TokenEncoder, accepting
    opaque strings and decoding them into a usable form.
"""
def decode(self, token):
"""Decodes an opaque string to a dictionary.
:type token: str
:param token: A token string given by the botocore pagination
interface.
:rtype: dict
:returns: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
"""
json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')
decoded_token = json.loads(json_string)
# Remove the encoding metadata as it is read since it will no longer
# be needed.
encoded_keys = decoded_token.pop('boto_encoded_keys', None)
if encoded_keys is None:
return decoded_token
else:
return self._decode(decoded_token, encoded_keys)
def _decode(self, token, encoded_keys):
"""Find each encoded value and decode it."""
for key in encoded_keys:
encoded = self._path_get(token, key)
decoded = base64.b64decode(encoded.encode('utf-8'))
self._path_set(token, key, decoded)
return token
def _path_get(self, data, path):
"""Return the nested data at the given path.
For instance:
data = {'foo': ['bar', 'baz']}
path = ['foo', 0]
==> 'bar'
"""
# jmespath isn't used here because it would be difficult to actually
# create the jmespath query when taking all of the unknowns of key
# structure into account. Gross though this is, it is simple and not
# very error prone.
d = data
for step in path:
d = d[step]
return d
def _path_set(self, data, path, value):
"""Set the value of a key in the given data.
Example:
data = {'foo': ['bar', 'baz']}
path = ['foo', 1]
value = 'bin'
==> data = {'foo': ['bar', 'bin']}
"""
container = self._path_get(data, path[:-1])
container[path[-1]] = value
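# Illustrative sketch, not part of botocore: pagination tokens round-trip
# through TokenEncoder/TokenDecoder even when they contain raw bytes, which
# plain JSON could not represent on its own.
def _token_round_trip_sketch():
    token = {'Marker': b'\xfe\xed', 'boto_truncate_amount': 2}
    opaque = TokenEncoder().encode(token)
    assert isinstance(opaque, str)
    assert TokenDecoder().decode(opaque) == token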
class PaginatorModel(object):
def __init__(self, paginator_config):
self._paginator_config = paginator_config['pagination']
def get_paginator(self, operation_name):
try:
single_paginator_config = self._paginator_config[operation_name]
except KeyError:
raise ValueError("Paginator for operation does not exist: %s"
% operation_name)
return single_paginator_config
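# Illustrative sketch, not part of botocore: a service's paginator config maps
# operation names to pagination rules. The operation name and keys below are
# made up for the example.
def _paginator_model_sketch():
    model = PaginatorModel({
        'pagination': {
            'ListWidgets': {
                'input_token': 'NextToken',
                'output_token': 'NextToken',
                'limit_key': 'MaxResults',
                'result_key': 'Widgets',
            }
        }
    })
    assert model.get_paginator('ListWidgets')['result_key'] == 'Widgets'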
class PageIterator(object):
def __init__(self, method, input_token, output_token, more_results,
result_keys, non_aggregate_keys, limit_key, max_items,
starting_token, page_size, op_kwargs):
self._method = method
self._input_token = input_token
self._output_token = output_token
self._more_results = more_results
self._result_keys = result_keys
self._max_items = max_items
self._limit_key = limit_key
self._starting_token = starting_token
self._page_size = page_size
self._op_kwargs = op_kwargs
self._resume_token = None
self._non_aggregate_key_exprs = non_aggregate_keys
self._non_aggregate_part = {}
self._token_encoder = TokenEncoder()
self._token_decoder = TokenDecoder()
@property
def result_keys(self):
return self._result_keys
@property
def resume_token(self):
"""Token to specify to resume pagination."""
return self._resume_token
@resume_token.setter
def resume_token(self, value):
if not isinstance(value, dict):
raise ValueError("Bad starting token: %s" % value)
if 'boto_truncate_amount' in value:
token_keys = sorted(self._input_token + ['boto_truncate_amount'])
else:
token_keys = sorted(self._input_token)
dict_keys = sorted(value.keys())
if token_keys == dict_keys:
self._resume_token = self._token_encoder.encode(value)
else:
raise ValueError("Bad starting token: %s" % value)
@property
def non_aggregate_part(self):
return self._non_aggregate_part
def __iter__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def search(self, expression):
"""Applies a JMESPath expression to a paginator
Each page of results is searched using the provided JMESPath
expression. If the result is not a list, it is yielded
directly. If the result is a list, each element in the result
is yielded individually (essentially implementing a flatmap in
which the JMESPath search is the mapping function).
:type expression: str
:param expression: JMESPath expression to apply to each page.
:return: Returns an iterator that yields the individual
elements of applying a JMESPath expression to each page of
results.
"""
compiled = jmespath.compile(expression)
for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
# Yield result directly if it is not a list.
yield results
def _make_request(self, current_kwargs):
return self._method(**current_kwargs)
def _extract_parsed_response(self, response):
return response
def _record_non_aggregate_key_values(self, response):
non_aggregate_keys = {}
for expression in self._non_aggregate_key_exprs:
result = expression.search(response)
set_value_from_jmespath(non_aggregate_keys,
expression.expression,
result)
self._non_aggregate_part = non_aggregate_keys
def _inject_starting_params(self, op_kwargs):
# If the user has specified a starting token we need to
# inject that into the operation's kwargs.
if self._starting_token is not None:
# Don't need to do anything special if there is no starting
# token specified.
next_token = self._parse_starting_token()[0]
self._inject_token_into_kwargs(op_kwargs, next_token)
if self._page_size is not None:
# Pass the page size as the parameter name for limiting
# page size, also known as the limit_key.
op_kwargs[self._limit_key] = self._page_size
def _inject_token_into_kwargs(self, op_kwargs, next_token):
for name, token in next_token.items():
if (token is not None) and (token != 'None'):
op_kwargs[name] = token
elif name in op_kwargs:
del op_kwargs[name]
def _handle_first_request(self, parsed, primary_result_key,
starting_truncation):
# If the payload is an array or string, we need to slice into it
# and only return the truncated amount.
starting_truncation = self._parse_starting_token()[1]
all_data = primary_result_key.search(parsed)
if isinstance(all_data, (list, six.string_types)):
data = all_data[starting_truncation:]
else:
data = None
set_value_from_jmespath(
parsed,
primary_result_key.expression,
data
)
# We also need to truncate any secondary result keys
# because they were not truncated in the previous last
# response.
for token in self.result_keys:
if token == primary_result_key:
continue
sample = token.search(parsed)
if isinstance(sample, list):
empty_value = []
elif isinstance(sample, six.string_types):
empty_value = ''
elif isinstance(sample, (int, float)):
empty_value = 0
else:
empty_value = None
set_value_from_jmespath(parsed, token.expression, empty_value)
return starting_truncation
def _truncate_response(self, parsed, primary_result_key, truncate_amount,
starting_truncation, next_token):
original = primary_result_key.search(parsed)
if original is None:
original = []
amount_to_keep = len(original) - truncate_amount
truncated = original[:amount_to_keep]
set_value_from_jmespath(
parsed,
primary_result_key.expression,
truncated
)
# The issue here is that even though we know how much we've truncated
# we need to account for this globally including any starting
# left truncation. For example:
# Raw response: [0,1,2,3]
# Starting index: 1
# Max items: 1
# Starting left truncation: [1, 2, 3]
# End right truncation for max items: [1]
# However, even though we only kept 1, this is post
# left truncation so the next starting index should be 2, not 1
# (left_truncation + amount_to_keep).
next_token['boto_truncate_amount'] = \
amount_to_keep + starting_truncation
self.resume_token = next_token
def _get_next_token(self, parsed):
if self._more_results is not None:
if not self._more_results.search(parsed):
return {}
next_tokens = {}
for output_token, input_key in \
zip(self._output_token, self._input_token):
next_token = output_token.search(parsed)
# We do not want to include any empty strings as actual tokens.
# Treat them as None.
if next_token:
next_tokens[input_key] = next_token
else:
next_tokens[input_key] = None
return next_tokens
def result_key_iters(self):
teed_results = tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
def build_full_result(self):
complete_result = {}
for response in self:
page = response
# We want to try to catch operation object pagination
# and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_response).
# We want the parsed_response as that is what the page iterator
# uses. We can remove it though once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
def _parse_starting_token(self):
if self._starting_token is None:
return None
# The starting token is a dict passed as a base64 encoded string.
next_token = self._starting_token
try:
next_token = self._token_decoder.decode(next_token)
index = 0
if 'boto_truncate_amount' in next_token:
index = next_token.get('boto_truncate_amount')
del next_token['boto_truncate_amount']
except (ValueError, TypeError):
next_token, index = self._parse_starting_token_deprecated()
return next_token, index
def _parse_starting_token_deprecated(self):
"""
This handles parsing of old style starting tokens, and attempts to
coerce them into the new style.
"""
log.debug("Attempting to fall back to old starting token parser. For "
"token: %s" % self._starting_token)
if self._starting_token is None:
return None
parts = self._starting_token.split('___')
next_token = []
index = 0
if len(parts) == len(self._input_token) + 1:
try:
index = int(parts.pop())
except ValueError:
# This doesn't look like a valid old-style token, so we're
# passing it along as an opaque service token.
parts = [self._starting_token]
for part in parts:
if part == 'None':
next_token.append(None)
else:
next_token.append(part)
return self._convert_deprecated_starting_token(next_token), index
def _convert_deprecated_starting_token(self, deprecated_token):
"""
This attempts to convert a deprecated starting token into the new
style.
"""
len_deprecated_token = len(deprecated_token)
len_input_token = len(self._input_token)
if len_deprecated_token > len_input_token:
raise ValueError("Bad starting token: %s" % self._starting_token)
elif len_deprecated_token < len_input_token:
log.debug("Old format starting token does not contain all input "
"tokens. Setting the rest, in order, as None.")
for i in range(len_input_token - len_deprecated_token):
deprecated_token.append(None)
return dict(zip(self._input_token, deprecated_token))
class Paginator(object):
PAGE_ITERATOR_CLS = PageIterator
def __init__(self, method, pagination_config, model):
self._model = model
self._method = method
self._pagination_cfg = pagination_config
self._output_token = self._get_output_tokens(self._pagination_cfg)
self._input_token = self._get_input_tokens(self._pagination_cfg)
self._more_results = self._get_more_results_token(self._pagination_cfg)
self._non_aggregate_keys = self._get_non_aggregate_keys(
self._pagination_cfg)
self._result_keys = self._get_result_keys(self._pagination_cfg)
self._limit_key = self._get_limit_key(self._pagination_cfg)
@property
def result_keys(self):
return self._result_keys
def _get_non_aggregate_keys(self, config):
keys = []
for key in config.get('non_aggregate_keys', []):
keys.append(jmespath.compile(key))
return keys
def _get_output_tokens(self, config):
output = []
output_token = config['output_token']
if not isinstance(output_token, list):
output_token = [output_token]
for config in output_token:
output.append(jmespath.compile(config))
return output
def _get_input_tokens(self, config):
input_token = self._pagination_cfg['input_token']
if not isinstance(input_token, list):
input_token = [input_token]
return input_token
def _get_more_results_token(self, config):
more_results = config.get('more_results')
if more_results is not None:
return jmespath.compile(more_results)
def _get_result_keys(self, config):
result_key = config.get('result_key')
if result_key is not None:
if not isinstance(result_key, list):
result_key = [result_key]
result_key = [jmespath.compile(rk) for rk in result_key]
return result_key
def _get_limit_key(self, config):
return config.get('limit_key')
def paginate(self, **kwargs):
"""Create paginator object for an operation.
This returns an iterable object. Iterating over
this object will yield a single page of a response
at a time.
"""
page_params = self._extract_paging_params(kwargs)
return self.PAGE_ITERATOR_CLS(
self._method, self._input_token,
self._output_token, self._more_results,
self._result_keys, self._non_aggregate_keys,
self._limit_key,
page_params['MaxItems'],
page_params['StartingToken'],
page_params['PageSize'],
kwargs)
def _extract_paging_params(self, kwargs):
pagination_config = kwargs.pop('PaginationConfig', {})
max_items = pagination_config.get('MaxItems', None)
if max_items is not None:
max_items = int(max_items)
page_size = pagination_config.get('PageSize', None)
if page_size is not None:
if self._limit_key is None:
raise PaginationError(
message="PageSize parameter is not supported for the "
"pagination interface for this operation.")
input_members = self._model.input_shape.members
limit_key_shape = input_members.get(self._limit_key)
if limit_key_shape.type_name == 'string':
if not isinstance(page_size, six.string_types):
page_size = str(page_size)
else:
page_size = int(page_size)
return {
'MaxItems': max_items,
'StartingToken': pagination_config.get('StartingToken', None),
'PageSize': page_size,
}
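# Illustrative sketch, not part of botocore: in practice a Paginator is
# obtained through ``client.get_paginator`` rather than constructed directly.
# The bucket name, page sizes, and JMESPath expression are assumptions.
def _paginator_usage_sketch():
    import botocore.session
    s3 = botocore.session.get_session().create_client('s3')
    paginator = s3.get_paginator('list_objects_v2')
    pages = paginator.paginate(
        Bucket='my-bucket',
        PaginationConfig={'MaxItems': 100, 'PageSize': 25})
    keys = []
    for page in pages:  # page-by-page iteration
        keys.extend(obj['Key'] for obj in page.get('Contents', []))
    # Alternatively, flat-map every page with PageIterator.search(), or
    # aggregate all result keys at once with PageIterator.build_full_result().
    all_keys = list(
        paginator.paginate(Bucket='my-bucket').search('Contents[].Key'))
    return keys, all_keys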
class ResultKeyIterator(object):
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __iter__(self):
for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
| 27,128 | Python | 39.013274 | 79 | 0.585373 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/eventstream.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Binary Event Stream Decoding """
from binascii import crc32
from struct import unpack
from botocore.exceptions import EventStreamError
# byte length of the prelude (total_length + header_length + prelude_crc)
_PRELUDE_LENGTH = 12
_MAX_HEADERS_LENGTH = 128 * 1024 # 128 Kb
_MAX_PAYLOAD_LENGTH = 16 * 1024 ** 2 # 16 Mb
class ParserError(Exception):
"""Base binary flow encoding parsing exception. """
pass
class DuplicateHeader(ParserError):
"""Duplicate header found in the event. """
def __init__(self, header):
message = 'Duplicate header present: "%s"' % header
super(DuplicateHeader, self).__init__(message)
class InvalidHeadersLength(ParserError):
"""Headers length is longer than the maximum. """
def __init__(self, length):
message = 'Header length of %s exceeded the maximum of %s' % (
length, _MAX_HEADERS_LENGTH
)
super(InvalidHeadersLength, self).__init__(message)
class InvalidPayloadLength(ParserError):
"""Payload length is longer than the maximum. """
def __init__(self, length):
message = 'Payload length of %s exceeded the maximum of %s' % (
length, _MAX_PAYLOAD_LENGTH
)
super(InvalidPayloadLength, self).__init__(message)
class ChecksumMismatch(ParserError):
"""Calculated checksum did not match the expected checksum. """
def __init__(self, expected, calculated):
message = 'Checksum mismatch: expected 0x%08x, calculated 0x%08x' % (
expected, calculated
)
super(ChecksumMismatch, self).__init__(message)
class NoInitialResponseError(ParserError):
"""An event of type initial-response was not received.
This exception is raised when the event stream produced no events or
the first event in the stream was not of the initial-response type.
"""
def __init__(self):
message = 'First event was not of the initial-response type'
super(NoInitialResponseError, self).__init__(message)
class DecodeUtils(object):
"""Unpacking utility functions used in the decoder.
All methods on this class take raw bytes and return a tuple containing
the value parsed from the bytes and the number of bytes consumed to parse
that value.
"""
UINT8_BYTE_FORMAT = '!B'
UINT16_BYTE_FORMAT = '!H'
UINT32_BYTE_FORMAT = '!I'
INT8_BYTE_FORMAT = '!b'
INT16_BYTE_FORMAT = '!h'
INT32_BYTE_FORMAT = '!i'
INT64_BYTE_FORMAT = '!q'
PRELUDE_BYTE_FORMAT = '!III'
# uint byte size to unpack format
UINT_BYTE_FORMAT = {
1: UINT8_BYTE_FORMAT,
2: UINT16_BYTE_FORMAT,
4: UINT32_BYTE_FORMAT,
}
@staticmethod
def unpack_true(data):
"""This method consumes none of the provided bytes and returns True.
:type data: bytes
:param data: The bytes to parse from. This is ignored in this method.
        :rtype: (bool, int)
:returns: The tuple (True, 0)
"""
return True, 0
@staticmethod
def unpack_false(data):
"""This method consumes none of the provided bytes and returns False.
:type data: bytes
:param data: The bytes to parse from. This is ignored in this method.
        :rtype: (bool, int)
:returns: The tuple (False, 0)
"""
return False, 0
@staticmethod
def unpack_uint8(data):
"""Parse an unsigned 8-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0]
return value, 1
@staticmethod
def unpack_uint32(data):
"""Parse an unsigned 32-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.UINT32_BYTE_FORMAT, data[:4])[0]
return value, 4
@staticmethod
def unpack_int8(data):
"""Parse a signed 8-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT8_BYTE_FORMAT, data[:1])[0]
return value, 1
@staticmethod
def unpack_int16(data):
"""Parse a signed 16-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT16_BYTE_FORMAT, data[:2])[0]
return value, 2
@staticmethod
def unpack_int32(data):
"""Parse a signed 32-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT32_BYTE_FORMAT, data[:4])[0]
return value, 4
@staticmethod
def unpack_int64(data):
"""Parse a signed 64-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT64_BYTE_FORMAT, data[:8])[0]
return value, 8
@staticmethod
def unpack_byte_array(data, length_byte_size=2):
"""Parse a variable length byte array from the bytes.
The bytes are expected to be in the following format:
[ length ][0 ... length bytes]
where length is an unsigned integer represented in the smallest number
of bytes to hold the maximum length of the array.
:type data: bytes
:param data: The bytes to parse from.
:type length_byte_size: int
        :param length_byte_size: The byte size of the preceding integer that
represents the length of the array. Supported values are 1, 2, and 4.
:rtype: (bytes, int)
:returns: A tuple containing the (parsed byte array, bytes consumed).
"""
uint_byte_format = DecodeUtils.UINT_BYTE_FORMAT[length_byte_size]
length = unpack(uint_byte_format, data[:length_byte_size])[0]
bytes_end = length + length_byte_size
array_bytes = data[length_byte_size:bytes_end]
return array_bytes, bytes_end
@staticmethod
def unpack_utf8_string(data, length_byte_size=2):
"""Parse a variable length utf-8 string from the bytes.
The bytes are expected to be in the following format:
[ length ][0 ... length bytes]
where length is an unsigned integer represented in the smallest number
of bytes to hold the maximum length of the array and the following
bytes are a valid utf-8 string.
:type data: bytes
        :param data: The bytes to parse from.
        :type length_byte_size: int
        :param length_byte_size: The byte size of the preceding integer that
            represents the length of the array. Supported values are 1, 2, and 4.
:rtype: (str, int)
:returns: A tuple containing the (utf-8 string, bytes consumed).
"""
array_bytes, consumed = DecodeUtils.unpack_byte_array(
data, length_byte_size)
return array_bytes.decode('utf-8'), consumed
@staticmethod
def unpack_uuid(data):
"""Parse a 16-byte uuid from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (bytes, int)
:returns: A tuple containing the (uuid bytes, bytes consumed).
"""
return data[:16], 16
@staticmethod
def unpack_prelude(data):
"""Parse the prelude for an event stream message from the bytes.
The prelude for an event stream message has the following format:
[total_length][header_length][prelude_crc]
where each field is an unsigned 32-bit integer.
:rtype: ((int, int, int), int)
:returns: A tuple of ((total_length, headers_length, prelude_crc),
consumed)
"""
return (unpack(DecodeUtils.PRELUDE_BYTE_FORMAT, data), _PRELUDE_LENGTH)
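# Illustrative sketch, not part of botocore: the variable-length unpackers read
# a length prefix followed by that many bytes, and report how much of the
# buffer they consumed so a caller can walk the buffer sequentially.
def _decode_utils_sketch():
    value, consumed = DecodeUtils.unpack_utf8_string(
        b'\x05hello', length_byte_size=1)
    assert (value, consumed) == ('hello', 6)
    value, consumed = DecodeUtils.unpack_uint32(b'\x00\x00\x01\x00')
    assert (value, consumed) == (256, 4)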
def _validate_checksum(data, checksum, crc=0):
# To generate the same numeric value across all Python versions and
# platforms use crc32(data) & 0xffffffff.
computed_checksum = crc32(data, crc) & 0xFFFFFFFF
if checksum != computed_checksum:
raise ChecksumMismatch(checksum, computed_checksum)
class MessagePrelude(object):
"""Represents the prelude of an event stream message. """
def __init__(self, total_length, headers_length, crc):
self.total_length = total_length
self.headers_length = headers_length
self.crc = crc
@property
def payload_length(self):
"""Calculates the total payload length.
The extra minus 4 bytes is for the message CRC.
:rtype: int
:returns: The total payload length.
"""
return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4
@property
def payload_end(self):
"""Calculates the byte offset for the end of the message payload.
The extra minus 4 bytes is for the message CRC.
:rtype: int
:returns: The byte offset from the beginning of the event stream
message to the end of the payload.
"""
return self.total_length - 4
@property
def headers_end(self):
"""Calculates the byte offset for the end of the message headers.
:rtype: int
:returns: The byte offset from the beginning of the event stream
message to the end of the headers.
"""
return _PRELUDE_LENGTH + self.headers_length
class EventStreamMessage(object):
"""Represents an event stream message. """
def __init__(self, prelude, headers, payload, crc):
self.prelude = prelude
self.headers = headers
self.payload = payload
self.crc = crc
def to_response_dict(self, status_code=200):
message_type = self.headers.get(':message-type')
if message_type == 'error' or message_type == 'exception':
status_code = 400
return {
'status_code': status_code,
'headers': self.headers,
'body': self.payload
}
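# Illustrative sketch, not part of botocore: error and exception events are
# mapped to a 400 status code so the normal response parser surfaces them as
# errors rather than successful responses.
def _message_to_response_sketch():
    message = EventStreamMessage(
        prelude=None, headers={':message-type': 'exception'},
        payload=b'{}', crc=None)
    assert message.to_response_dict()['status_code'] == 400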
class EventStreamHeaderParser(object):
""" Parses the event headers from an event stream message.
Expects all of the header data upfront and creates a dictionary of headers
to return. This object can be reused multiple times to parse the headers
from multiple event stream messages.
"""
# Maps header type to appropriate unpacking function
# These unpacking functions return the value and the amount unpacked
_HEADER_TYPE_MAP = {
# boolean_true
0: DecodeUtils.unpack_true,
# boolean_false
1: DecodeUtils.unpack_false,
# byte
2: DecodeUtils.unpack_int8,
# short
3: DecodeUtils.unpack_int16,
# integer
4: DecodeUtils.unpack_int32,
# long
5: DecodeUtils.unpack_int64,
# byte_array
6: DecodeUtils.unpack_byte_array,
# string
7: DecodeUtils.unpack_utf8_string,
# timestamp
8: DecodeUtils.unpack_int64,
# uuid
9: DecodeUtils.unpack_uuid,
}
def __init__(self):
self._data = None
def parse(self, data):
"""Parses the event stream headers from an event stream message.
:type data: bytes
:param data: The bytes that correspond to the headers section of an
event stream message.
:rtype: dict
        :returns: A dictionary of header key, value pairs.
"""
self._data = data
return self._parse_headers()
def _parse_headers(self):
headers = {}
while self._data:
name, value = self._parse_header()
if name in headers:
raise DuplicateHeader(name)
headers[name] = value
return headers
def _parse_header(self):
name = self._parse_name()
value = self._parse_value()
return name, value
def _parse_name(self):
name, consumed = DecodeUtils.unpack_utf8_string(self._data, 1)
self._advance_data(consumed)
return name
def _parse_type(self):
type, consumed = DecodeUtils.unpack_uint8(self._data)
self._advance_data(consumed)
return type
def _parse_value(self):
header_type = self._parse_type()
value_unpacker = self._HEADER_TYPE_MAP[header_type]
value, consumed = value_unpacker(self._data)
self._advance_data(consumed)
return value
def _advance_data(self, consumed):
self._data = self._data[consumed:]
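# Illustrative sketch, not part of botocore: a header is wire-encoded as a
# 1-byte name length, the name, a 1-byte type code, then the type-specific
# value (type 7 below is a string with a 2-byte length prefix).
def _header_parser_sketch():
    raw = b'\x0b:event-type\x07\x00\x07Records'
    assert EventStreamHeaderParser().parse(raw) == {':event-type': 'Records'}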
class EventStreamBuffer(object):
"""Streaming based event stream buffer
A buffer class that wraps bytes from an event stream providing parsed
messages as they become available via an iterable interface.
"""
def __init__(self):
self._data = b''
self._prelude = None
self._header_parser = EventStreamHeaderParser()
def add_data(self, data):
"""Add data to the buffer.
:type data: bytes
:param data: The bytes to add to the buffer to be used when parsing
"""
self._data += data
def _validate_prelude(self, prelude):
if prelude.headers_length > _MAX_HEADERS_LENGTH:
raise InvalidHeadersLength(prelude.headers_length)
if prelude.payload_length > _MAX_PAYLOAD_LENGTH:
raise InvalidPayloadLength(prelude.payload_length)
def _parse_prelude(self):
prelude_bytes = self._data[:_PRELUDE_LENGTH]
raw_prelude, _ = DecodeUtils.unpack_prelude(prelude_bytes)
prelude = MessagePrelude(*raw_prelude)
self._validate_prelude(prelude)
# The minus 4 removes the prelude crc from the bytes to be checked
_validate_checksum(prelude_bytes[:_PRELUDE_LENGTH-4], prelude.crc)
return prelude
def _parse_headers(self):
header_bytes = self._data[_PRELUDE_LENGTH:self._prelude.headers_end]
return self._header_parser.parse(header_bytes)
def _parse_payload(self):
prelude = self._prelude
payload_bytes = self._data[prelude.headers_end:prelude.payload_end]
return payload_bytes
def _parse_message_crc(self):
prelude = self._prelude
crc_bytes = self._data[prelude.payload_end:prelude.total_length]
message_crc, _ = DecodeUtils.unpack_uint32(crc_bytes)
return message_crc
def _parse_message_bytes(self):
# The minus 4 includes the prelude crc to the bytes to be checked
message_bytes = self._data[_PRELUDE_LENGTH-4:self._prelude.payload_end]
return message_bytes
def _validate_message_crc(self):
message_crc = self._parse_message_crc()
message_bytes = self._parse_message_bytes()
_validate_checksum(message_bytes, message_crc, crc=self._prelude.crc)
return message_crc
def _parse_message(self):
crc = self._validate_message_crc()
headers = self._parse_headers()
payload = self._parse_payload()
message = EventStreamMessage(self._prelude, headers, payload, crc)
self._prepare_for_next_message()
return message
def _prepare_for_next_message(self):
# Advance the data and reset the current prelude
self._data = self._data[self._prelude.total_length:]
self._prelude = None
def next(self):
"""Provides the next available message parsed from the stream
:rtype: EventStreamMessage
:returns: The next event stream message
"""
if len(self._data) < _PRELUDE_LENGTH:
raise StopIteration()
if self._prelude is None:
self._prelude = self._parse_prelude()
if len(self._data) < self._prelude.total_length:
raise StopIteration()
return self._parse_message()
def __next__(self):
return self.next()
def __iter__(self):
return self
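# Illustrative sketch, not part of botocore: framing a single event by hand
# (prelude, headers, payload, CRCs) and feeding it to the buffer in two
# chunks. The header and payload contents are made up for the example.
def _event_stream_buffer_sketch():
    from binascii import crc32
    from struct import pack
    headers = b'\x0b:event-type\x07\x00\x07Records'
    payload = b'{"hello": "world"}'
    total_length = _PRELUDE_LENGTH + len(headers) + len(payload) + 4
    prelude = pack('!II', total_length, len(headers))
    prelude += pack('!I', crc32(prelude) & 0xFFFFFFFF)
    body = prelude + headers + payload
    message = body + pack('!I', crc32(body) & 0xFFFFFFFF)
    buffer = EventStreamBuffer()
    buffer.add_data(message[:10])   # not enough data to parse yet
    buffer.add_data(message[10:])
    event = next(iter(buffer))
    assert event.headers == {':event-type': 'Records'}
    assert event.payload == payload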
class EventStream(object):
"""Wrapper class for an event stream body.
This wraps the underlying streaming body, parsing it for individual events
and yielding them as they come available through the iterator interface.
The following example uses the S3 select API to get structured data out of
an object stored in S3 using an event stream.
**Example:**
::
from botocore.session import Session
s3 = Session().create_client('s3')
response = s3.select_object_content(
Bucket='bucketname',
Key='keyname',
ExpressionType='SQL',
RequestProgress={'Enabled': True},
Expression="SELECT * FROM S3Object s",
InputSerialization={'CSV': {}},
OutputSerialization={'CSV': {}},
)
# This is the event stream in the response
event_stream = response['Payload']
end_event_received = False
with open('output', 'wb') as f:
# Iterate over events in the event stream as they come
for event in event_stream:
# If we received a records event, write the data to a file
if 'Records' in event:
data = event['Records']['Payload']
f.write(data)
# If we received a progress event, print the details
elif 'Progress' in event:
print(event['Progress']['Details'])
# End event indicates that the request finished successfully
elif 'End' in event:
print('Result is complete')
end_event_received = True
if not end_event_received:
raise Exception("End event not received, request incomplete.")
"""
def __init__(self, raw_stream, output_shape, parser, operation_name):
self._raw_stream = raw_stream
self._output_shape = output_shape
self._operation_name = operation_name
self._parser = parser
self._event_generator = self._create_raw_event_generator()
def __iter__(self):
for event in self._event_generator:
parsed_event = self._parse_event(event)
if parsed_event:
yield parsed_event
def _create_raw_event_generator(self):
event_stream_buffer = EventStreamBuffer()
for chunk in self._raw_stream.stream():
event_stream_buffer.add_data(chunk)
for event in event_stream_buffer:
yield event
def _parse_event(self, event):
response_dict = event.to_response_dict()
parsed_response = self._parser.parse(response_dict, self._output_shape)
if response_dict['status_code'] == 200:
return parsed_response
else:
raise EventStreamError(parsed_response, self._operation_name)
def get_initial_response(self):
try:
initial_event = next(self._event_generator)
event_type = initial_event.headers.get(':event-type')
if event_type == 'initial-response':
return initial_event
except StopIteration:
pass
raise NoInitialResponseError()
def close(self):
"""Closes the underlying streaming body. """
self._raw_stream.close()
| 20,517 | Python | 32.254457 | 79 | 0.61973 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/httpsession.py | import os.path
import logging
import socket
from base64 import b64encode
from urllib3 import PoolManager, ProxyManager, proxy_from_url, Timeout
from urllib3.util.retry import Retry
from urllib3.util.ssl_ import (
ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, DEFAULT_CIPHERS,
)
from urllib3.exceptions import SSLError as URLLib3SSLError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from urllib3.exceptions import ConnectTimeoutError as URLLib3ConnectTimeoutError
from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError
try:
# Always import the original SSLContext, even if it has been patched
from urllib3.contrib.pyopenssl import orig_util_SSLContext as SSLContext
except ImportError:
from urllib3.util.ssl_ import SSLContext
import botocore.awsrequest
from botocore.vendored import six
from botocore.vendored.six.moves.urllib_parse import unquote
from botocore.compat import filter_ssl_warnings, urlparse
from botocore.exceptions import (
ConnectionClosedError, EndpointConnectionError, HTTPClientError,
ReadTimeoutError, ProxyConnectionError, ConnectTimeoutError, SSLError,
InvalidProxiesConfigError
)
filter_ssl_warnings()
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem')
try:
from certifi import where
except ImportError:
def where():
return DEFAULT_CA_BUNDLE
def get_cert_path(verify):
if verify is not True:
return verify
cert_path = where()
logger.debug("Certificate path: {0}".format(cert_path))
return cert_path
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
""" This function is a vendored version of the same function in urllib3
We vendor this function to ensure that the SSL contexts we construct
always use the std lib SSLContext instead of pyopenssl.
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue urllib3#309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True):
# Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None:
# Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
class ProxyConfiguration(object):
"""Represents a proxy configuration dictionary and additional settings.
This class represents a proxy configuration dictionary and provides utility
    functions to retrieve well-structured proxy URLs and proxy headers from the
proxy configuration dictionary.
"""
def __init__(self, proxies=None, proxies_settings=None):
if proxies is None:
proxies = {}
if proxies_settings is None:
proxies_settings = {}
self._proxies = proxies
self._proxies_settings = proxies_settings
def proxy_url_for(self, url):
"""Retrieves the corresponding proxy url for a given url. """
parsed_url = urlparse(url)
proxy = self._proxies.get(parsed_url.scheme)
if proxy:
proxy = self._fix_proxy_url(proxy)
return proxy
def proxy_headers_for(self, proxy_url):
"""Retrieves the corresponding proxy headers for a given proxy url. """
headers = {}
username, password = self._get_auth_from_url(proxy_url)
if username and password:
basic_auth = self._construct_basic_auth(username, password)
headers['Proxy-Authorization'] = basic_auth
return headers
@property
def settings(self):
return self._proxies_settings
def _fix_proxy_url(self, proxy_url):
if proxy_url.startswith('http:') or proxy_url.startswith('https:'):
return proxy_url
elif proxy_url.startswith('//'):
return 'http:' + proxy_url
else:
return 'http://' + proxy_url
def _construct_basic_auth(self, username, password):
auth_str = '{0}:{1}'.format(username, password)
encoded_str = b64encode(auth_str.encode('ascii')).strip().decode()
return 'Basic {0}'.format(encoded_str)
def _get_auth_from_url(self, url):
parsed_url = urlparse(url)
try:
return unquote(parsed_url.username), unquote(parsed_url.password)
except (AttributeError, TypeError):
return None, None
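# Illustrative sketch, not part of botocore: the proxy mapping is keyed by the
# scheme of the request URL, and userinfo in the proxy URL becomes a
# Proxy-Authorization header. The host and credentials are made up.
def _proxy_configuration_sketch():
    config = ProxyConfiguration(
        proxies={'https': 'user:[email protected]:8080'})
    proxy_url = config.proxy_url_for('https://s3.amazonaws.com')
    assert proxy_url == 'http://user:[email protected]:8080'
    headers = config.proxy_headers_for(proxy_url)
    assert headers == {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}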
class URLLib3Session(object):
"""A basic HTTP client that supports connection pooling and proxies.
This class is inspired by requests.adapters.HTTPAdapter, but has been
boiled down to meet the use cases needed by botocore. For the most part
    this class matches the functionality of HTTPAdapter in requests v2.7.0
(the same as our vendored version). The only major difference of note is
that we currently do not support sending chunked requests. While requests
    v2.7.0 implemented this themselves, later versions of urllib3 support this
directly via a flag to urlopen so enabling it if needed should be trivial.
"""
def __init__(self,
verify=True,
proxies=None,
timeout=None,
max_pool_connections=MAX_POOL_CONNECTIONS,
socket_options=None,
client_cert=None,
proxies_config=None,
):
self._verify = verify
self._proxy_config = ProxyConfiguration(proxies=proxies,
proxies_settings=proxies_config)
self._pool_classes_by_scheme = {
'http': botocore.awsrequest.AWSHTTPConnectionPool,
'https': botocore.awsrequest.AWSHTTPSConnectionPool,
}
if timeout is None:
timeout = DEFAULT_TIMEOUT
if not isinstance(timeout, (int, float)):
timeout = Timeout(connect=timeout[0], read=timeout[1])
self._cert_file = None
self._key_file = None
if isinstance(client_cert, str):
self._cert_file = client_cert
elif isinstance(client_cert, tuple):
self._cert_file, self._key_file = client_cert
self._timeout = timeout
self._max_pool_connections = max_pool_connections
self._socket_options = socket_options
if socket_options is None:
self._socket_options = []
self._proxy_managers = {}
self._manager = PoolManager(**self._get_pool_manager_kwargs())
self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme
@property
def _proxies_kwargs(self):
proxies_settings = self._proxy_config.settings
proxy_ssl_context = self._setup_proxy_ssl_context(proxies_settings)
proxies_kwargs = {
'proxy_ssl_context': proxy_ssl_context,
'use_forwarding_for_https': proxies_settings.get(
'proxy_use_forwarding_for_https'),
}
return {k: v for k, v in proxies_kwargs.items() if v is not None}
def _get_pool_manager_kwargs(self, **extra_kwargs):
pool_manager_kwargs = {
'strict': True,
'timeout': self._timeout,
'maxsize': self._max_pool_connections,
'ssl_context': self._get_ssl_context(),
'socket_options': self._socket_options,
'cert_file': self._cert_file,
'key_file': self._key_file,
}
pool_manager_kwargs.update(**extra_kwargs)
return pool_manager_kwargs
def _get_ssl_context(self):
return create_urllib3_context()
def _get_proxy_manager(self, proxy_url):
if proxy_url not in self._proxy_managers:
proxy_headers = self._proxy_config.proxy_headers_for(proxy_url)
proxy_manager_kwargs = self._get_pool_manager_kwargs(
proxy_headers=proxy_headers)
proxy_manager_kwargs.update(**self._proxies_kwargs)
proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs)
proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme
self._proxy_managers[proxy_url] = proxy_manager
return self._proxy_managers[proxy_url]
def _path_url(self, url):
parsed_url = urlparse(url)
path = parsed_url.path
if not path:
path = '/'
if parsed_url.query:
path = path + '?' + parsed_url.query
return path
def _setup_ssl_cert(self, conn, url, verify):
if url.lower().startswith('https') and verify:
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = get_cert_path(verify)
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
def _setup_proxy_ssl_context(self, proxies_settings):
proxy_ca_bundle = proxies_settings.get('proxy_ca_bundle')
proxy_cert = proxies_settings.get('proxy_client_cert')
if proxy_ca_bundle is None and proxy_cert is None:
return None
context = self._get_ssl_context()
try:
# urllib3 disables this by default but we need
# it for proper proxy tls negotiation.
context.check_hostname = True
if proxy_ca_bundle is not None:
context.load_verify_locations(cafile=proxy_ca_bundle)
if isinstance(proxy_cert, tuple):
context.load_cert_chain(proxy_cert[0], keyfile=proxy_cert[1])
elif isinstance(proxy_cert, str):
context.load_cert_chain(proxy_cert)
return context
except (IOError, URLLib3SSLError) as e:
raise InvalidProxiesConfigError(error=e)
def _get_connection_manager(self, url, proxy_url=None):
if proxy_url:
manager = self._get_proxy_manager(proxy_url)
else:
manager = self._manager
return manager
def _get_request_target(self, url, proxy_url):
has_proxy = proxy_url is not None
if not has_proxy:
return self._path_url(url)
# HTTP proxies expect the request_target to be the absolute url to know
# which host to establish a connection to. urllib3 also supports
# forwarding for HTTPS through the 'use_forwarding_for_https' parameter.
proxy_scheme = urlparse(proxy_url).scheme
using_https_forwarding_proxy = (
proxy_scheme == 'https' and
self._proxies_kwargs.get('use_forwarding_for_https', False)
)
if using_https_forwarding_proxy or url.startswith('http:'):
return url
else:
return self._path_url(url)
def _chunked(self, headers):
return headers.get('Transfer-Encoding', '') == 'chunked'
def send(self, request):
try:
proxy_url = self._proxy_config.proxy_url_for(request.url)
manager = self._get_connection_manager(request.url, proxy_url)
conn = manager.connection_from_url(request.url)
self._setup_ssl_cert(conn, request.url, self._verify)
request_target = self._get_request_target(request.url, proxy_url)
urllib_response = conn.urlopen(
method=request.method,
url=request_target,
body=request.body,
headers=request.headers,
retries=Retry(False),
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=self._chunked(request.headers),
)
http_response = botocore.awsrequest.AWSResponse(
request.url,
urllib_response.status,
urllib_response.headers,
urllib_response,
)
if not request.stream_output:
# Cause the raw stream to be exhausted immediately. We do it
# this way instead of using preload_content because
# preload_content will never buffer chunked responses
http_response.content
return http_response
except URLLib3SSLError as e:
raise SSLError(endpoint_url=request.url, error=e)
except (NewConnectionError, socket.gaierror) as e:
raise EndpointConnectionError(endpoint_url=request.url, error=e)
except ProxyError as e:
raise ProxyConnectionError(proxy_url=proxy_url, error=e)
except URLLib3ConnectTimeoutError as e:
raise ConnectTimeoutError(endpoint_url=request.url, error=e)
except URLLib3ReadTimeoutError as e:
raise ReadTimeoutError(endpoint_url=request.url, error=e)
except ProtocolError as e:
raise ConnectionClosedError(
error=e,
request=request,
endpoint_url=request.url
)
except Exception as e:
message = 'Exception received when sending urllib3 HTTP request'
logger.debug(message, exc_info=True)
raise HTTPClientError(error=e)
| 13,786 | Python | 37.297222 | 80 | 0.629624 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/translate.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.utils import merge_dicts
def build_retry_config(endpoint_prefix, retry_model, definitions,
client_retry_config=None):
service_config = retry_model.get(endpoint_prefix, {})
resolve_references(service_config, definitions)
# We want to merge the global defaults with the service specific
# defaults, with the service specific defaults taking precedence.
# So we use the global defaults as the base.
#
# A deepcopy is done on the retry defaults because it ensures the
# retry model has no chance of getting mutated when the service specific
# configuration or client retry config is merged in.
final_retry_config = {
'__default__': copy.deepcopy(retry_model.get('__default__', {}))
}
resolve_references(final_retry_config, definitions)
# Then merge the service specific config on top.
merge_dicts(final_retry_config, service_config)
if client_retry_config is not None:
_merge_client_retry_config(final_retry_config, client_retry_config)
return final_retry_config
def _merge_client_retry_config(retry_config, client_retry_config):
max_retry_attempts_override = client_retry_config.get('max_attempts')
if max_retry_attempts_override is not None:
# In the retry config, max_attempts refers to the maximum number
# of requests that will be made in total. However, for the client's
# retry config it refers to how many retry attempts will be made at
# most. So to translate this number from the client config, one is
# added to convert it to the maximum number of requests that will
# be made by including the initial request.
#
# It is also important to note that if we ever support per operation
# configuration in the retry model via the client, we will need to
# revisit this logic to make sure max_attempts gets applied
# per operation.
retry_config['__default__'][
'max_attempts'] = max_retry_attempts_override + 1
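# For example (illustrative numbers): a client configured with
# Config(retries={'max_attempts': 4}) ends up with
# retry_config['__default__']['max_attempts'] == 5, i.e. the initial
# request plus up to four retries.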
def resolve_references(config, definitions):
"""Recursively replace $ref keys.
To cut down on duplication, common definitions can be declared
(and passed in via the ``definitions`` attribute) and then
referenced as {"$ref": "name"}; when this happens the reference
dict is replaced with the value from the ``definitions`` dict.
This is recursively done.
"""
for key, value in config.items():
if isinstance(value, dict):
if len(value) == 1 and list(value.keys())[0] == '$ref':
# Then we need to resolve this reference.
config[key] = definitions[list(value.values())[0]]
else:
resolve_references(value, definitions)
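# A minimal sketch of how the resolution above behaves (illustrative data,
# shown as comments only):
#   definitions = {'throttling': {'applies_when': {'response': {'http_status_code': 429}}}}
#   config = {'policies': {'throttling': {'$ref': 'throttling'}}}
#   resolve_references(config, definitions)
#   # config['policies']['throttling'] now holds the full 'throttling' definition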
| 3,412 | Python | 43.324675 | 78 | 0.688159 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/auth.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import datetime
from hashlib import sha256
from hashlib import sha1
import hmac
import logging
from email.utils import formatdate
from operator import itemgetter
import functools
import time
import calendar
import json
from botocore.exceptions import NoCredentialsError
from botocore.utils import normalize_url_path, percent_encode_sequence
from botocore.compat import HTTPHeaders
from botocore.compat import quote, unquote, urlsplit, parse_qs
from botocore.compat import urlunsplit
from botocore.compat import encodebytes
from botocore.compat import six
from botocore.compat import json
from botocore.compat import MD5_AVAILABLE
from botocore.compat import ensure_unicode
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'
SIGNED_HEADERS_BLACKLIST = [
'expect',
'user-agent',
'x-amzn-trace-id',
]
UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
class BaseSigner(object):
REQUIRES_REGION = False
def add_auth(self, request):
raise NotImplementedError("add_auth")
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials):
self.credentials = credentials
def calc_signature(self, request, params):
logger.debug("Calculating signature using v2 auth.")
split = urlsplit(request.url)
path = split.path
if len(path) == 0:
path = '/'
string_to_sign = '%s\n%s\n%s\n' % (request.method,
split.netloc,
path)
lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
pairs = []
for key in sorted(params):
# Any previous signature should not be a part of this
# one, so we skip that particular key. This prevents
# issues during retries.
if key == 'Signature':
continue
value = six.text_type(params[key])
pairs.append(quote(key.encode('utf-8'), safe='') + '=' +
quote(value.encode('utf-8'), safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
logger.debug('String to sign: %s', string_to_sign)
lhmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, request):
# The auth handler is the last thing called in the
# preparation phase of a prepared request.
# Because of this we have to parse the query params
# from the request body so we can update them with
# the sigv2 auth params.
if self.credentials is None:
raise NoCredentialsError
if request.data:
# POST
params = request.data
else:
# GET
params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
if self.credentials.token:
params['SecurityToken'] = self.credentials.token
qs, signature = self.calc_signature(request, params)
params['Signature'] = signature
return request
class SigV3Auth(BaseSigner):
def __init__(self, credentials):
self.credentials = credentials
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
if 'Date' in request.headers:
del request.headers['Date']
request.headers['Date'] = formatdate(usegmt=True)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
new_hmac.update(request.headers['Date'].encode('utf-8'))
encoded_signature = encodebytes(new_hmac.digest()).strip()
signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' %
(self.credentials.access_key, 'HmacSHA256',
encoded_signature.decode('utf-8')))
if 'X-Amzn-Authorization' in request.headers:
del request.headers['X-Amzn-Authorization']
request.headers['X-Amzn-Authorization'] = signature
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
# We initialize these values here so the unit tests can have
# valid values. But these will get overridden in ``add_auth``
# later for real requests.
self._region_name = region_name
self._service_name = service_name
def _sign(self, key, msg, hex=False):
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
header_map = HTTPHeaders()
for name, value in request.headers.items():
lname = name.lower()
if lname not in SIGNED_HEADERS_BLACKLIST:
header_map[lname] = value
if 'host' not in header_map:
# Ensure we sign the lowercased version of the host, as that
# is what will ultimately be sent on the wire.
# TODO: We should set the host ourselves, instead of relying on our
# HTTP client to set it for us.
header_map['host'] = self._canonical_host(request.url).lower()
return header_map
def _canonical_host(self, url):
url_parts = urlsplit(url)
default_ports = {
'http': 80,
'https': 443
}
if any(url_parts.scheme == scheme and url_parts.port == port
for scheme, port in default_ports.items()):
# No need to include the port if it's the default port.
return url_parts.hostname
# Strip out auth if it's present in the netloc.
return url_parts.netloc.rsplit('@', 1)[-1]
def canonical_query_string(self, request):
# The query string can come from one of two places. One is the
# params attribute of the request. The other is from the request
# url (in which case we have to re-split the url into its components
# and parse out the query string component).
if request.params:
return self._canonical_query_string_params(request.params)
else:
return self._canonical_query_string_url(urlsplit(request.url))
def _canonical_query_string_params(self, params):
l = []
for param in sorted(params):
value = str(params[param])
l.append('%s=%s' % (quote(param, safe='-_.~'),
quote(value, safe='-_.~')))
cqs = '&'.join(l)
return cqs
def _canonical_query_string_url(self, parts):
canonical_query_string = ''
if parts.query:
# [(key, value), (key2, value2)]
key_val_pairs = []
for pair in parts.query.split('&'):
key, _, value = pair.partition('=')
key_val_pairs.append((key, value))
sorted_key_vals = []
# Sort by the key names, and in the case of
# repeated keys, then sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
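# For example (illustrative): a URL query of 'prefix=b&marker=a&max-keys=2'
# is canonicalized to 'marker=a&max-keys=2&prefix=b'; pairs are kept as-is
# but emitted in sorted order.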
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
headers = []
sorted_header_names = sorted(set(headers_to_sign))
for key in sorted_header_names:
value = ','.join(self._header_value(v) for v in
sorted(headers_to_sign.get_all(key)))
headers.append('%s:%s' % (key, ensure_unicode(value)))
return '\n'.join(headers)
def _header_value(self, value):
# From the sigv4 docs:
# Lowercase(HeaderName) + ':' + Trimall(HeaderValue)
#
# The Trimall function removes excess white space before and after
# values, and converts sequential spaces to a single space.
return ' '.join(value.split())
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]
l = sorted(l)
return ';'.join(l)
def payload(self, request):
if not self._should_sha256_sign_payload(request):
# When payload signing is disabled, we use this static string in
# place of the payload checksum.
return UNSIGNED_PAYLOAD
request_body = request.body
if request_body and hasattr(request_body, 'seek'):
position = request_body.tell()
read_chunksize = functools.partial(request_body.read,
PAYLOAD_BUFFER)
checksum = sha256()
for chunk in iter(read_chunksize, b''):
checksum.update(chunk)
hex_checksum = checksum.hexdigest()
request_body.seek(position)
return hex_checksum
elif request_body:
# The request serialization has ensured that
# request.body is a bytes() type.
return sha256(request_body).hexdigest()
else:
return EMPTY_SHA256_HASH
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def canonical_request(self, request):
cr = [request.method.upper()]
path = self._normalize_url_path(urlsplit(request.url).path)
cr.append(path)
cr.append(self.canonical_query_string(request))
headers_to_sign = self.headers_to_sign(request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
if 'X-Amz-Content-SHA256' in request.headers:
body_checksum = request.headers['X-Amz-Content-SHA256']
else:
body_checksum = self.payload(request)
cr.append(body_checksum)
return '\n'.join(cr)
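# The parts appended above join (newline-separated) into the standard
# SigV4 canonical request: method, URI, query string, canonical headers
# (followed by a blank line), signed header names, and the payload hash.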
def _normalize_url_path(self, path):
normalized_path = quote(normalize_url_path(path), safe='/~')
return normalized_path
def scope(self, request):
scope = [self.credentials.access_key]
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, request):
scope = []
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
    def string_to_sign(self, request, canonical_request):
        """
        Return the canonical StringToSign for the request, built from the
        signing algorithm, the request timestamp, the credential scope, and
        the hash of the canonical request.
        """
sts = ['AWS4-HMAC-SHA256']
sts.append(request.context['timestamp'])
sts.append(self.credential_scope(request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, string_to_sign, request):
key = self.credentials.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
request.context['timestamp'][0:8])
k_region = self._sign(k_date, self._region_name)
k_service = self._sign(k_region, self._service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
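# The derivation above follows the documented SigV4 signing-key chain,
# shown here as pseudocode only:
#   kDate    = HMAC("AWS4" + secret_key, <YYYYMMDD>)
#   kRegion  = HMAC(kDate, region_name)
#   kService = HMAC(kRegion, service_name)
#   kSigning = HMAC(kService, "aws4_request")
#   signature = HexEncode(HMAC(kSigning, string_to_sign))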
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
# This could be a retry. Make sure the previous
# authorization header is removed first.
self._modify_request_before_signing(request)
canonical_request = self.canonical_request(request)
logger.debug("Calculating signature using v4 auth.")
logger.debug('CanonicalRequest:\n%s', canonical_request)
string_to_sign = self.string_to_sign(request, canonical_request)
logger.debug('StringToSign:\n%s', string_to_sign)
signature = self.signature(string_to_sign, request)
logger.debug('Signature:\n%s', signature)
self._inject_signature_to_request(request, signature)
def _inject_signature_to_request(self, request, signature):
l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
headers_to_sign = self.headers_to_sign(request)
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
request.headers['Authorization'] = ', '.join(l)
return request
def _modify_request_before_signing(self, request):
if 'Authorization' in request.headers:
del request.headers['Authorization']
self._set_necessary_date_headers(request)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
if not request.context.get('payload_signing_enabled', True):
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = UNSIGNED_PAYLOAD
def _set_necessary_date_headers(self, request):
# The spec allows for either the Date _or_ the X-Amz-Date value to be
# used so we check both. If there's a Date header, we use the date
# header. Otherwise we use the X-Amz-Date header.
if 'Date' in request.headers:
del request.headers['Date']
datetime_timestamp = datetime.datetime.strptime(
request.context['timestamp'], SIGV4_TIMESTAMP)
request.headers['Date'] = formatdate(
int(calendar.timegm(datetime_timestamp.timetuple())))
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
else:
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
request.headers['X-Amz-Date'] = request.context['timestamp']
class S3SigV4Auth(SigV4Auth):
def _modify_request_before_signing(self, request):
super(S3SigV4Auth, self)._modify_request_before_signing(request)
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = self.payload(request)
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both content-md5 be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# content-md5 is sufficiently secure and durable for us to be
# confident in the request without body signing.
if not request.url.startswith('https') or \
'Content-MD5' not in request.headers:
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super(S3SigV4Auth, self)._should_sha256_sign_payload(request)
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, service_name, region_name,
expires=DEFAULT_EXPIRES):
super(SigV4QueryAuth, self).__init__(credentials, service_name,
region_name)
self._expires = expires
def _modify_request_before_signing(self, request):
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
blacklisted_content_type = (
'application/x-www-form-urlencoded; charset=utf-8'
)
if content_type == blacklisted_content_type:
del request.headers['content-type']
# Note that we're not including X-Amz-Signature.
# From the docs: "The Canonical Query String must include all the query
# parameters from the preceding table except for X-Amz-Signature."
signed_headers = self.signed_headers(self.headers_to_sign(request))
auth_params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': self.scope(request),
'X-Amz-Date': request.context['timestamp'],
'X-Amz-Expires': self._expires,
'X-Amz-SignedHeaders': signed_headers,
}
if self.credentials.token is not None:
auth_params['X-Amz-Security-Token'] = self.credentials.token
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_dict = dict(
[(k, v[0]) for k, v in
parse_qs(url_parts.query, keep_blank_values=True).items()])
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e. you can't just do
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
operation_params = ''
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(self._get_body_as_dict(request))
request.data = ''
if query_dict:
operation_params = percent_encode_sequence(query_dict) + '&'
new_query_string = (operation_params +
percent_encode_sequence(auth_params))
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _get_body_as_dict(self, request):
# For query services, request.data is form-encoded and is already a
# dict, but for other services such as rest-json it could be a json
# string or bytes. In those cases we attempt to load the data as a
# dict.
data = request.data
if isinstance(data, six.binary_type):
data = json.loads(data.decode('utf-8'))
elif isinstance(data, six.string_types):
data = json.loads(data)
return data
def _inject_signature_to_request(self, request, signature):
# Rather than calculating an "Authorization" header, for the query
# param auth, we just append an 'X-Amz-Signature' param to the end
# of the query string.
request.url += '&X-Amz-Signature=%s' % signature
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e. a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
def payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return UNSIGNED_PAYLOAD
class S3SigV4PostAuth(SigV4Auth):
"""
Presigns a s3 post
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request):
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'
fields['x-amz-credential'] = self.scope(request)
fields['x-amz-date'] = request.context['timestamp']
conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})
conditions.append({'x-amz-credential': self.scope(request)})
conditions.append({'x-amz-date': request.context['timestamp']})
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['x-amz-signature'] = self.signature(fields['policy'], request)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
class HmacV1Auth(BaseSigner):
# List of Query String Arguments of Interest
QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl',
'location', 'logging', 'partNumber', 'policy',
'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore', 'storageClass', 'notification',
'replication', 'requestPayment', 'analytics', 'metrics',
'inventory', 'select', 'select-type']
def __init__(self, credentials, service_name=None, region_name=None):
self.credentials = credentials
def sign_string(self, string_to_sign):
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha1)
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).strip().decode('utf-8')
def canonical_standard_headers(self, headers):
interesting_headers = ['content-md5', 'content-type', 'date']
hoi = []
if 'Date' in headers:
del headers['Date']
headers['Date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def canonical_custom_headers(self, headers):
hoi = []
custom_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None:
if lk.startswith('x-amz-'):
custom_headers[lk] = ','.join(v.strip() for v in
headers.get_all(key))
sorted_header_keys = sorted(custom_headers.keys())
for key in sorted_header_keys:
hoi.append("%s:%s" % (key, custom_headers[key]))
return '\n'.join(hoi)
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
if len(nv) == 1:
return nv
else:
return (nv[0], unquote(nv[1]))
def canonical_resource(self, split, auth_path=None):
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
# NOTE:
# The path in the canonical resource should always be the
# full path including the bucket name, even for virtual-hosting
# style addressing. The ``auth_path`` keeps track of the full
# path for the canonical resource and would be passed in if
# the client was using virtual-hosting style.
if auth_path is not None:
buf = auth_path
else:
buf = split.path
if split.query:
qsa = split.query.split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [self.unquote_v(a) for a in qsa
if a[0] in self.QSAOfInterest]
if len(qsa) > 0:
qsa.sort(key=itemgetter(0))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def canonical_string(self, method, split, headers, expires=None,
auth_path=None):
cs = method.upper() + '\n'
cs += self.canonical_standard_headers(headers) + '\n'
custom_headers = self.canonical_custom_headers(headers)
if custom_headers:
cs += custom_headers + '\n'
cs += self.canonical_resource(split, auth_path=auth_path)
return cs
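# For example (illustrative values), a GET of an object with one x-amz-*
# header yields a string-to-sign roughly like:
#   GET\n\n\nThu, 01 Jan 2015 00:00:00 GMT\nx-amz-meta-foo:bar\n/bucket/key
# i.e. method, content-md5, content-type, date, any x-amz-* headers, and
# the canonical resource.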
def get_signature(self, method, split, headers, expires=None,
auth_path=None):
if self.credentials.token:
del headers['x-amz-security-token']
headers['x-amz-security-token'] = self.credentials.token
string_to_sign = self.canonical_string(method,
split,
headers,
auth_path=auth_path)
logger.debug('StringToSign:\n%s', string_to_sign)
return self.sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
logger.debug("Calculating signature using hmacv1 auth.")
split = urlsplit(request.url)
logger.debug('HTTP request method: %s', request.method)
signature = self.get_signature(request.method, split,
request.headers,
auth_path=request.auth_path)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'Authorization' in request.headers:
# We have to do this because request.headers is not a
# normal dictionary. It has the (unintuitive) behavior
# of aggregating repeated setattr calls for the same
# key. For example:
# headers['foo'] = 'a'; headers['foo'] = 'b'
# list(headers) will print ['foo', 'foo'].
del request.headers['Authorization']
request.headers['Authorization'] = (
"AWS %s:%s" % (self.credentials.access_key, signature))
class HmacV1QueryAuth(HmacV1Auth):
"""
Generates a presigned request for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, expires=DEFAULT_EXPIRES):
self.credentials = credentials
self._expires = expires
def _get_date(self):
return str(int(time.time() + int(self._expires)))
def _inject_signature(self, request, signature):
query_dict = {}
query_dict['AWSAccessKeyId'] = self.credentials.access_key
query_dict['Signature'] = signature
for header_key in request.headers:
lk = header_key.lower()
# For query string requests, Expires is used instead of the
# Date header.
if header_key == 'Date':
query_dict['Expires'] = request.headers['Date']
# We only want to include relevant headers in the query string.
# These can be anything that starts with x-amz, is Content-MD5,
# or is Content-Type.
elif lk.startswith('x-amz-') or lk in ['content-md5',
'content-type']:
query_dict[lk] = request.headers[lk]
# Combine all of the identified headers into an encoded
# query string
new_query_string = percent_encode_sequence(query_dict)
# Create a new url with the presigned url.
p = urlsplit(request.url)
if p[3]:
# If there was a pre-existing query string, we should
# add that back before injecting the new query string.
new_query_string = '%s&%s' % (p[3], new_query_string)
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
class HmacV1PostAuth(HmacV1Auth):
"""
Generates a presigned post for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request):
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['AWSAccessKeyId'] = self.credentials.access_key
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['signature'] = self.sign_string(fields['policy'])
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
# Defined at the bottom instead of the top of the module because the Auth
# classes weren't defined yet.
AUTH_TYPE_MAPS = {
'v2': SigV2Auth,
'v4': SigV4Auth,
'v4-query': SigV4QueryAuth,
'v3': SigV3Auth,
'v3https': SigV3Auth,
's3': HmacV1Auth,
's3-query': HmacV1QueryAuth,
's3-presign-post': HmacV1PostAuth,
's3v4': S3SigV4Auth,
's3v4-query': S3SigV4QueryAuth,
's3v4-presign-post': S3SigV4PostAuth,
}
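# Illustrative use of the mapping above (the variable names below are made up
# for the example): a signature version string such as 'v4' resolves to a
# signer class which is then applied to an outgoing request:
#   auth_cls = AUTH_TYPE_MAPS['v4']
#   signer = auth_cls(credentials, 'sqs', 'us-east-1')
#   signer.add_auth(request)  # mutates the request's headers in place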
| 34,966 | Python | 39.659302 | 79 | 0.601956 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/monitoring.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import re
import time
from botocore.compat import ensure_unicode, ensure_bytes, urlparse
from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS
logger = logging.getLogger(__name__)
class Monitor(object):
_EVENTS_TO_REGISTER = [
'before-parameter-build',
'request-created',
'response-received',
'after-call',
'after-call-error',
]
def __init__(self, adapter, publisher):
"""Abstraction for monitoring clients API calls
:param adapter: An adapter that takes event emitter events
and produces monitor events
:param publisher: A publisher for generated monitor events
"""
self._adapter = adapter
self._publisher = publisher
def register(self, event_emitter):
"""Register an event emitter to the monitor"""
for event_to_register in self._EVENTS_TO_REGISTER:
event_emitter.register_last(event_to_register, self.capture)
def capture(self, event_name, **payload):
"""Captures an incoming event from the event emitter
It will feed an event emitter event to the monitor's adapter to create
a monitor event and then publish that event to the monitor's publisher.
"""
try:
monitor_event = self._adapter.feed(event_name, payload)
if monitor_event:
self._publisher.publish(monitor_event)
except Exception as e:
logger.debug(
'Exception %s raised by client monitor in handling event %s',
e, event_name, exc_info=True)
class MonitorEventAdapter(object):
def __init__(self, time=time.time):
"""Adapts event emitter events to produce monitor events
:type time: callable
:param time: A callable that produces the current time
"""
self._time = time
def feed(self, emitter_event_name, emitter_payload):
"""Feed an event emitter event to generate a monitor event
:type emitter_event_name: str
:param emitter_event_name: The name of the event emitted
:type emitter_payload: dict
:param emitter_payload: The payload to associated to the event
emitted
:rtype: BaseMonitorEvent
:returns: A monitor event based on the event emitter events
fired
"""
return self._get_handler(emitter_event_name)(**emitter_payload)
def _get_handler(self, event_name):
return getattr(
self, '_handle_' + event_name.split('.')[0].replace('-', '_')
)
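# For example, an emitter event named 'before-parameter-build.s3.PutObject'
# is dispatched to self._handle_before_parameter_build, and
# 'request-created.s3.PutObject' to self._handle_request_created.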
def _handle_before_parameter_build(self, model, context, **kwargs):
context['current_api_call_event'] = APICallEvent(
service=model.service_model.service_id,
operation=model.wire_name,
timestamp=self._get_current_time(),
)
def _handle_request_created(self, request, **kwargs):
context = request.context
new_attempt_event = context[
'current_api_call_event'].new_api_call_attempt(
timestamp=self._get_current_time())
new_attempt_event.request_headers = request.headers
new_attempt_event.url = request.url
context['current_api_call_attempt_event'] = new_attempt_event
def _handle_response_received(self, parsed_response, context, exception,
**kwargs):
attempt_event = context.pop('current_api_call_attempt_event')
attempt_event.latency = self._get_latency(attempt_event)
if parsed_response is not None:
attempt_event.http_status_code = parsed_response[
'ResponseMetadata']['HTTPStatusCode']
attempt_event.response_headers = parsed_response[
'ResponseMetadata']['HTTPHeaders']
attempt_event.parsed_error = parsed_response.get('Error')
else:
attempt_event.wire_exception = exception
return attempt_event
def _handle_after_call(self, context, parsed, **kwargs):
context['current_api_call_event'].retries_exceeded = parsed[
'ResponseMetadata'].get('MaxAttemptsReached', False)
return self._complete_api_call(context)
def _handle_after_call_error(self, context, exception, **kwargs):
# If the after-call-error was emitted and the error being raised
# was a retryable connection error, then the retries must have been
# exceeded for that exception, as this event gets emitted **after**
# retries happen.
context['current_api_call_event'].retries_exceeded = \
self._is_retryable_exception(exception)
return self._complete_api_call(context)
def _is_retryable_exception(self, exception):
return isinstance(
exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR']))
def _complete_api_call(self, context):
call_event = context.pop('current_api_call_event')
call_event.latency = self._get_latency(call_event)
return call_event
def _get_latency(self, event):
return self._get_current_time() - event.timestamp
def _get_current_time(self):
return int(self._time() * 1000)
class BaseMonitorEvent(object):
def __init__(self, service, operation, timestamp):
"""Base monitor event
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the event began
"""
self.service = service
self.operation = operation
self.timestamp = timestamp
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
class APICallEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp, latency=None,
attempts=None, retries_exceeded=False):
"""Monitor event for a single API call
This event corresponds to a single client method call, which includes
every HTTP requests attempt made in order to complete the client call
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the event began
:type latency: int
:param latency: The time in milliseconds to complete the client call
:type attempts: list
:param attempts: The list of APICallAttempts associated to the
APICall
:type retries_exceeded: bool
:param retries_exceeded: True if API call exceeded retries. False
otherwise
"""
super(APICallEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp)
self.latency = latency
self.attempts = attempts
if attempts is None:
self.attempts = []
self.retries_exceeded = retries_exceeded
def new_api_call_attempt(self, timestamp):
"""Instantiates APICallAttemptEvent associated to the APICallEvent
:type timestamp: int
:param timestamp: Epoch time in milliseconds to associate to the
APICallAttemptEvent
"""
attempt_event = APICallAttemptEvent(
service=self.service,
operation=self.operation,
timestamp=timestamp
)
self.attempts.append(attempt_event)
return attempt_event
class APICallAttemptEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp,
latency=None, url=None, http_status_code=None,
request_headers=None, response_headers=None,
parsed_error=None, wire_exception=None):
"""Monitor event for a single API call attempt
This event corresponds to a single HTTP request attempt in completing
the entire client method call.
:type service: str
:param service: A string identifying the service associated to
the event
:type operation: str
:param operation: A string identifying the operation of service
associated to the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the HTTP request
started
:type latency: int
:param latency: The time in milliseconds to complete the HTTP request
whether it succeeded or failed
:type url: str
:param url: The URL the attempt was sent to
:type http_status_code: int
:param http_status_code: The HTTP status code of the HTTP response
if there was a response
:type request_headers: dict
:param request_headers: The HTTP headers sent in making the HTTP
request
:type response_headers: dict
:param response_headers: The HTTP headers returned in the HTTP response
if there was a response
:type parsed_error: dict
:param parsed_error: The error parsed if the service returned an
error back
:type wire_exception: Exception
:param wire_exception: The exception raised in sending the HTTP
request (i.e. ConnectionError)
"""
super(APICallAttemptEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp
)
self.latency = latency
self.url = url
self.http_status_code = http_status_code
self.request_headers = request_headers
self.response_headers = response_headers
self.parsed_error = parsed_error
self.wire_exception = wire_exception
class CSMSerializer(object):
_MAX_CLIENT_ID_LENGTH = 255
_MAX_EXCEPTION_CLASS_LENGTH = 128
_MAX_ERROR_CODE_LENGTH = 128
_MAX_USER_AGENT_LENGTH = 256
_MAX_MESSAGE_LENGTH = 512
_RESPONSE_HEADERS_TO_EVENT_ENTRIES = {
'x-amzn-requestid': 'XAmznRequestId',
'x-amz-request-id': 'XAmzRequestId',
'x-amz-id-2': 'XAmzId2',
}
_AUTH_REGEXS = {
'v4': re.compile(
r'AWS4-HMAC-SHA256 '
r'Credential=(?P<access_key>\w+)/\d+/'
r'(?P<signing_region>[a-z0-9-]+)/'
),
's3': re.compile(
r'AWS (?P<access_key>\w+):'
)
}
_SERIALIZEABLE_EVENT_PROPERTIES = [
'service',
'operation',
'timestamp',
'attempts',
'latency',
'retries_exceeded',
'url',
'request_headers',
'http_status_code',
'response_headers',
'parsed_error',
'wire_exception',
]
def __init__(self, csm_client_id):
"""Serializes monitor events to CSM (Client Side Monitoring) format
:type csm_client_id: str
:param csm_client_id: The application identifier to associate
to the serialized events
"""
self._validate_client_id(csm_client_id)
self.csm_client_id = csm_client_id
def _validate_client_id(self, csm_client_id):
if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH:
raise ValueError(
'The value provided for csm_client_id: %s exceeds the '
'maximum length of %s characters' % (
csm_client_id, self._MAX_CLIENT_ID_LENGTH)
)
def serialize(self, event):
"""Serializes a monitor event to the CSM format
:type event: BaseMonitorEvent
:param event: The event to serialize to bytes
:rtype: bytes
:returns: The CSM serialized form of the event
"""
event_dict = self._get_base_event_dict(event)
event_type = self._get_event_type(event)
event_dict['Type'] = event_type
for attr in self._SERIALIZEABLE_EVENT_PROPERTIES:
value = getattr(event, attr, None)
if value is not None:
getattr(self, '_serialize_' + attr)(
value, event_dict, event_type=event_type)
return ensure_bytes(
json.dumps(event_dict, separators=(',', ':')))
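# A serialized ApiCall event is compact JSON roughly like the following
# (illustrative values):
#   {"Version":1,"ClientId":"my-app","Type":"ApiCall","Service":"S3",
#    "Api":"PutObject","Timestamp":1530000000000,"AttemptCount":1,
#    "Latency":120,"MaxRetriesExceeded":0}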
def _get_base_event_dict(self, event):
return {
'Version': 1,
'ClientId': self.csm_client_id,
}
def _serialize_service(self, service, event_dict, **kwargs):
event_dict['Service'] = service
def _serialize_operation(self, operation, event_dict, **kwargs):
event_dict['Api'] = operation
def _serialize_timestamp(self, timestamp, event_dict, **kwargs):
event_dict['Timestamp'] = timestamp
def _serialize_attempts(self, attempts, event_dict, **kwargs):
event_dict['AttemptCount'] = len(attempts)
if attempts:
self._add_fields_from_last_attempt(event_dict, attempts[-1])
def _add_fields_from_last_attempt(self, event_dict, last_attempt):
if last_attempt.request_headers:
# It does not matter which attempt to use to grab the region
# for the ApiCall event, but SDKs typically do the last one.
region = self._get_region(last_attempt.request_headers)
if region is not None:
event_dict['Region'] = region
event_dict['UserAgent'] = self._get_user_agent(
last_attempt.request_headers)
if last_attempt.http_status_code is not None:
event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code
if last_attempt.parsed_error is not None:
self._serialize_parsed_error(
last_attempt.parsed_error, event_dict, 'ApiCall')
if last_attempt.wire_exception is not None:
self._serialize_wire_exception(
last_attempt.wire_exception, event_dict, 'ApiCall')
def _serialize_latency(self, latency, event_dict, event_type):
if event_type == 'ApiCall':
event_dict['Latency'] = latency
elif event_type == 'ApiCallAttempt':
event_dict['AttemptLatency'] = latency
def _serialize_retries_exceeded(self, retries_exceeded, event_dict,
**kwargs):
event_dict['MaxRetriesExceeded'] = (1 if retries_exceeded else 0)
def _serialize_url(self, url, event_dict, **kwargs):
event_dict['Fqdn'] = urlparse(url).netloc
def _serialize_request_headers(self, request_headers, event_dict,
**kwargs):
event_dict['UserAgent'] = self._get_user_agent(request_headers)
if self._is_signed(request_headers):
event_dict['AccessKey'] = self._get_access_key(request_headers)
region = self._get_region(request_headers)
if region is not None:
event_dict['Region'] = region
if 'X-Amz-Security-Token' in request_headers:
event_dict['SessionToken'] = request_headers[
'X-Amz-Security-Token']
def _serialize_http_status_code(self, http_status_code, event_dict,
**kwargs):
event_dict['HttpStatusCode'] = http_status_code
def _serialize_response_headers(self, response_headers, event_dict,
**kwargs):
for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items():
if header in response_headers:
event_dict[entry] = response_headers[header]
def _serialize_parsed_error(self, parsed_error, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'AwsException'] = self._truncate(
parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH)
event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate(
parsed_error['Message'], self._MAX_MESSAGE_LENGTH)
def _serialize_wire_exception(self, wire_exception, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'SdkException'] = self._truncate(
wire_exception.__class__.__name__,
self._MAX_EXCEPTION_CLASS_LENGTH)
event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate(
str(wire_exception), self._MAX_MESSAGE_LENGTH)
def _get_event_type(self, event):
if isinstance(event, APICallEvent):
return 'ApiCall'
elif isinstance(event, APICallAttemptEvent):
return 'ApiCallAttempt'
def _get_access_key(self, request_headers):
auth_val = self._get_auth_value(request_headers)
_, auth_match = self._get_auth_match(auth_val)
return auth_match.group('access_key')
def _get_region(self, request_headers):
if not self._is_signed(request_headers):
return None
auth_val = self._get_auth_value(request_headers)
signature_version, auth_match = self._get_auth_match(auth_val)
if signature_version != 'v4':
return None
return auth_match.group('signing_region')
def _get_user_agent(self, request_headers):
return self._truncate(
ensure_unicode(request_headers.get('User-Agent', '')),
self._MAX_USER_AGENT_LENGTH
)
def _is_signed(self, request_headers):
return 'Authorization' in request_headers
def _get_auth_value(self, request_headers):
return ensure_unicode(request_headers['Authorization'])
def _get_auth_match(self, auth_val):
for signature_version, regex in self._AUTH_REGEXS.items():
match = regex.match(auth_val)
if match:
return signature_version, match
return None, None
def _truncate(self, text, max_length):
if len(text) > max_length:
logger.debug(
'Truncating following value to maximum length of '
'%s: %s', text, max_length)
return text[:max_length]
return text
class SocketPublisher(object):
_MAX_MONITOR_EVENT_LENGTH = 8 * 1024
def __init__(self, socket, host, port, serializer):
"""Publishes monitor events to a socket
:type socket: socket.socket
:param socket: The socket object to use to publish events
:type host: string
:param host: The host to send events to
:type port: integer
:param port: The port on the host to send events to
:param serializer: The serializer to use to serialize the event
to a form that can be published to the socket. This must
have a `serialize()` method that accepts a monitor event
and returns bytes
"""
self._socket = socket
self._address = (host, port)
self._serializer = serializer
def publish(self, event):
"""Publishes a specified monitor event
:type event: BaseMonitorEvent
:param event: The monitor event to be sent
over the publisher's socket to the desired address.
"""
serialized_event = self._serializer.serialize(event)
if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH:
logger.debug(
'Serialized event of size %s exceeds the maximum length '
'allowed: %s. Not sending event to socket.',
len(serialized_event), self._MAX_MONITOR_EVENT_LENGTH
)
return
self._socket.sendto(serialized_event, self._address)
| 20,586 | Python | 36.362976 | 79 | 0.612601 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/client.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import functools
from botocore import waiter, xform_name
from botocore.args import ClientArgsCreator
from botocore.auth import AUTH_TYPE_MAPS
from botocore.awsrequest import prepare_request_dict
from botocore.docs.docstring import ClientMethodDocstring
from botocore.docs.docstring import PaginatorDocstring
from botocore.exceptions import (
ClientError, DataNotFoundError, OperationNotPageableError,
UnknownSignatureVersionError, InvalidEndpointDiscoveryConfigurationError
)
from botocore.hooks import first_non_none_response
from botocore.model import ServiceModel
from botocore.paginate import Paginator
from botocore.utils import (
CachedProperty, get_service_module_name, S3RegionRedirector,
S3ArnParamHandler, S3EndpointSetter, ensure_boolean,
S3ControlArnParamHandler, S3ControlEndpointSetter,
)
from botocore import UNSIGNED
# Keep this imported. There's pre-existing code that uses
# "from botocore.client import Config".
from botocore.config import Config
from botocore.history import get_global_history_recorder
from botocore.discovery import (
EndpointDiscoveryHandler, EndpointDiscoveryManager,
block_endpoint_discovery_required_operations
)
from botocore.retries import standard
from botocore.retries import adaptive
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
class ClientCreator(object):
"""Creates client objects for a service."""
def __init__(self, loader, endpoint_resolver, user_agent, event_emitter,
retry_handler_factory, retry_config_translator,
response_parser_factory=None, exceptions_factory=None,
config_store=None):
self._loader = loader
self._endpoint_resolver = endpoint_resolver
self._user_agent = user_agent
self._event_emitter = event_emitter
self._retry_handler_factory = retry_handler_factory
self._retry_config_translator = retry_config_translator
self._response_parser_factory = response_parser_factory
self._exceptions_factory = exceptions_factory
# TODO: Migrate things away from scoped_config in favor of the
# config_store. The config store can pull things from both the scoped
# config and environment variables (and potentially more in the
# future).
self._config_store = config_store
def create_client(self, service_name, region_name, is_secure=True,
endpoint_url=None, verify=None,
credentials=None, scoped_config=None,
api_version=None,
client_config=None):
responses = self._event_emitter.emit(
'choose-service-name', service_name=service_name)
service_name = first_non_none_response(responses, default=service_name)
service_model = self._load_service_model(service_name, api_version)
cls = self._create_client_class(service_name, service_model)
endpoint_bridge = ClientEndpointBridge(
self._endpoint_resolver, scoped_config, client_config,
service_signing_name=service_model.metadata.get('signingName'))
client_args = self._get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
service_client = cls(**client_args)
self._register_retries(service_client)
self._register_s3_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_s3_control_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_endpoint_discovery(
service_client, endpoint_url, client_config
)
return service_client
def create_client_class(self, service_name, api_version=None):
service_model = self._load_service_model(service_name, api_version)
return self._create_client_class(service_name, service_model)
def _create_client_class(self, service_name, service_model):
class_attributes = self._create_methods(service_model)
py_name_to_operation_name = self._create_name_mapping(service_model)
class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name
bases = [BaseClient]
service_id = service_model.service_id.hyphenize()
self._event_emitter.emit(
'creating-client-class.%s' % service_id,
class_attributes=class_attributes,
base_classes=bases)
class_name = get_service_module_name(service_model)
cls = type(str(class_name), tuple(bases), class_attributes)
return cls
def _load_service_model(self, service_name, api_version=None):
json_model = self._loader.load_service_model(service_name, 'service-2',
api_version=api_version)
service_model = ServiceModel(json_model, service_name=service_name)
return service_model
def _register_retries(self, client):
retry_mode = client.meta.config.retries['mode']
if retry_mode == 'standard':
self._register_v2_standard_retries(client)
elif retry_mode == 'adaptive':
self._register_v2_standard_retries(client)
self._register_v2_adaptive_retries(client)
elif retry_mode == 'legacy':
self._register_legacy_retries(client)
def _register_v2_standard_retries(self, client):
max_attempts = client.meta.config.retries.get('total_max_attempts')
kwargs = {'client': client}
if max_attempts is not None:
kwargs['max_attempts'] = max_attempts
standard.register_retry_handler(**kwargs)
def _register_v2_adaptive_retries(self, client):
adaptive.register_retry_handler(client)
def _register_legacy_retries(self, client):
endpoint_prefix = client.meta.service_model.endpoint_prefix
service_id = client.meta.service_model.service_id
service_event_name = service_id.hyphenize()
# First, we load the entire retry config for all services,
# then pull out just the information we need.
original_config = self._loader.load_data('_retry')
if not original_config:
return
retries = self._transform_legacy_retries(client.meta.config.retries)
retry_config = self._retry_config_translator.build_retry_config(
endpoint_prefix, original_config.get('retry', {}),
original_config.get('definitions', {}),
retries
)
logger.debug("Registering retry handlers for service: %s",
client.meta.service_model.service_name)
handler = self._retry_handler_factory.create_retry_handler(
retry_config, endpoint_prefix)
unique_id = 'retry-config-%s' % service_event_name
client.meta.events.register(
'needs-retry.%s' % service_event_name, handler,
unique_id=unique_id
)
def _transform_legacy_retries(self, retries):
if retries is None:
return
copied_args = retries.copy()
if 'total_max_attempts' in retries:
copied_args = retries.copy()
copied_args['max_attempts'] = (
copied_args.pop('total_max_attempts') - 1)
return copied_args
def _get_retry_mode(self, client, config_store):
client_retries = client.meta.config.retries
if client_retries is not None and \
client_retries.get('mode') is not None:
return client_retries['mode']
return config_store.get_config_variable('retry_mode') or 'legacy'
def _register_endpoint_discovery(self, client, endpoint_url, config):
if endpoint_url is not None:
# Don't register any handlers in the case of a custom endpoint url
return
# Only attach handlers if the service supports discovery
if client.meta.service_model.endpoint_discovery_operation is None:
return
events = client.meta.events
service_id = client.meta.service_model.service_id.hyphenize()
enabled = False
if config and config.endpoint_discovery_enabled is not None:
enabled = config.endpoint_discovery_enabled
elif self._config_store:
enabled = self._config_store.get_config_variable(
'endpoint_discovery_enabled')
enabled = self._normalize_endpoint_discovery_config(enabled)
if enabled and self._requires_endpoint_discovery(client, enabled):
discover = enabled is True
manager = EndpointDiscoveryManager(client, always_discover=discover)
handler = EndpointDiscoveryHandler(manager)
handler.register(events, service_id)
else:
events.register('before-parameter-build',
block_endpoint_discovery_required_operations)
def _normalize_endpoint_discovery_config(self, enabled):
"""Config must either be a boolean-string or string-literal 'auto'"""
if isinstance(enabled, str):
enabled = enabled.lower().strip()
if enabled == 'auto':
return enabled
elif enabled in ('true', 'false'):
return ensure_boolean(enabled)
elif isinstance(enabled, bool):
return enabled
raise InvalidEndpointDiscoveryConfigurationError(config_value=enabled)
def _requires_endpoint_discovery(self, client, enabled):
if enabled == "auto":
return client.meta.service_model.endpoint_discovery_required
return enabled
def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
client_config, scoped_config):
if client.meta.service_model.service_name != 's3':
return
S3RegionRedirector(endpoint_bridge, client).register()
S3ArnParamHandler().register(client.meta.events)
S3EndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
self._set_s3_presign_signature_version(
client.meta, client_config, scoped_config)
def _register_s3_control_events(
self, client, endpoint_bridge,
endpoint_url, client_config, scoped_config
):
if client.meta.service_model.service_name != 's3control':
return
S3ControlArnParamHandler().register(client.meta.events)
S3ControlEndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
def _set_s3_presign_signature_version(self, client_meta,
client_config, scoped_config):
# This will return the manually configured signature version, or None
# if none was manually set. If a customer manually sets the signature
# version, we always want to use what they set.
provided_signature_version = _get_configured_signature_version(
's3', client_config, scoped_config)
if provided_signature_version is not None:
return
# Check to see if the region is a region that we know about. If we
# don't know about a region, then we can safely assume it's a new
# region that is sigv4 only, since all new S3 regions only allow sigv4.
# The only exception is aws-global. This is a pseudo-region for the
# global endpoint; we should respect the signature versions it
# supports, which includes v2.
regions = self._endpoint_resolver.get_available_endpoints(
's3', client_meta.partition)
if client_meta.region_name != 'aws-global' and \
client_meta.region_name not in regions:
return
# If it is a region we know about, we want to default to sigv2, so here
# we check to see if it is available.
endpoint = self._endpoint_resolver.construct_endpoint(
's3', client_meta.region_name)
signature_versions = endpoint['signatureVersions']
if 's3' not in signature_versions:
return
# We now know that we're in a known region that supports sigv2 and
# the customer hasn't set a signature version so we default the
# signature version to sigv2.
client_meta.events.register(
'choose-signer.s3', self._default_s3_presign_to_sigv2)
def _default_s3_presign_to_sigv2(self, signature_version, **kwargs):
"""
Returns the 's3' (sigv2) signer if presigning an s3 request. This is
intended to be used to set the default signature version for the signer
to sigv2.
:type signature_version: str
:param signature_version: The current client signature version.
:type signing_name: str
:param signing_name: The signing name of the service.
:return: 's3' if the request is an s3 presign request, None otherwise
"""
for suffix in ['-query', '-presign-post']:
if signature_version.endswith(suffix):
return 's3' + suffix
def _get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials,
scoped_config, client_config, endpoint_bridge):
args_creator = ClientArgsCreator(
self._event_emitter, self._user_agent,
self._response_parser_factory, self._loader,
self._exceptions_factory, config_store=self._config_store)
return args_creator.get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
def _create_methods(self, service_model):
op_dict = {}
for operation_name in service_model.operation_names:
py_operation_name = xform_name(operation_name)
op_dict[py_operation_name] = self._create_api_method(
py_operation_name, operation_name, service_model)
return op_dict
def _create_name_mapping(self, service_model):
# py_name -> OperationName, for every operation available
# for a service.
mapping = {}
for operation_name in service_model.operation_names:
py_operation_name = xform_name(operation_name)
mapping[py_operation_name] = operation_name
return mapping
def _create_api_method(self, py_operation_name, operation_name,
service_model):
def _api_call(self, *args, **kwargs):
# We're accepting *args so that we can give a more helpful
# error message than TypeError: _api_call takes exactly
# 1 argument.
if args:
raise TypeError(
"%s() only accepts keyword arguments." % py_operation_name)
# The "self" in this scope is referring to the BaseClient.
return self._make_api_call(operation_name, kwargs)
_api_call.__name__ = str(py_operation_name)
# Add the docstring to the client method
operation_model = service_model.operation_model(operation_name)
docstring = ClientMethodDocstring(
operation_model=operation_model,
method_name=operation_name,
event_emitter=self._event_emitter,
method_description=operation_model.documentation,
example_prefix='response = client.%s' % py_operation_name,
include_signature=False
)
_api_call.__doc__ = docstring
return _api_call
class ClientEndpointBridge(object):
"""Bridges endpoint data and client creation
This class handles taking out the relevant arguments from the endpoint
resolver and determining which values to use, taking into account any
client configuration options and scope configuration options.
This class also handles determining what, if any, region to use if no
explicit region setting is provided. For example, the Amazon S3 client will
utilize "us-east-1" by default if no region can be resolved."""
DEFAULT_ENDPOINT = '{service}.{region}.amazonaws.com'
_DUALSTACK_ENABLED_SERVICES = ['s3', 's3-control']
def __init__(self, endpoint_resolver, scoped_config=None,
client_config=None, default_endpoint=None,
service_signing_name=None):
self.service_signing_name = service_signing_name
self.endpoint_resolver = endpoint_resolver
self.scoped_config = scoped_config
self.client_config = client_config
self.default_endpoint = default_endpoint or self.DEFAULT_ENDPOINT
def resolve(self, service_name, region_name=None, endpoint_url=None,
is_secure=True):
region_name = self._check_default_region(service_name, region_name)
resolved = self.endpoint_resolver.construct_endpoint(
service_name, region_name)
# If we can't resolve the region, we'll attempt to get a global
# endpoint for non-regionalized services (iam, route53, etc)
if not resolved:
# TODO: fallback partition_name should be configurable in the
# future for users to define as needed.
resolved = self.endpoint_resolver.construct_endpoint(
service_name, region_name, partition_name='aws')
if resolved:
return self._create_endpoint(
resolved, service_name, region_name, endpoint_url, is_secure)
else:
return self._assume_endpoint(service_name, region_name,
endpoint_url, is_secure)
def _check_default_region(self, service_name, region_name):
if region_name is not None:
return region_name
# Use the client_config region if no explicit region was provided.
if self.client_config and self.client_config.region_name is not None:
return self.client_config.region_name
def _create_endpoint(self, resolved, service_name, region_name,
endpoint_url, is_secure):
explicit_region = region_name is not None
region_name, signing_region = self._pick_region_values(
resolved, region_name, endpoint_url)
if endpoint_url is None:
if self._is_s3_dualstack_mode(service_name):
endpoint_url = self._create_dualstack_endpoint(
service_name, region_name,
resolved['dnsSuffix'], is_secure, explicit_region)
else:
# Use the sslCommonName over the hostname for Python 2.6 compat.
hostname = resolved.get('sslCommonName', resolved.get('hostname'))
endpoint_url = self._make_url(hostname, is_secure,
resolved.get('protocols', []))
signature_version = self._resolve_signature_version(
service_name, resolved)
signing_name = self._resolve_signing_name(service_name, resolved)
return self._create_result(
service_name=service_name, region_name=region_name,
signing_region=signing_region, signing_name=signing_name,
endpoint_url=endpoint_url, metadata=resolved,
signature_version=signature_version)
def _is_s3_dualstack_mode(self, service_name):
if service_name not in self._DUALSTACK_ENABLED_SERVICES:
return False
# TODO: This normalization logic is duplicated from the
# ClientArgsCreator class. Consolidate everything to
# ClientArgsCreator. _resolve_signature_version also has similarly
# duplicated logic.
client_config = self.client_config
if client_config is not None and client_config.s3 is not None and \
'use_dualstack_endpoint' in client_config.s3:
# Client config trumps scoped config.
return client_config.s3['use_dualstack_endpoint']
if self.scoped_config is None:
return False
enabled = self.scoped_config.get('s3', {}).get(
'use_dualstack_endpoint', False)
if enabled in [True, 'True', 'true']:
return True
return False
def _create_dualstack_endpoint(self, service_name, region_name,
dns_suffix, is_secure, explicit_region):
if not explicit_region and region_name == 'aws-global':
# If the region_name passed was not explicitly set, default to
# us-east-1 instead of the modeled default aws-global. Dualstack
# does not support aws-global
region_name = 'us-east-1'
hostname = '{service}.dualstack.{region}.{dns_suffix}'.format(
service=service_name, region=region_name,
dns_suffix=dns_suffix)
# Dualstack supports http and https so we're hardcoding this value for
# now. This can potentially move into the endpoints.json file.
return self._make_url(hostname, is_secure, ['http', 'https'])
def _assume_endpoint(self, service_name, region_name, endpoint_url,
is_secure):
if endpoint_url is None:
# Expand the default hostname URI template.
hostname = self.default_endpoint.format(
service=service_name, region=region_name)
endpoint_url = self._make_url(hostname, is_secure,
['http', 'https'])
logger.debug('Assuming an endpoint for %s, %s: %s',
service_name, region_name, endpoint_url)
# We still want to allow the user to provide an explicit version.
signature_version = self._resolve_signature_version(
service_name, {'signatureVersions': ['v4']})
signing_name = self._resolve_signing_name(service_name, resolved={})
return self._create_result(
service_name=service_name, region_name=region_name,
signing_region=region_name, signing_name=signing_name,
signature_version=signature_version, endpoint_url=endpoint_url,
metadata={})
def _create_result(self, service_name, region_name, signing_region,
signing_name, endpoint_url, signature_version,
metadata):
return {
'service_name': service_name,
'region_name': region_name,
'signing_region': signing_region,
'signing_name': signing_name,
'endpoint_url': endpoint_url,
'signature_version': signature_version,
'metadata': metadata
}
def _make_url(self, hostname, is_secure, supported_protocols):
if is_secure and 'https' in supported_protocols:
scheme = 'https'
else:
scheme = 'http'
return '%s://%s' % (scheme, hostname)
def _resolve_signing_name(self, service_name, resolved):
# CredentialScope overrides everything else.
if 'credentialScope' in resolved \
and 'service' in resolved['credentialScope']:
return resolved['credentialScope']['service']
# Use the signingName from the model if present.
if self.service_signing_name:
return self.service_signing_name
# Just assume it is the same as the service name.
return service_name
def _pick_region_values(self, resolved, region_name, endpoint_url):
signing_region = region_name
if endpoint_url is None:
# Do not use the region name or signing name from the resolved
# endpoint if the user explicitly provides an endpoint_url. This
# would happen if we resolve to an endpoint where the service has
# a "defaults" section that overrides all endpoint with a single
# hostname and credentialScope. This has been the case historically
# for how STS has worked. The only way to resolve an STS endpoint
# was to provide a region_name and an endpoint_url. In that case,
# we would still resolve an endpoint, but we would not use the
# resolved endpointName or signingRegion because we want to allow
# custom endpoints.
region_name = resolved['endpointName']
signing_region = region_name
if 'credentialScope' in resolved \
and 'region' in resolved['credentialScope']:
signing_region = resolved['credentialScope']['region']
return region_name, signing_region
def _resolve_signature_version(self, service_name, resolved):
configured_version = _get_configured_signature_version(
service_name, self.client_config, self.scoped_config)
if configured_version is not None:
return configured_version
# Pick a signature version from the endpoint metadata if present.
if 'signatureVersions' in resolved:
potential_versions = resolved['signatureVersions']
if service_name == 's3':
return 's3v4'
if 'v4' in potential_versions:
return 'v4'
# Now just iterate over the signature versions in order until we
# find the first one that is known to Botocore.
for known in potential_versions:
if known in AUTH_TYPE_MAPS:
return known
raise UnknownSignatureVersionError(
signature_version=resolved.get('signatureVersions'))
class BaseClient(object):
# This is actually reassigned with the py->op_name mapping
# when the client creator creates the subclass. This value is used
# because calls such as client.get_paginator('list_objects') use the
# snake_case name, but we need to know the ListObjects form.
# xform_name() does the ListObjects->list_objects conversion, but
# we need the reverse mapping here.
_PY_TO_OP_NAME = {}
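    # Illustrative shape once ClientCreator populates it, e.g.:
    #     {'list_objects': 'ListObjects', 'put_object': 'PutObject', ...}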
def __init__(self, serializer, endpoint, response_parser,
event_emitter, request_signer, service_model, loader,
client_config, partition, exceptions_factory):
self._serializer = serializer
self._endpoint = endpoint
self._response_parser = response_parser
self._request_signer = request_signer
self._cache = {}
self._loader = loader
self._client_config = client_config
self.meta = ClientMeta(event_emitter, self._client_config,
endpoint.host, service_model,
self._PY_TO_OP_NAME, partition)
self._exceptions_factory = exceptions_factory
self._exceptions = None
self._register_handlers()
def __getattr__(self, item):
event_name = 'getattr.%s.%s' % (
self._service_model.service_id.hyphenize(), item
)
handler, event_response = self.meta.events.emit_until_response(
event_name, client=self)
if event_response is not None:
return event_response
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, item)
)
def _register_handlers(self):
# Register the handler required to sign requests.
service_id = self.meta.service_model.service_id.hyphenize()
self.meta.events.register(
'request-created.%s' % service_id,
self._request_signer.handler
)
@property
def _service_model(self):
return self.meta.service_model
def _make_api_call(self, operation_name, api_params):
operation_model = self._service_model.operation_model(operation_name)
service_name = self._service_model.service_name
history_recorder.record('API_CALL', {
'service': service_name,
'operation': operation_name,
'params': api_params,
})
if operation_model.deprecated:
logger.debug('Warning: %s.%s() is deprecated',
service_name, operation_name)
request_context = {
'client_region': self.meta.region_name,
'client_config': self.meta.config,
'has_streaming_input': operation_model.has_streaming_input,
'auth_type': operation_model.auth_type,
}
request_dict = self._convert_to_request_dict(
api_params, operation_model, context=request_context)
service_id = self._service_model.service_id.hyphenize()
handler, event_response = self.meta.events.emit_until_response(
'before-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
model=operation_model, params=request_dict,
request_signer=self._request_signer, context=request_context)
if event_response is not None:
http, parsed_response = event_response
else:
http, parsed_response = self._make_request(
operation_model, request_dict, request_context)
self.meta.events.emit(
'after-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
http_response=http, parsed=parsed_response,
model=operation_model, context=request_context
)
if http.status_code >= 300:
error_code = parsed_response.get("Error", {}).get("Code")
error_class = self.exceptions.from_code(error_code)
raise error_class(parsed_response, operation_name)
else:
return parsed_response
def _make_request(self, operation_model, request_dict, request_context):
try:
return self._endpoint.make_request(operation_model, request_dict)
except Exception as e:
self.meta.events.emit(
'after-call-error.{service_id}.{operation_name}'.format(
service_id=self._service_model.service_id.hyphenize(),
operation_name=operation_model.name),
exception=e, context=request_context
)
raise
def _convert_to_request_dict(self, api_params, operation_model,
context=None):
api_params = self._emit_api_params(
api_params, operation_model, context)
request_dict = self._serializer.serialize_to_request(
api_params, operation_model)
if not self._client_config.inject_host_prefix:
request_dict.pop('host_prefix', None)
prepare_request_dict(request_dict, endpoint_url=self._endpoint.host,
user_agent=self._client_config.user_agent,
context=context)
return request_dict
def _emit_api_params(self, api_params, operation_model, context):
# Given the API params provided by the user and the operation_model
# we can serialize the request to a request_dict.
operation_name = operation_model.name
# Emit an event that allows users to modify the parameters at the
# beginning of the method. It allows handlers to modify existing
# parameters or return a new set of parameters to use.
service_id = self._service_model.service_id.hyphenize()
responses = self.meta.events.emit(
'provide-client-params.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
api_params = first_non_none_response(responses, default=api_params)
event_name = (
'before-parameter-build.{service_id}.{operation_name}')
self.meta.events.emit(
event_name.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
return api_params
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
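        Example (a minimal sketch; assumes an S3 ``client`` and an
        illustrative bucket name)::
            paginator = client.get_paginator('list_objects')
            for page in paginator.paginate(Bucket='mybucket'):
                print(page.get('Contents', []))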
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Add the docstring for the paginate method.
paginate.__doc__ = PaginatorDocstring(
paginator_name=actual_operation_name,
event_emitter=self.meta.events,
service_model=self.meta.service_model,
paginator_config=paginator_config,
include_signature=False
)
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator
def can_paginate(self, operation_name):
"""Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
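        Example (a sketch; assumes an S3 ``client``)::
            if client.can_paginate('list_objects'):
                paginator = client.get_paginator('list_objects')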
"""
if 'page_config' not in self._cache:
try:
page_config = self._loader.load_service_model(
self._service_model.service_name,
'paginators-1',
self._service_model.api_version)['pagination']
self._cache['page_config'] = page_config
except DataNotFoundError:
self._cache['page_config'] = {}
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
return actual_operation_name in self._cache['page_config']
def _get_waiter_config(self):
if 'waiter_config' not in self._cache:
try:
waiter_config = self._loader.load_service_model(
self._service_model.service_name,
'waiters-2',
self._service_model.api_version)
self._cache['waiter_config'] = waiter_config
except DataNotFoundError:
self._cache['waiter_config'] = {}
return self._cache['waiter_config']
def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
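        Example (a sketch; the waiter and bucket names are illustrative)::
            waiter = client.get_waiter('bucket_exists')
            waiter.wait(Bucket='mybucket')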
"""
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self)
@CachedProperty
def waiter_names(self):
"""Returns a list of all available waiters."""
config = self._get_waiter_config()
if not config:
return []
model = waiter.WaiterModel(config)
# The waiter config is a dict; we just want the waiter names,
# which are the keys in the dict.
return [xform_name(name) for name in model.waiter_names]
@property
def exceptions(self):
if self._exceptions is None:
self._exceptions = self._load_exceptions()
return self._exceptions
def _load_exceptions(self):
return self._exceptions_factory.create_client_exceptions(
self._service_model)
class ClientMeta(object):
"""Holds additional client methods.
This class holds additional information for clients. It exists for
two reasons:
* To give advanced functionality to clients
* To namespace additional client attributes from the operation
names which are mapped to methods at runtime. This avoids
ever running into collisions with operation names.
"""
def __init__(self, events, client_config, endpoint_url, service_model,
method_to_api_mapping, partition):
self.events = events
self._client_config = client_config
self._endpoint_url = endpoint_url
self._service_model = service_model
self._method_to_api_mapping = method_to_api_mapping
self._partition = partition
@property
def service_model(self):
return self._service_model
@property
def region_name(self):
return self._client_config.region_name
@property
def endpoint_url(self):
return self._endpoint_url
@property
def config(self):
return self._client_config
@property
def method_to_api_mapping(self):
return self._method_to_api_mapping
@property
def partition(self):
return self._partition
def _get_configured_signature_version(service_name, client_config,
scoped_config):
"""
Gets the manually configured signature version.
:returns: the customer configured signature version, or None if no
signature version was configured.
"""
# Client config overrides everything.
if client_config and client_config.signature_version is not None:
return client_config.signature_version
# Scoped config overrides picking from the endpoint metadata.
if scoped_config is not None:
# A given service may have service specific configuration in the
# config file, so we need to check there as well.
service_config = scoped_config.get(service_name)
if service_config is not None and isinstance(service_config, dict):
version = service_config.get('signature_version')
if version:
logger.debug(
"Switching signature version for service %s "
"to version %s based on config file override.",
service_name, version)
return version
return None
| 41,514 | Python | 42.977754 | 88 | 0.624368 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/session.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
import re
import socket
import warnings
from botocore import __version__
from botocore import UNSIGNED
import botocore.configloader
import botocore.credentials
import botocore.client
from botocore.configprovider import ConfigValueStore
from botocore.configprovider import ConfigChainFactory
from botocore.configprovider import create_botocore_default_config_mapping
from botocore.configprovider import BOTOCORE_DEFAUT_SESSION_VARIABLES
from botocore.exceptions import (
ConfigNotFound, ProfileNotFound, UnknownServiceError,
PartialCredentialsError,
)
from botocore.errorfactory import ClientExceptionsFactory
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.hooks import EventAliaser
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore.regions import EndpointResolver
from botocore.model import ServiceModel
from botocore import monitoring
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
from botocore import utils
from botocore.utils import EVENT_ALIASES, validate_region_name
from botocore.compat import MutableMapping
logger = logging.getLogger(__name__)
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
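    A minimal usage sketch (the service and region are illustrative)::
        session = botocore.session.Session()
        client = session.create_client('s3', region_name='us-east-1')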
"""
SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES)
#: The default format string to use when configuring the botocore logger.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SESSION_VARIABLES``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
:type profile: str
:param profile: The name of the profile to use for this
session. Note that the profile can only be set when
the session is created.
"""
if event_hooks is None:
self._original_handler = HierarchicalEmitter()
else:
self._original_handler = event_hooks
self._events = EventAliaser(self._original_handler)
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
# The _profile attribute is just used to cache the value
# of the current profile to avoid going through the normal
# config lookup process on each access.
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
# This is a dict that stores per session specific config variable
# overrides via set_config_variable().
self._session_instance_vars = {}
if profile is not None:
self._session_instance_vars['profile'] = profile
self._client_config = None
self._last_client_region_used = None
self._components = ComponentLocator()
self._internal_components = ComponentLocator()
self._register_components()
self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES)
if session_vars is not None:
self.session_var_map.update(session_vars)
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
self._register_response_parser_factory()
self._register_exceptions_factory()
self._register_config_store()
self._register_monitor()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider', self._create_credential_resolver)
def _create_credential_resolver(self):
return botocore.credentials.create_credential_resolver(
self, region_name=self._last_client_region_used
)
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
def create_default_resolver():
loader = self.get_component('data_loader')
endpoints = loader.load_data('endpoints')
return EndpointResolver(endpoints)
self._internal_components.lazy_register_component(
'endpoint_resolver', create_default_resolver)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
ResponseParserFactory())
def _register_exceptions_factory(self):
self._internal_components.register_component(
'exceptions_factory', ClientExceptionsFactory())
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
def _register_config_store(self):
config_store_component = ConfigValueStore(
mapping=create_botocore_default_config_mapping(self)
)
self._components.register_component('config_store',
config_store_component)
def _register_monitor(self):
self._internal_components.lazy_register_component(
'monitor', self._create_csm_monitor)
def _create_csm_monitor(self):
if self.get_config_variable('csm_enabled'):
client_id = self.get_config_variable('csm_client_id')
host = self.get_config_variable('csm_host')
port = self.get_config_variable('csm_port')
handler = monitoring.Monitor(
adapter=monitoring.MonitorEventAdapter(),
publisher=monitoring.SocketPublisher(
socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
host=host,
port=port,
serializer=monitoring.CSMSerializer(
csm_client_id=client_id)
)
)
return handler
return None
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
# is a mapping of profile names to the config values for that profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
if self._profile is None:
profile = self.get_config_variable('profile')
self._profile = profile
return self._profile
def get_config_variable(self, logical_name, methods=None):
if methods is not None:
return self._get_config_variable_with_custom_methods(
logical_name, methods)
return self.get_component('config_store').get_config_variable(
logical_name)
def _get_config_variable_with_custom_methods(self, logical_name, methods):
# If a custom list of methods was supplied we need to preserve the
# behavior with the new system. To do so a new chain that is a copy of
# the old one will be constructed, but only with the supplied methods
# being added to the chain. This chain will be consulted for a value
# and then thrown out. This is not efficient, and the methods arg is
# not used within botocore itself; it exists only for backwards
# compatibility.
chain_builder = SubsetChainConfigFactory(session=self, methods=methods)
mapping = create_botocore_default_config_mapping(self)
for name, config_options in self.session_var_map.items():
config_name, env_vars, default, typecast = config_options
build_chain_config_args = {
'conversion_func': typecast,
'default': default,
}
if 'instance' in methods:
build_chain_config_args['instance_name'] = name
if 'env' in methods:
build_chain_config_args['env_var_names'] = env_vars
if 'config' in methods:
build_chain_config_args['config_property_name'] = config_name
mapping[name] = chain_builder.create_config_chain(
**build_chain_config_args
)
config_store_component = ConfigValueStore(
mapping=mapping
)
value = config_store_component.get_config_variable(logical_name)
return value
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
logger.debug(
"Setting config variable for %s to %r",
logical_name,
value,
)
self._session_instance_vars[logical_name] = value
def instance_variables(self):
return copy.copy(self._session_instance_vars)
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
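        Example of a returned mapping (illustrative keys and values)::
            {'region': 'us-west-2', 'output': 'json'}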
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = botocore.configloader.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = botocore.configloader.raw_config_parse(
cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def get_default_client_config(self):
"""Retrieves the default config for creating clients
:rtype: botocore.client.Config
:returns: The default client config object when creating clients. If
the value is ``None`` then there is no default config object
attached to the session.
"""
return self._client_config
def set_default_client_config(self, client_config):
"""Sets the default config for creating clients
:type client_config: botocore.client.Config
:param client_config: The default client config object when creating
clients. If the value is ``None`` then there is no default config
object attached to the session.
"""
self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
:param token: An optional session token used by STS session
credentials.
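        Example (a sketch; the key values are placeholders only)::
            session.set_credentials('AKIDEXAMPLE', 'wJalrEXAMPLEKEY')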
"""
self._credentials = botocore.credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credential.Credential` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> <exec_env>
Where:
- agent_name is the value of the `user_agent_name` attribute
of the session object (`Botocore` by default).
- agent_version is the value of the `user_agent_version`
attribute of the session object (the botocore version by default).
- py_ver is the version of the Python interpreter being used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
- exec_env is exec-env/$AWS_EXECUTION_ENV
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
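        An illustrative result (actual values depend on the environment)::
            Botocore/1.20.0 Python/3.8.10 Linux/5.4.0 exec-env/AWS_Lambda_python3.8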
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if os.environ.get('AWS_EXECUTION_ENV') is not None:
base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV')
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
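        Example (a sketch; the service and operation names are illustrative)::
            model = session.get_service_model('s3')
            model.operation_names  # e.g. ['ListObjects', 'PutObject', ...]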
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
waiter_config = loader.load_service_model(
service_name, 'waiters-2', api_version)
return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
paginator_config = loader.load_service_model(
service_name, 'paginators-1', api_version)
return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
service_id = EVENT_ALIASES.get(service_name, service_name)
self._events.emit('service-data-loaded.%s' % service_id,
service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
return self.get_component('data_loader')\
.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``Log`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.LOG_FORMAT``.
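        Example (a sketch; the logger name is illustrative)::
            import logging
            session.set_stream_logger('botocore.credentials', logging.DEBUG)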
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.LOG_FORMAT
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions are
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
        :type unique_id_uses_count: boolean
        :param unique_id_uses_count: Specifies if the event should maintain
            a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
        :type unique_id_uses_count: boolean
        :param unique_id_uses_count: Specifies if the event should maintain
            a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
try:
return self._components.get_component(name)
except ValueError:
if name in ['endpoint_resolver', 'exceptions_factory']:
warnings.warn(
'Fetching the %s component with the get_component() '
'method is deprecated as the component has always been '
'considered an internal interface of botocore' % name,
DeprecationWarning)
return self._internal_components.get_component(name)
raise
def _get_internal_component(self, name):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.get_component(name)
def _register_internal_component(self, name, component):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.register_component(name, component)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
            be created. You can use the ``Session.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
              use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
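        Example (a minimal sketch; the service, region, and endpoint are
        illustrative)::
            session = botocore.session.Session()
            client = session.create_client('s3', region_name='us-west-2')
            # An explicit endpoint overrides the constructed URL:
            local = session.create_client(
                's3', region_name='us-east-1',
                endpoint_url='http://localhost:8000')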
"""
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
region_name = self._resolve_region_name(region_name, config)
# Figure out the verify value base on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
if api_version is None:
api_version = self.get_config_variable('api_versions').get(
service_name, None)
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if config is not None and config.signature_version is UNSIGNED:
credentials = None
elif aws_access_key_id is not None and aws_secret_access_key is not None:
credentials = botocore.credentials.Credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
elif self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key):
raise PartialCredentialsError(
provider='explicit',
cred_var=self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key))
else:
credentials = self.get_credentials()
endpoint_resolver = self._get_internal_component('endpoint_resolver')
exceptions_factory = self._get_internal_component('exceptions_factory')
config_store = self.get_component('config_store')
client_creator = botocore.client.ClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory,
exceptions_factory, config_store)
client = client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
monitor = self._get_internal_component('monitor')
if monitor is not None:
monitor.register(client.meta.events)
return client
def _resolve_region_name(self, region_name, config):
# Figure out the user-provided region based on the various
# configuration options.
if region_name is None:
if config and config.region_name is not None:
region_name = config.region_name
else:
region_name = self.get_config_variable('region')
validate_region_name(region_name)
# For any client that we create in retrieving credentials
# we want to create it using the same region as specified in
# creating this client. It is important to note though that the
# credentials client is only created once per session. So if a new
# client is created with a different region, its credential resolver
# will use the region of the first client. However, that is not an
# issue as of now because the credential resolver uses only STS and
# the credentials returned at regional endpoints are valid across
# all regions in the partition.
self._last_client_region_used = region_name
return region_name
def _missing_cred_vars(self, access_key, secret_key):
if access_key is not None and secret_key is None:
return 'aws_secret_access_key'
if secret_key is not None and access_key is None:
return 'aws_access_key_id'
return None
def get_available_partitions(self):
"""Lists the available partitions found on disk
:rtype: list
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
"""
resolver = self._get_internal_component('endpoint_resolver')
return resolver.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the region and endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoint for (e.g., s3).
This parameter accepts a service name (e.g., "elb") or endpoint
prefix (e.g., "elasticloadbalancing").
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
            endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.)
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
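        Example (a sketch; output abbreviated and illustrative)::
            session.get_available_regions('s3')
            # ['af-south-1', 'ap-east-1', ..., 'us-west-2']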
"""
resolver = self._get_internal_component('endpoint_resolver')
results = []
try:
service_data = self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred[name]
self._components[name] = factory()
# Only delete the component from the deferred dict after
# successfully creating the object from the factory as well as
# injecting the instantiated value into the _components dict.
del self._deferred[name]
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
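# Illustrative sketch (not from the botocore source) of the locator above:
# lazy_register_component defers construction until the first lookup, after
# which the built object is cached in _components:
#
#     >>> locator = ComponentLocator()
#     >>> locator.lazy_register_component('expensive', lambda: object())
#     >>> a = locator.get_component('expensive')   # factory runs here
#     >>> locator.get_component('expensive') is a  # cached afterwards
#     True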
class SessionVarDict(MutableMapping):
def __init__(self, session, session_vars):
self._session = session
self._store = copy.copy(session_vars)
def __getitem__(self, key):
return self._store[key]
def __setitem__(self, key, value):
self._store[key] = value
self._update_config_store_from_session_vars(key, value)
def __delitem__(self, key):
del self._store[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def _update_config_store_from_session_vars(self, logical_name,
config_options):
# This is for backwards compatibility. The new preferred way to
# modify configuration logic is to use the component system to get
# the config_store component from the session, and then update
# a key with a custom config provider(s).
# This backwards compatibility method takes the old session_vars
        # list of tuples and transforms that into a set of updates to
# the config_store component.
config_chain_builder = ConfigChainFactory(session=self._session)
config_name, env_vars, default, typecast = config_options
config_store = self._session.get_component('config_store')
config_store.set_config_provider(
logical_name,
config_chain_builder.create_config_chain(
instance_name=logical_name,
env_var_names=env_vars,
config_property_names=config_name,
default=default,
conversion_func=typecast,
)
)
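    # Hypothetical illustration of the legacy ``session_vars`` tuple format
    # unpacked above; each entry is (config file option, env var name(s),
    # default value, conversion function). ``session`` is assumed to be an
    # existing Session instance and the variable name is made up:
    #
    #     >>> var_dict = SessionVarDict(session, session_vars={})
    #     >>> var_dict['example_timeout'] = (
    #     ...     'example_timeout', 'EXAMPLE_TIMEOUT', 10, int)
    #     # __setitem__ above also pushes a provider chain for
    #     # 'example_timeout' into the session's 'config_store' component.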
class SubsetChainConfigFactory(object):
"""A class for creating backwards compatible configuration chains.
This class can be used instead of
:class:`botocore.configprovider.ConfigChainFactory` to make it honor the
methods argument to get_config_variable. This class can be used to filter
out providers that are not in the methods tuple when creating a new config
chain.
"""
def __init__(self, session, methods, environ=None):
self._factory = ConfigChainFactory(session, environ)
self._supported_methods = methods
def create_config_chain(self, instance_name=None, env_var_names=None,
config_property_name=None, default=None,
conversion_func=None):
"""Build a config chain following the standard botocore pattern.
This config chain factory will omit any providers not in the methods
tuple provided at initialization. For example if given the tuple
('instance', 'config',) it will not inject the environment provider
into the standard config chain. This lets the botocore session support
the custom ``methods`` argument for all the default botocore config
variables when calling ``get_config_variable``.
"""
if 'instance' not in self._supported_methods:
instance_name = None
if 'env' not in self._supported_methods:
env_var_names = None
if 'config' not in self._supported_methods:
config_property_name = None
return self._factory.create_config_chain(
instance_name=instance_name,
env_var_names=env_var_names,
config_property_names=config_property_name,
default=default,
conversion_func=conversion_func,
)
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
| 43,433 | Python | 40.803657 | 86 | 0.623627 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/compat.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import datetime
import sys
import inspect
import warnings
import hashlib
import logging
import shlex
from math import floor
from botocore.vendored import six
from botocore.exceptions import MD5UnavailableError
from dateutil.tz import tzlocal
from urllib3 import exceptions
logger = logging.getLogger(__name__)
if six.PY3:
from botocore.vendored.six.moves import http_client
class HTTPHeaders(http_client.HTTPMessage):
pass
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import unquote_plus
from urllib.parse import urlparse
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from urllib.parse import urljoin
from urllib.parse import parse_qsl
from urllib.parse import parse_qs
from http.client import HTTPResponse
from io import IOBase as _IOBase
from base64 import encodebytes
from email.utils import formatdate
from itertools import zip_longest
file_type = _IOBase
zip = zip
# In python3, unquote takes a str() object, url decodes it,
# then takes the bytestring and decodes it to utf-8.
    # In Python 2 we'll have to do this ourselves (see below).
unquote_str = unquote_plus
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp.raw._sock.settimeout(timeout)
def accepts_kwargs(func):
        # In python3.4.1, there are backwards-incompatible
        # changes when using getargspec with functools.partial.
return inspect.getfullargspec(func)[2]
def ensure_unicode(s, encoding=None, errors=None):
# NOOP in Python 3, because every string is already unicode
return s
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, str):
return s.encode(encoding, errors)
if isinstance(s, bytes):
return s
raise ValueError("Expected str or bytes, received %s." % type(s))
else:
from urllib import quote
from urllib import urlencode
from urllib import unquote
from urllib import unquote_plus
from urlparse import urlparse
from urlparse import urlsplit
from urlparse import urlunsplit
from urlparse import urljoin
from urlparse import parse_qsl
from urlparse import parse_qs
from email.message import Message
from email.Utils import formatdate
file_type = file
from itertools import izip as zip
from itertools import izip_longest as zip_longest
from httplib import HTTPResponse
from base64 import encodestring as encodebytes
class HTTPHeaders(Message):
# The __iter__ method is not available in python2.x, so we have
# to port the py3 version.
def __iter__(self):
for field, value in self._headers:
yield field
def unquote_str(value, encoding='utf-8'):
# In python2, unquote() gives us a string back that has the urldecoded
# bits, but not the unicode parts. We need to decode this manually.
# unquote has special logic in which if it receives a unicode object it
# will decode it to latin1. This is hard coded. To avoid this, we'll
# encode the string with the passed in encoding before trying to
# unquote it.
byte_string = value.encode(encoding)
return unquote_plus(byte_string).decode(encoding)
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp._sock.settimeout(timeout)
def accepts_kwargs(func):
return inspect.getargspec(func)[2]
def ensure_unicode(s, encoding='utf-8', errors='strict'):
if isinstance(s, six.text_type):
return s
return unicode(s, encoding, errors)
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
if isinstance(s, str):
return s
raise ValueError("Expected str or unicode, received %s." % type(s))
from collections import OrderedDict
try:
import xml.etree.cElementTree as ETree
except ImportError:
    # cElementTree does not exist in Python 3.9+
import xml.etree.ElementTree as ETree
XMLParseError = ETree.ParseError
import json
def filter_ssl_warnings():
# Ignore warnings related to SNI as it is not being used in validations.
warnings.filterwarnings(
'ignore',
message="A true SSLContext object is not available.*",
category=exceptions.InsecurePlatformWarning,
module=r".*urllib3\.util\.ssl_")
@classmethod
def from_dict(cls, d):
new_instance = cls()
for key, value in d.items():
new_instance[key] = value
return new_instance
@classmethod
def from_pairs(cls, pairs):
new_instance = cls()
for key, value in pairs:
new_instance[key] = value
return new_instance
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
def copy_kwargs(kwargs):
"""
This used to be a compat shim for 2.6 but is now just an alias.
"""
copy_kwargs = copy.copy(kwargs)
return copy_kwargs
def total_seconds(delta):
"""
Returns the total seconds in a ``datetime.timedelta``.
This used to be a compat shim for 2.6 but is now just an alias.
:param delta: The timedelta object
:type delta: ``datetime.timedelta``
"""
return delta.total_seconds()
# Checks to see if md5 is available on this system. A given system might not
# have access to it for various reasons, such as FIPS mode being enabled.
try:
hashlib.md5()
MD5_AVAILABLE = True
except ValueError:
MD5_AVAILABLE = False
def get_md5(*args, **kwargs):
"""
Attempts to get an md5 hashing object.
    :param args: Args to pass to the MD5 constructor
    :param kwargs: Key word arguments to pass to the MD5 constructor
    :return: An MD5 hashing object if available.
    :raises MD5UnavailableError: if md5 support is unavailable on this system
        (for example, when FIPS mode is enabled).
"""
if MD5_AVAILABLE:
return hashlib.md5(*args, **kwargs)
else:
raise MD5UnavailableError()
def compat_shell_split(s, platform=None):
if platform is None:
platform = sys.platform
if platform == "win32":
return _windows_shell_split(s)
else:
return shlex.split(s)
def _windows_shell_split(s):
"""Splits up a windows command as the built-in command parser would.
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime (which is what python does
when you call popen) the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: The command string to split up into parts.
:return: A list of command components.
"""
if not s:
return []
components = []
buff = []
is_quoted = False
num_backslashes = 0
for character in s:
if character == '\\':
# We can't simply append backslashes because we don't know if
# they are being used as escape characters or not. Instead we
# keep track of how many we've encountered and handle them when
# we encounter a different character.
num_backslashes += 1
elif character == '"':
if num_backslashes > 0:
# The backslashes are in a chain leading up to a double
# quote, so they are escaping each other.
buff.append('\\' * int(floor(num_backslashes / 2)))
remainder = num_backslashes % 2
num_backslashes = 0
if remainder == 1:
# The number of backslashes is uneven, so they are also
# escaping the double quote, so it needs to be added to
# the current component buffer.
buff.append('"')
continue
# We've encountered a double quote that is not escaped,
# so we toggle is_quoted.
is_quoted = not is_quoted
# If there are quotes, then we may want an empty string. To be
# safe, we add an empty string to the buffer so that we make
# sure it sticks around if there's nothing else between quotes.
# If there is other stuff between quotes, the empty string will
# disappear during the joining process.
buff.append('')
elif character in [' ', '\t'] and not is_quoted:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
# Excess whitespace is ignored, so only add the components list
# if there is anything in the buffer.
if buff:
components.append(''.join(buff))
buff = []
else:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
buff.append(character)
# Quotes must be terminated.
if is_quoted:
raise ValueError('No closing quotation in string: %s' % s)
# There may be some leftover backslashes, so we need to add them in.
# There's no quote so we add the exact number.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
# Add the final component in if there is anything in the buffer.
if buff:
components.append(''.join(buff))
return components
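# Illustrative behaviour of the splitter above (sketch, not from the source),
# shown doctest-style: double quotes group words, and backslash-escaped quotes
# survive as literal quote characters:
#
#     >>> compat_shell_split('aws "foo bar" baz', platform='win32')
#     ['aws', 'foo bar', 'baz']
#     >>> compat_shell_split('he said \\"hi\\"', platform='win32')
#     ['he', 'said', '"hi"']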
def get_tzinfo_options():
# Due to dateutil/dateutil#197, Windows may fail to parse times in the past
# with the system clock. We can alternatively fallback to tzwininfo when
# this happens, which will get time info from the Windows registry.
if sys.platform == 'win32':
from dateutil.tz import tzwinlocal
return (tzlocal, tzwinlocal)
else:
return (tzlocal,)
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
| 11,715 | Python | 32.284091 | 79 | 0.656594 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/regions.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Resolves regions and endpoints.
This module implements endpoint resolution, including resolving endpoints for a
given service and region and resolving the available endpoints for a service
in a specific AWS partition.
"""
import logging
import re
from botocore.exceptions import NoRegionError
LOG = logging.getLogger(__name__)
DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}'
DEFAULT_SERVICE_DATA = {'endpoints': {}}
class BaseEndpointResolver(object):
"""Resolves regions and endpoints. Must be subclassed."""
def construct_endpoint(self, service_name, region_name=None):
"""Resolves an endpoint for a service and region combination.
:type service_name: string
:param service_name: Name of the service to resolve an endpoint for
(e.g., s3)
:type region_name: string
:param region_name: Region/endpoint name to resolve (e.g., us-east-1)
if no region is provided, the first found partition-wide endpoint
will be used if available.
:rtype: dict
:return: Returns a dict containing the following keys:
- partition: (string, required) Resolved partition name
- endpointName: (string, required) Resolved endpoint name
- hostname: (string, required) Hostname to use for this endpoint
- sslCommonName: (string) sslCommonName to use for this endpoint.
- credentialScope: (dict) Signature version 4 credential scope
- region: (string) region name override when signing.
- service: (string) service name override when signing.
- signatureVersions: (list<string>) A list of possible signature
versions, including s3, v4, v2, and s3v4
- protocols: (list<string>) A list of supported protocols
(e.g., http, https)
- ...: Other keys may be included as well based on the metadata
"""
raise NotImplementedError
def get_available_partitions(self):
"""Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
"""
raise NotImplementedError
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the endpoint names of a particular partition.
:type service_name: string
        :param service_name: Name of a service to list endpoints for (e.g., s3)
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
            endpoints, aws-us-gov for AWS GovCloud (US) Endpoints, etc.)
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
raise NotImplementedError
class EndpointResolver(BaseEndpointResolver):
"""Resolves endpoints based on partition endpoint metadata"""
def __init__(self, endpoint_data):
"""
:param endpoint_data: A dict of partition data.
"""
if 'partitions' not in endpoint_data:
raise ValueError('Missing "partitions" in endpoint data')
self._endpoint_data = endpoint_data
def get_available_partitions(self):
result = []
for partition in self._endpoint_data['partitions']:
result.append(partition['partition'])
return result
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
result = []
for partition in self._endpoint_data['partitions']:
if partition['partition'] != partition_name:
continue
services = partition['services']
if service_name not in services:
continue
for endpoint_name in services[service_name]['endpoints']:
if allow_non_regional or endpoint_name in partition['regions']:
result.append(endpoint_name)
return result
def construct_endpoint(self, service_name, region_name=None, partition_name=None):
if partition_name is not None:
valid_partition = None
for partition in self._endpoint_data['partitions']:
if partition['partition'] == partition_name:
valid_partition = partition
if valid_partition is not None:
result = self._endpoint_for_partition(valid_partition, service_name,
region_name, True)
return result
return None
# Iterate over each partition until a match is found.
for partition in self._endpoint_data['partitions']:
result = self._endpoint_for_partition(
partition, service_name, region_name)
if result:
return result
def _endpoint_for_partition(self, partition, service_name, region_name,
force_partition=False):
# Get the service from the partition, or an empty template.
service_data = partition['services'].get(
service_name, DEFAULT_SERVICE_DATA)
# Use the partition endpoint if no region is supplied.
if region_name is None:
if 'partitionEndpoint' in service_data:
region_name = service_data['partitionEndpoint']
else:
raise NoRegionError()
# Attempt to resolve the exact region for this partition.
if region_name in service_data['endpoints']:
return self._resolve(
partition, service_name, service_data, region_name)
# Check to see if the endpoint provided is valid for the partition.
if self._region_match(partition, region_name) or force_partition:
# Use the partition endpoint if set and not regionalized.
partition_endpoint = service_data.get('partitionEndpoint')
is_regionalized = service_data.get('isRegionalized', True)
if partition_endpoint and not is_regionalized:
LOG.debug('Using partition endpoint for %s, %s: %s',
service_name, region_name, partition_endpoint)
return self._resolve(
partition, service_name, service_data, partition_endpoint)
LOG.debug('Creating a regex based endpoint for %s, %s',
service_name, region_name)
return self._resolve(
partition, service_name, service_data, region_name)
def _region_match(self, partition, region_name):
if region_name in partition['regions']:
return True
if 'regionRegex' in partition:
return re.compile(partition['regionRegex']).match(region_name)
return False
def _resolve(self, partition, service_name, service_data, endpoint_name):
result = service_data['endpoints'].get(endpoint_name, {})
result['partition'] = partition['partition']
result['endpointName'] = endpoint_name
# Merge in the service defaults then the partition defaults.
self._merge_keys(service_data.get('defaults', {}), result)
self._merge_keys(partition.get('defaults', {}), result)
        hostname = result.get('hostname', DEFAULT_URI_TEMPLATE)
        result['hostname'] = self._expand_template(
            partition, hostname, service_name, endpoint_name)
if 'sslCommonName' in result:
result['sslCommonName'] = self._expand_template(
partition, result['sslCommonName'], service_name,
endpoint_name)
result['dnsSuffix'] = partition['dnsSuffix']
return result
def _merge_keys(self, from_data, result):
for key in from_data:
if key not in result:
result[key] = from_data[key]
def _expand_template(self, partition, template, service_name,
endpoint_name):
return template.format(
service=service_name, region=endpoint_name,
dnsSuffix=partition['dnsSuffix'])
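# A toy illustration of the resolver above using hand-written endpoint data
# (the real data ships as endpoints.json); only keys that _resolve() actually
# touches are included:
#
#     >>> data = {'partitions': [{
#     ...     'partition': 'aws',
#     ...     'dnsSuffix': 'amazonaws.com',
#     ...     'defaults': {'hostname': DEFAULT_URI_TEMPLATE},
#     ...     'regions': {'us-east-1': {}},
#     ...     'services': {'s3': {'endpoints': {'us-east-1': {}}}},
#     ... }]}
#     >>> EndpointResolver(data).construct_endpoint('s3', 'us-east-1')['hostname']
#     's3.us-east-1.amazonaws.com'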
| 8,975 | Python | 43.435643 | 86 | 0.62585 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/parsers.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Response parsers for the various protocol types.
The module contains classes that can take an HTTP response, and given
an output shape, parse the response into a dict according to the
rules in the output shape.
There are many similarities amongst the different protocols with regard
to response parsing, and the code is structured in a way to avoid
code duplication when possible. The diagram below is a diagram
showing the inheritance hierarchy of the response classes.
::
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +-------------------+
| | |
+----------+----------+ +------+-------+ +-------+------+
|BaseXMLResponseParser| |BaseRestParser| |BaseJSONParser|
+---------------------+ +--------------+ +--------------+
^ ^ ^ ^ ^ ^
| | | | | |
| | | | | |
| ++----------+-+ +-+-----------++ |
| |RestXMLParser| |RestJSONParser| |
+-----+-----+ +-------------+ +--------------+ +----+-----+
|QueryParser| |JSONParser|
+-----------+ +----------+
The diagram above shows that there is a base class, ``ResponseParser`` that
contains logic that is similar amongst all the different protocols (``query``,
``json``, ``rest-json``, ``rest-xml``). Amongst the various services there
is shared logic that can be grouped several ways:
* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the
same way.
* The ``json`` and ``rest-json`` protocols both have JSON bodies that are
parsed in the same way.
* The ``rest-json`` and ``rest-xml`` protocols have additional attributes
besides body parameters that are parsed the same (headers, query string,
status code).
This is reflected in the class diagram above. The ``BaseXMLResponseParser``
and the BaseJSONParser contain logic for parsing the XML/JSON body,
and the BaseRestParser contains logic for parsing out attributes that
come from other parts of the HTTP response. Classes like the
``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
XML body parsing logic and the ``BaseRestParser`` to get the HTTP
header/status code/query string parsing.
Additionally, there are event stream parsers that are used by the other parsers
to wrap streaming bodies that represent a stream of events. The
BaseEventStreamParser extends from ResponseParser and defines the logic for
parsing values from the headers and payload of a message from the underlying
binary encoding protocol. Currently, event streams support parsing bodies
encoded as JSON and XML through the following hierarchy.
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +------------------+
| | |
+----------+----------+ +----------+----------+ +-------+------+
|BaseXMLResponseParser| |BaseEventStreamParser| |BaseJSONParser|
+---------------------+ +---------------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
+-+----------------+-+ +-+-----------------+-+
|EventStreamXMLParser| |EventStreamJSONParser|
+--------------------+ +---------------------+
Return Values
=============
Each call to ``parse()`` returns a dict that has this form::
Standard Response
{
"ResponseMetadata": {"RequestId": <requestid>}
<response keys>
}
Error response
{
"ResponseMetadata": {"RequestId": <requestid>}
"Error": {
"Code": <string>,
"Message": <string>,
"Type": <string>,
<additional keys>
}
}
"""
import re
import base64
import json
import logging
from botocore.compat import six, ETree, XMLParseError
from botocore.eventstream import EventStream, NoInitialResponseError
from botocore.utils import parse_timestamp, merge_dicts, \
is_json_value_header, lowercase_dict
LOG = logging.getLogger(__name__)
DEFAULT_TIMESTAMP_PARSER = parse_timestamp
class ResponseParserFactory(object):
def __init__(self):
self._defaults = {}
def set_parser_defaults(self, **kwargs):
"""Set default arguments when a parser instance is created.
You can specify any kwargs that are allowed by a ResponseParser
class. There are currently two arguments:
* timestamp_parser - A callable that can parse a timestamp string
* blob_parser - A callable that can parse a blob type
"""
self._defaults.update(kwargs)
def create_parser(self, protocol_name):
parser_cls = PROTOCOL_PARSERS[protocol_name]
return parser_cls(**self._defaults)
def create_parser(protocol):
return ResponseParserFactory().create_parser(protocol)
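# Minimal illustration of the factory above; ``parse()`` then expects the HTTP
# response dict described in ResponseParser.parse() plus an output shape from
# the service model (the shape is omitted here):
#
#     >>> parser = create_parser('rest-xml')
#     >>> type(parser).__name__
#     'RestXMLParser'
#     >>> # parser.parse({'status_code': 200, 'headers': {}, 'body': b'...'},
#     >>> #              output_shape)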
def _text_content(func):
# This decorator hides the difference between
# an XML node with text or a plain string. It's used
# to ensure that scalar processing operates only on text
# strings, which allows the same scalar handlers to be used
# for XML nodes from the body and HTTP headers.
def _get_text_content(self, shape, node_or_string):
if hasattr(node_or_string, 'text'):
text = node_or_string.text
if text is None:
# If an XML node is empty <foo></foo>,
# we want to parse that as an empty string,
# not as a null/None value.
text = ''
else:
text = node_or_string
return func(self, shape, text)
return _get_text_content
class ResponseParserError(Exception):
pass
class ResponseParser(object):
"""Base class for response parsing.
This class represents the interface that all ResponseParsers for the
various protocols must implement.
This class will take an HTTP response and a model shape and parse the
HTTP response into a dictionary.
There is a single public method exposed: ``parse``. See the ``parse``
docstring for more info.
"""
DEFAULT_ENCODING = 'utf-8'
EVENT_STREAM_PARSER_CLS = None
def __init__(self, timestamp_parser=None, blob_parser=None):
if timestamp_parser is None:
timestamp_parser = DEFAULT_TIMESTAMP_PARSER
self._timestamp_parser = timestamp_parser
if blob_parser is None:
blob_parser = self._default_blob_parser
self._blob_parser = blob_parser
self._event_stream_parser = None
if self.EVENT_STREAM_PARSER_CLS is not None:
self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS(
timestamp_parser, blob_parser)
def _default_blob_parser(self, value):
# Blobs are always returned as bytes type (this matters on python3).
# We don't decode this to a str because it's entirely possible that the
# blob contains binary data that actually can't be decoded.
return base64.b64decode(value)
def parse(self, response, shape):
"""Parse the HTTP response given a shape.
:param response: The HTTP response dictionary. This is a dictionary
that represents the HTTP request. The dictionary must have the
following keys, ``body``, ``headers``, and ``status_code``.
:param shape: The model shape describing the expected output.
:return: Returns a dictionary representing the parsed response
described by the model. In addition to the shape described from
the model, each response will also have a ``ResponseMetadata``
which contains metadata about the response, which contains at least
two keys containing ``RequestId`` and ``HTTPStatusCode``. Some
responses may populate additional keys, but ``RequestId`` will
always be present.
"""
LOG.debug('Response headers: %s', response['headers'])
LOG.debug('Response body:\n%s', response['body'])
if response['status_code'] >= 301:
if self._is_generic_error_response(response):
parsed = self._do_generic_error_parse(response)
elif self._is_modeled_error_shape(shape):
parsed = self._do_modeled_error_parse(response, shape)
# We don't want to decorate the modeled fields with metadata
return parsed
else:
parsed = self._do_error_parse(response, shape)
else:
parsed = self._do_parse(response, shape)
# We don't want to decorate event stream responses with metadata
if shape and shape.serialization.get('eventstream'):
return parsed
# Add ResponseMetadata if it doesn't exist and inject the HTTP
# status code and headers from the response.
if isinstance(parsed, dict):
response_metadata = parsed.get('ResponseMetadata', {})
response_metadata['HTTPStatusCode'] = response['status_code']
# Ensure that the http header keys are all lower cased. Older
# versions of urllib3 (< 1.11) would unintentionally do this for us
# (see urllib3#633). We need to do this conversion manually now.
headers = response['headers']
response_metadata['HTTPHeaders'] = lowercase_dict(headers)
parsed['ResponseMetadata'] = response_metadata
return parsed
def _is_modeled_error_shape(self, shape):
return shape is not None and shape.metadata.get('exception', False)
def _is_generic_error_response(self, response):
# There are times when a service will respond with a generic
# error response such as:
# '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
#
# This can also happen if you're going through a proxy.
# In this case the protocol specific _do_error_parse will either
# fail to parse the response (in the best case) or silently succeed
# and treat the HTML above as an XML response and return
# non sensical parsed data.
# To prevent this case from happening we first need to check
# whether or not this response looks like the generic response.
if response['status_code'] >= 500:
if 'body' not in response or response['body'] is None:
return True
body = response['body'].strip()
return body.startswith(b'<html>') or not body
def _do_generic_error_parse(self, response):
# There's not really much we can do when we get a generic
# html response.
LOG.debug("Received a non protocol specific error response from the "
"service, unable to populate error code and message.")
return {
'Error': {'Code': str(response['status_code']),
'Message': six.moves.http_client.responses.get(
response['status_code'], '')},
'ResponseMetadata': {},
}
def _do_parse(self, response, shape):
raise NotImplementedError("%s._do_parse" % self.__class__.__name__)
def _do_error_parse(self, response, shape):
raise NotImplementedError(
"%s._do_error_parse" % self.__class__.__name__)
    def _do_modeled_error_parse(self, response, shape):
raise NotImplementedError(
"%s._do_modeled_error_parse" % self.__class__.__name__)
def _parse_shape(self, shape, node):
handler = getattr(self, '_handle_%s' % shape.type_name,
self._default_handle)
return handler(shape, node)
def _handle_list(self, shape, node):
# Enough implementations share list serialization that it's moved
# up here in the base class.
parsed = []
member_shape = shape.member
for item in node:
parsed.append(self._parse_shape(member_shape, item))
return parsed
def _default_handle(self, shape, value):
return value
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return EventStream(response['body'], shape, parser, name)
class BaseXMLResponseParser(ResponseParser):
def __init__(self, timestamp_parser=None, blob_parser=None):
super(BaseXMLResponseParser, self).__init__(timestamp_parser,
blob_parser)
self._namespace_re = re.compile('{.*}')
def _handle_map(self, shape, node):
parsed = {}
key_shape = shape.key
value_shape = shape.value
key_location_name = key_shape.serialization.get('name') or 'key'
value_location_name = value_shape.serialization.get('name') or 'value'
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
for keyval_node in node:
for single_pair in keyval_node:
# Within each <entry> there's a <key> and a <value>
tag_name = self._node_tag(single_pair)
if tag_name == key_location_name:
key_name = self._parse_shape(key_shape, single_pair)
elif tag_name == value_location_name:
val_name = self._parse_shape(value_shape, single_pair)
else:
raise ResponseParserError("Unknown tag: %s" % tag_name)
parsed[key_name] = val_name
return parsed
def _node_tag(self, node):
return self._namespace_re.sub('', node.tag)
def _handle_list(self, shape, node):
# When we use _build_name_to_xml_node, repeated elements are aggregated
# into a list. However, we can't tell the difference between a scalar
# value and a single element flattened list. So before calling the
# real _handle_list, we know that "node" should actually be a list if
# it's flattened, and if it's not, then we make it a one element list.
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
return super(BaseXMLResponseParser, self)._handle_list(shape, node)
def _handle_structure(self, shape, node):
parsed = {}
members = shape.members
if shape.metadata.get('exception', False):
node = self._get_error_root(node)
xml_dict = self._build_name_to_xml_node(node)
for member_name in members:
member_shape = members[member_name]
if 'location' in member_shape.serialization or \
member_shape.serialization.get('eventheader'):
# All members with locations have already been handled,
# so we don't need to parse these members.
continue
xml_name = self._member_key_name(member_shape, member_name)
member_node = xml_dict.get(xml_name)
if member_node is not None:
parsed[member_name] = self._parse_shape(
member_shape, member_node)
elif member_shape.serialization.get('xmlAttribute'):
attribs = {}
location_name = member_shape.serialization['name']
for key, value in node.attrib.items():
new_key = self._namespace_re.sub(
location_name.split(':')[0] + ':', key)
attribs[new_key] = value
if location_name in attribs:
parsed[member_name] = attribs[location_name]
return parsed
def _get_error_root(self, original_root):
if self._node_tag(original_root) == 'ErrorResponse':
for child in original_root:
if self._node_tag(child) == 'Error':
return child
return original_root
def _member_key_name(self, shape, member_name):
# This method is needed because we have to special case flattened list
# with a serialization name. If this is the case we use the
# locationName from the list's member shape as the key name for the
# surrounding structure.
if shape.type_name == 'list' and shape.serialization.get('flattened'):
list_member_serialized_name = shape.member.serialization.get(
'name')
if list_member_serialized_name is not None:
return list_member_serialized_name
serialized_name = shape.serialization.get('name')
if serialized_name is not None:
return serialized_name
return member_name
def _build_name_to_xml_node(self, parent_node):
# If the parent node is actually a list. We should not be trying
# to serialize it to a dictionary. Instead, return the first element
# in the list.
if isinstance(parent_node, list):
return self._build_name_to_xml_node(parent_node[0])
xml_dict = {}
for item in parent_node:
key = self._node_tag(item)
if key in xml_dict:
# If the key already exists, the most natural
# way to handle this is to aggregate repeated
# keys into a single list.
# <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
if isinstance(xml_dict[key], list):
xml_dict[key].append(item)
else:
# Convert from a scalar to a list.
xml_dict[key] = [xml_dict[key], item]
else:
xml_dict[key] = item
return xml_dict
def _parse_xml_string_to_dom(self, xml_string):
try:
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding=self.DEFAULT_ENCODING)
parser.feed(xml_string)
root = parser.close()
except XMLParseError as e:
raise ResponseParserError(
"Unable to parse response (%s), "
"invalid XML received. Further retries may succeed:\n%s" %
(e, xml_string))
return root
def _replace_nodes(self, parsed):
for key, value in parsed.items():
if list(value):
sub_dict = self._build_name_to_xml_node(value)
parsed[key] = self._replace_nodes(sub_dict)
else:
parsed[key] = value.text
return parsed
@_text_content
def _handle_boolean(self, shape, text):
if text == 'true':
return True
else:
return False
@_text_content
def _handle_float(self, shape, text):
return float(text)
@_text_content
def _handle_timestamp(self, shape, text):
return self._timestamp_parser(text)
@_text_content
def _handle_integer(self, shape, text):
return int(text)
@_text_content
def _handle_string(self, shape, text):
return text
@_text_content
def _handle_blob(self, shape, text):
return self._blob_parser(text)
_handle_character = _handle_string
_handle_double = _handle_float
_handle_long = _handle_integer
class QueryParser(BaseXMLResponseParser):
def _do_error_parse(self, response, shape):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
# Once we've converted xml->dict, we need to make one or two
# more adjustments to extract nested errors and to be consistent
# with ResponseMetadata for non-error responses:
# 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
# 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
if 'Errors' in parsed:
parsed.update(parsed.pop('Errors'))
if 'RequestId' in parsed:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
return parsed
def _do_modeled_error_parse(self, response, shape):
return self._parse_body_as_xml(response, shape, inject_metadata=False)
def _do_parse(self, response, shape):
return self._parse_body_as_xml(response, shape, inject_metadata=True)
def _parse_body_as_xml(self, response, shape, inject_metadata=True):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = {}
if shape is not None:
start = root
if 'resultWrapper' in shape.serialization:
start = self._find_result_wrapped_shape(
shape.serialization['resultWrapper'],
root)
parsed = self._parse_shape(shape, start)
if inject_metadata:
self._inject_response_metadata(root, parsed)
return parsed
def _find_result_wrapped_shape(self, element_name, xml_root_node):
mapping = self._build_name_to_xml_node(xml_root_node)
return mapping[element_name]
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('ResponseMetadata')
if child_node is not None:
sub_mapping = self._build_name_to_xml_node(child_node)
for key, value in sub_mapping.items():
sub_mapping[key] = value.text
inject_into['ResponseMetadata'] = sub_mapping
class EC2QueryParser(QueryParser):
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('requestId')
if child_node is not None:
inject_into['ResponseMetadata'] = {'RequestId': child_node.text}
def _do_error_parse(self, response, shape):
# EC2 errors look like:
# <Response>
# <Errors>
# <Error>
# <Code>InvalidInstanceID.Malformed</Code>
# <Message>Invalid id: "1343124"</Message>
# </Error>
# </Errors>
# <RequestID>12345</RequestID>
# </Response>
# This is different from QueryParser in that it's RequestID,
# not RequestId
original = super(EC2QueryParser, self)._do_error_parse(response, shape)
if 'RequestID' in original:
original['ResponseMetadata'] = {
'RequestId': original.pop('RequestID')
}
return original
def _get_error_root(self, original_root):
for child in original_root:
if self._node_tag(child) == 'Errors':
for errors_child in child:
if self._node_tag(errors_child) == 'Error':
return errors_child
return original_root
class BaseJSONParser(ResponseParser):
def _handle_structure(self, shape, value):
member_shapes = shape.members
if value is None:
            # If it comes across the wire as "null" (None in python),
# we should be returning this unchanged, instead of as an
# empty dict.
return None
final_parsed = {}
for member_name in member_shapes:
member_shape = member_shapes[member_name]
json_name = member_shape.serialization.get('name', member_name)
raw_value = value.get(json_name)
if raw_value is not None:
final_parsed[member_name] = self._parse_shape(
member_shapes[member_name],
raw_value)
return final_parsed
def _handle_map(self, shape, value):
parsed = {}
key_shape = shape.key
value_shape = shape.value
for key, value in value.items():
actual_key = self._parse_shape(key_shape, key)
actual_value = self._parse_shape(value_shape, value)
parsed[actual_key] = actual_value
return parsed
def _handle_blob(self, shape, value):
return self._blob_parser(value)
def _handle_timestamp(self, shape, value):
return self._timestamp_parser(value)
def _do_error_parse(self, response, shape):
body = self._parse_body_as_json(response['body'])
error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
# Error responses can have slightly different structures for json.
# The basic structure is:
#
# {"__type":"ConnectClientException",
# "message":"The error message."}
# The error message can either come in the 'message' or 'Message' key
# so we need to check for both.
error['Error']['Message'] = body.get('message',
body.get('Message', ''))
# if the message did not contain an error code
# include the response status code
response_code = response.get('status_code')
code = body.get('__type', response_code and str(response_code))
if code is not None:
# code has a couple forms as well:
# * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
# * "ResourceNotFoundException"
if '#' in code:
code = code.rsplit('#', 1)[1]
error['Error']['Code'] = code
self._inject_response_metadata(error, response['headers'])
return error
def _inject_response_metadata(self, parsed, headers):
if 'x-amzn-requestid' in headers:
parsed.setdefault('ResponseMetadata', {})['RequestId'] = (
headers['x-amzn-requestid'])
def _parse_body_as_json(self, body_contents):
if not body_contents:
return {}
body = body_contents.decode(self.DEFAULT_ENCODING)
try:
original_parsed = json.loads(body)
return original_parsed
except ValueError:
# if the body cannot be parsed, include
# the literal string as the message
return { 'message': body }
class BaseEventStreamParser(ResponseParser):
def _do_parse(self, response, shape):
final_parsed = {}
if shape.serialization.get('eventstream'):
event_type = response['headers'].get(':event-type')
event_shape = shape.members.get(event_type)
if event_shape:
final_parsed[event_type] = self._do_parse(response, event_shape)
else:
self._parse_non_payload_attrs(response, shape,
shape.members, final_parsed)
self._parse_payload(response, shape, shape.members, final_parsed)
return final_parsed
def _do_error_parse(self, response, shape):
exception_type = response['headers'].get(':exception-type')
exception_shape = shape.members.get(exception_type)
if exception_shape is not None:
original_parsed = self._initial_body_parse(response['body'])
body = self._parse_shape(exception_shape, original_parsed)
error = {
'Error': {
'Code': exception_type,
'Message': body.get('Message', body.get('message', ''))
}
}
else:
error = {
'Error': {
'Code': response['headers'].get(':error-code', ''),
'Message': response['headers'].get(':error-message', ''),
}
}
return error
def _parse_payload(self, response, shape, member_shapes, final_parsed):
if shape.serialization.get('event'):
for name in member_shapes:
member_shape = member_shapes[name]
if member_shape.serialization.get('eventpayload'):
body = response['body']
if member_shape.type_name == 'blob':
parsed_body = body
elif member_shape.type_name == 'string':
parsed_body = body.decode(self.DEFAULT_ENCODING)
else:
raw_parse = self._initial_body_parse(body)
parsed_body = self._parse_shape(member_shape, raw_parse)
final_parsed[name] = parsed_body
return
# If we didn't find an explicit payload, use the current shape
original_parsed = self._initial_body_parse(response['body'])
body_parsed = self._parse_shape(shape, original_parsed)
final_parsed.update(body_parsed)
def _parse_non_payload_attrs(self, response, shape,
member_shapes, final_parsed):
headers = response['headers']
for name in member_shapes:
member_shape = member_shapes[name]
if member_shape.serialization.get('eventheader'):
if name in headers:
value = headers[name]
if member_shape.type_name == 'timestamp':
                        # Event stream timestamps are in milliseconds, so we
# divide by 1000 to convert to seconds.
value = self._timestamp_parser(value / 1000.0)
final_parsed[name] = value
def _initial_body_parse(self, body_contents):
# This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
# to convert types, but this method will do the first round
# of parsing.
raise NotImplementedError("_initial_body_parse")
class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser):
def _initial_body_parse(self, body_contents):
return self._parse_body_as_json(body_contents)
class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser):
def _initial_body_parse(self, xml_string):
if not xml_string:
return ETree.Element('')
return self._parse_xml_string_to_dom(xml_string)
class JSONParser(BaseJSONParser):
    """Response parser for the "json" protocol."""
    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
def _do_parse(self, response, shape):
parsed = {}
if shape is not None:
event_name = shape.event_stream_name
if event_name:
parsed = self._handle_event_stream(response, shape, event_name)
else:
parsed = self._handle_json_body(response['body'], shape)
self._inject_response_metadata(parsed, response['headers'])
return parsed
def _do_modeled_error_parse(self, response, shape):
return self._handle_json_body(response['body'], shape)
def _handle_event_stream(self, response, shape, event_name):
event_stream_shape = shape.members[event_name]
event_stream = self._create_event_stream(response, event_stream_shape)
try:
event = event_stream.get_initial_response()
except NoInitialResponseError:
error_msg = 'First event was not of type initial-response'
raise ResponseParserError(error_msg)
parsed = self._handle_json_body(event.payload, shape)
parsed[event_name] = event_stream
return parsed
def _handle_json_body(self, raw_body, shape):
# The json.loads() gives us the primitive JSON types,
# but we need to traverse the parsed JSON data to convert
        # to richer types (blobs, timestamps, etc.).
parsed_json = self._parse_body_as_json(raw_body)
return self._parse_shape(shape, parsed_json)
class BaseRestParser(ResponseParser):
def _do_parse(self, response, shape):
final_parsed = {}
final_parsed['ResponseMetadata'] = self._populate_response_metadata(
response)
self._add_modeled_parse(response, shape, final_parsed)
return final_parsed
def _add_modeled_parse(self, response, shape, final_parsed):
if shape is None:
return final_parsed
member_shapes = shape.members
self._parse_non_payload_attrs(response, shape,
member_shapes, final_parsed)
self._parse_payload(response, shape, member_shapes, final_parsed)
def _do_modeled_error_parse(self, response, shape):
final_parsed = {}
self._add_modeled_parse(response, shape, final_parsed)
return final_parsed
def _populate_response_metadata(self, response):
metadata = {}
headers = response['headers']
if 'x-amzn-requestid' in headers:
metadata['RequestId'] = headers['x-amzn-requestid']
elif 'x-amz-request-id' in headers:
metadata['RequestId'] = headers['x-amz-request-id']
# HostId is what it's called whenever this value is returned
# in an XML response body, so to be consistent, we'll always
        # call it HostId.
metadata['HostId'] = headers.get('x-amz-id-2', '')
return metadata
def _parse_payload(self, response, shape, member_shapes, final_parsed):
if 'payload' in shape.serialization:
# If a payload is specified in the output shape, then only that
# shape is used for the body payload.
payload_member_name = shape.serialization['payload']
body_shape = member_shapes[payload_member_name]
if body_shape.serialization.get('eventstream'):
body = self._create_event_stream(response, body_shape)
final_parsed[payload_member_name] = body
elif body_shape.type_name in ['string', 'blob']:
# This is a stream
body = response['body']
if isinstance(body, bytes):
body = body.decode(self.DEFAULT_ENCODING)
final_parsed[payload_member_name] = body
else:
original_parsed = self._initial_body_parse(response['body'])
final_parsed[payload_member_name] = self._parse_shape(
body_shape, original_parsed)
else:
original_parsed = self._initial_body_parse(response['body'])
body_parsed = self._parse_shape(shape, original_parsed)
final_parsed.update(body_parsed)
def _parse_non_payload_attrs(self, response, shape,
member_shapes, final_parsed):
headers = response['headers']
for name in member_shapes:
member_shape = member_shapes[name]
location = member_shape.serialization.get('location')
if location is None:
continue
elif location == 'statusCode':
final_parsed[name] = self._parse_shape(
member_shape, response['status_code'])
elif location == 'headers':
final_parsed[name] = self._parse_header_map(member_shape,
headers)
elif location == 'header':
header_name = member_shape.serialization.get('name', name)
if header_name in headers:
final_parsed[name] = self._parse_shape(
member_shape, headers[header_name])
def _parse_header_map(self, shape, headers):
# Note that headers are case insensitive, so we .lower()
# all header names and header prefixes.
parsed = {}
prefix = shape.serialization.get('name', '').lower()
for header_name in headers:
if header_name.lower().startswith(prefix):
# The key name inserted into the parsed hash
# strips off the prefix.
name = header_name[len(prefix):]
parsed[name] = headers[header_name]
return parsed
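    # Illustrative values (not from the source): given a map shape whose
    # serialization name is the header prefix 'x-amz-meta-', the method above
    # turns {'x-amz-meta-color': 'blue'} into {'color': 'blue'}; only the
    # prefix is stripped and the remainder becomes the key.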
def _initial_body_parse(self, body_contents):
# This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
# to convert types, but this method will do the first round
# of parsing.
raise NotImplementedError("_initial_body_parse")
def _handle_string(self, shape, value):
parsed = value
if is_json_value_header(shape):
decoded = base64.b64decode(value).decode(self.DEFAULT_ENCODING)
parsed = json.loads(decoded)
return parsed
class RestJSONParser(BaseRestParser, BaseJSONParser):
EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
def _initial_body_parse(self, body_contents):
return self._parse_body_as_json(body_contents)
def _do_error_parse(self, response, shape):
error = super(RestJSONParser, self)._do_error_parse(response, shape)
self._inject_error_code(error, response)
return error
def _inject_error_code(self, error, response):
# The "Code" value can come from either a response
# header or a value in the JSON body.
body = self._initial_body_parse(response['body'])
if 'x-amzn-errortype' in response['headers']:
code = response['headers']['x-amzn-errortype']
# Could be:
# x-amzn-errortype: ValidationException:
code = code.split(':')[0]
error['Error']['Code'] = code
elif 'code' in body or 'Code' in body:
error['Error']['Code'] = body.get(
'code', body.get('Code', ''))
class RestXMLParser(BaseRestParser, BaseXMLResponseParser):
EVENT_STREAM_PARSER_CLS = EventStreamXMLParser
def _initial_body_parse(self, xml_string):
if not xml_string:
return ETree.Element('')
return self._parse_xml_string_to_dom(xml_string)
def _do_error_parse(self, response, shape):
# We're trying to be service agnostic here, but S3 does have a slightly
# different response structure for its errors compared to other
        # rest-xml services (route53/cloudfront). We handle this by just
# trying to parse both forms.
# First:
# <ErrorResponse xmlns="...">
# <Error>
# <Type>Sender</Type>
# <Code>InvalidInput</Code>
# <Message>Invalid resource type: foo</Message>
# </Error>
# <RequestId>request-id</RequestId>
# </ErrorResponse>
if response['body']:
# If the body ends up being invalid xml, the xml parser should not
# blow up. It should at least try to pull information about the
            # error response from other sources like the HTTP status code.
try:
return self._parse_error_from_body(response)
except ResponseParserError as e:
LOG.debug(
'Exception caught when parsing error response body:',
exc_info=True)
return self._parse_error_from_http_status(response)
def _parse_error_from_http_status(self, response):
return {
'Error': {
'Code': str(response['status_code']),
'Message': six.moves.http_client.responses.get(
response['status_code'], ''),
},
'ResponseMetadata': {
'RequestId': response['headers'].get('x-amz-request-id', ''),
'HostId': response['headers'].get('x-amz-id-2', ''),
}
}
def _parse_error_from_body(self, response):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
if root.tag == 'Error':
# This is an S3 error response. First we'll populate the
# response metadata.
metadata = self._populate_response_metadata(response)
# The RequestId and the HostId are already in the
# ResponseMetadata, but are also duplicated in the XML
# body. We don't need these values in both places,
# we'll just remove them from the parsed XML body.
parsed.pop('RequestId', '')
parsed.pop('HostId', '')
return {'Error': parsed, 'ResponseMetadata': metadata}
elif 'RequestId' in parsed:
            # Other rest-xml services:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
default = {'Error': {'Message': '', 'Code': ''}}
merge_dicts(default, parsed)
return default
@_text_content
def _handle_string(self, shape, text):
text = super(RestXMLParser, self)._handle_string(shape, text)
return text
PROTOCOL_PARSERS = {
'ec2': EC2QueryParser,
'query': QueryParser,
'json': JSONParser,
'rest-json': RestJSONParser,
'rest-xml': RestXMLParser,
}
| 42,301 | Python | 40.431929 | 80 | 0.574431 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/throttling.py | from collections import namedtuple
CubicParams = namedtuple('CubicParams', ['w_max', 'k', 'last_fail'])
class CubicCalculator(object):
_SCALE_CONSTANT = 0.4
_BETA = 0.7
def __init__(self, starting_max_rate,
start_time,
scale_constant=_SCALE_CONSTANT, beta=_BETA):
self._w_max = starting_max_rate
self._scale_constant = scale_constant
self._beta = beta
self._k = self._calculate_zero_point()
self._last_fail = start_time
def _calculate_zero_point(self):
k = ((self._w_max * (1 - self._beta)) / self._scale_constant) ** (1 / 3.0)
return k
def success_received(self, timestamp):
dt = timestamp - self._last_fail
new_rate = (
self._scale_constant * (dt - self._k) ** 3 + self._w_max
)
return new_rate
def error_received(self, current_rate, timestamp):
# Consider not having this be the current measured rate.
# We have a new max rate, which is the current rate we were sending
# at when we received an error response.
self._w_max = current_rate
self._k = self._calculate_zero_point()
self._last_fail = timestamp
return current_rate * self._beta
def get_params_snapshot(self):
"""Return a read-only object of the current cubic parameters.
These parameters are intended to be used for debug/troubleshooting
purposes. These object is a read-only snapshot and cannot be used
to modify the behavior of the CUBIC calculations.
New parameters may be added to this object in the future.
"""
return CubicParams(
w_max=self._w_max,
k=self._k,
last_fail=self._last_fail
)
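# A small worked example of the CUBIC parameters above (sketch, values
# rounded; timestamps are arbitrary floats in seconds):
#
#     >>> calc = CubicCalculator(starting_max_rate=10, start_time=0)
#     >>> round(calc.get_params_snapshot().k, 3)   # ((10 * 0.3) / 0.4) ** (1/3)
#     1.957
#     >>> calc.success_received(timestamp=calc.get_params_snapshot().k)
#     10.0
#     >>> calc.error_received(current_rate=8, timestamp=5)   # 8 * beta
#     5.6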
| 1,788 | Python | 31.527272 | 82 | 0.595638 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/quota.py | """Retry quota implementation.
"""
import threading
class RetryQuota(object):
INITIAL_CAPACITY = 500
def __init__(self, initial_capacity=INITIAL_CAPACITY, lock=None):
self._max_capacity = initial_capacity
self._available_capacity = initial_capacity
if lock is None:
lock = threading.Lock()
self._lock = lock
def acquire(self, capacity_amount):
"""Attempt to aquire a certain amount of capacity.
If there's not sufficient amount of capacity available, ``False``
is returned. Otherwise, ``True`` is returned, which indicates that
capacity was successfully allocated.
"""
# The acquire() is only called when we encounter a retryable
# response so we aren't worried about locking the entire method.
with self._lock:
if capacity_amount > self._available_capacity:
return False
self._available_capacity -= capacity_amount
return True
def release(self, capacity_amount):
"""Release capacity back to the retry quota.
The capacity being released will be truncated if necessary
to ensure the max capacity is never exceeded.
"""
# Implementation note: The release() method is called as part
# of the "after-call" event, which means it gets invoked for
# every API call. In the common case where the request is
# successful and we're at full capacity, we can avoid locking.
# We can't exceed max capacity so there's no work we have to do.
if self._max_capacity == self._available_capacity:
return
with self._lock:
amount = min(
self._max_capacity - self._available_capacity,
capacity_amount
)
self._available_capacity += amount
@property
def available_capacity(self):
return self._available_capacity
| 1,963 | Python | 32.862068 | 75 | 0.621498 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/base.py | class BaseRetryBackoff(object):
def delay_amount(self, context):
"""Calculate how long we should delay before retrying.
:type context: RetryContext
"""
raise NotImplementedError("delay_amount")
class BaseRetryableChecker(object):
"""Base class for determining if a retry should happen.
This base class checks for specific retryable conditions.
A single retryable checker doesn't necessarily indicate a retry
will happen. It's up to the ``RetryPolicy`` to use its
``BaseRetryableCheckers`` to make the final decision on whether a retry
should happen.
"""
def is_retryable(self, context):
"""Returns True if retryable, False if not.
:type context: RetryContext
"""
raise NotImplementedError("is_retryable") | 813 | Python | 29.148147 | 75 | 0.681427 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/adaptive.py | import math
import logging
import threading
from botocore.retries import bucket
from botocore.retries import throttling
from botocore.retries import standard
logger = logging.getLogger(__name__)
def register_retry_handler(client):
clock = bucket.Clock()
rate_adjustor = throttling.CubicCalculator(starting_max_rate=0,
start_time=clock.current_time())
token_bucket = bucket.TokenBucket(max_rate=1, clock=clock)
rate_clocker = RateClocker(clock)
throttling_detector = standard.ThrottlingErrorDetector(
retry_event_adapter=standard.RetryEventAdapter(),
)
limiter = ClientRateLimiter(
rate_adjustor=rate_adjustor,
rate_clocker=rate_clocker,
token_bucket=token_bucket,
throttling_detector=throttling_detector,
clock=clock,
)
client.meta.events.register(
'before-send', limiter.on_sending_request,
)
client.meta.events.register(
'needs-retry', limiter.on_receiving_response,
)
return limiter
class ClientRateLimiter(object):
_MAX_RATE_ADJUST_SCALE = 2.0
def __init__(self, rate_adjustor, rate_clocker, token_bucket,
throttling_detector, clock):
self._rate_adjustor = rate_adjustor
self._rate_clocker = rate_clocker
self._token_bucket = token_bucket
self._throttling_detector = throttling_detector
self._clock = clock
self._enabled = False
self._lock = threading.Lock()
def on_sending_request(self, request, **kwargs):
if self._enabled:
self._token_bucket.acquire()
# Hooked up to needs-retry.
def on_receiving_response(self, **kwargs):
measured_rate = self._rate_clocker.record()
timestamp = self._clock.current_time()
with self._lock:
if not self._throttling_detector.is_throttling_error(**kwargs):
throttling = False
new_rate = self._rate_adjustor.success_received(timestamp)
else:
throttling = True
if not self._enabled:
rate_to_use = measured_rate
else:
rate_to_use = min(measured_rate, self._token_bucket.max_rate)
new_rate = self._rate_adjustor.error_received(
rate_to_use, timestamp)
logger.debug("Throttling response received, new send rate: %s "
"measured rate: %s, token bucket capacity "
"available: %s", new_rate, measured_rate,
self._token_bucket.available_capacity)
self._enabled = True
self._token_bucket.max_rate = min(
new_rate, self._MAX_RATE_ADJUST_SCALE * measured_rate)
class RateClocker(object):
"""Tracks the rate at which a client is sending a request."""
_DEFAULT_SMOOTHING = 0.8
# Update the rate every _TIME_BUCKET_RANGE seconds.
_TIME_BUCKET_RANGE = 0.5
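    # Informally, the measured rate is an exponentially weighted moving
    # average computed over half-second buckets; with the default smoothing
    # of 0.8, for example, each update is
    #     new_rate = 0.8 * current_bucket_rate + 0.2 * previous_measured_rate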
def __init__(self, clock, smoothing=_DEFAULT_SMOOTHING,
time_bucket_range=_TIME_BUCKET_RANGE):
self._clock = clock
self._measured_rate = 0
self._smoothing = smoothing
self._last_bucket = math.floor(self._clock.current_time())
self._time_bucket_scale = 1 / self._TIME_BUCKET_RANGE
self._count = 0
self._lock = threading.Lock()
def record(self, amount=1):
with self._lock:
t = self._clock.current_time()
bucket = math.floor(
t * self._time_bucket_scale) / self._time_bucket_scale
self._count += amount
if bucket > self._last_bucket:
current_rate = self._count / float(
bucket - self._last_bucket)
self._measured_rate = (
(current_rate * self._smoothing) +
(self._measured_rate * (1 - self._smoothing))
)
self._count = 0
self._last_bucket = bucket
return self._measured_rate
@property
def measured_rate(self):
return self._measured_rate
| 4,191 | Python | 34.525423 | 81 | 0.580768 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/__init__.py | """New retry v2 handlers.
This package obsoletes the botocore/retryhandler.py module and contains
new retry logic.
"""
| 121 | Python | 16.428569 | 71 | 0.768595 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/bucket.py | """This module implements token buckets used for client side throttling."""
import time
import threading
from botocore.exceptions import CapacityNotAvailableError
class Clock(object):
def __init__(self):
pass
def sleep(self, amount):
time.sleep(amount)
def current_time(self):
return time.time()
class TokenBucket(object):
_MIN_RATE = 0.5
def __init__(self, max_rate, clock, min_rate=_MIN_RATE):
self._fill_rate = None
self._max_capacity = None
self._current_capacity = 0
self._clock = clock
self._last_timestamp = None
self._min_rate = min_rate
self._lock = threading.Lock()
self._new_fill_rate_condition = threading.Condition(self._lock)
self.max_rate = max_rate
@property
def max_rate(self):
return self._fill_rate
@max_rate.setter
def max_rate(self, value):
with self._new_fill_rate_condition:
# Before we can change the rate we need to fill any pending
# tokens we might have based on the current rate. If we don't
# do this it means everything since the last recorded timestamp
# will accumulate at the rate we're about to set which isn't
# correct.
self._refill()
self._fill_rate = max(value, self._min_rate)
if value >= 1:
self._max_capacity = value
else:
self._max_capacity = 1
# If we're scaling down, we also can't have a capacity that's
# more than our max_capacity.
self._current_capacity = min(self._current_capacity,
self._max_capacity)
self._new_fill_rate_condition.notify()
@property
def max_capacity(self):
return self._max_capacity
@property
def available_capacity(self):
return self._current_capacity
def acquire(self, amount=1, block=True):
"""Acquire token or return amount of time until next token available.
If block is True, then this method will block until there's sufficient
capacity to acquire the desired amount.
If block is False, then this method will return True is capacity
was successfully acquired, False otherwise.
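
        A minimal usage sketch (the rate and amounts are illustrative)::

            bucket = TokenBucket(max_rate=10, clock=Clock())
            bucket.acquire()                # blocks until capacity is available
            bucket.acquire(2, block=False)  # True, or raises CapacityNotAvailableError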
"""
with self._new_fill_rate_condition:
return self._acquire(amount=amount, block=block)
def _acquire(self, amount, block):
self._refill()
if amount <= self._current_capacity:
self._current_capacity -= amount
return True
else:
if not block:
raise CapacityNotAvailableError()
# Not enough capacity.
sleep_amount = self._sleep_amount(amount)
while sleep_amount > 0:
# Until python3.2, wait() always returned None so we can't
# tell if a timeout occurred waiting on the cond var.
# Because of this we'll unconditionally call _refill().
                # The downside to this is that if we were woken up via
                # a notify(), we end up unnecessarily calling _refill() an
                # extra time.
self._new_fill_rate_condition.wait(sleep_amount)
self._refill()
sleep_amount = self._sleep_amount(amount)
self._current_capacity -= amount
return True
def _sleep_amount(self, amount):
return (amount - self._current_capacity) / self._fill_rate
def _refill(self):
timestamp = self._clock.current_time()
if self._last_timestamp is None:
self._last_timestamp = timestamp
return
current_capacity = self._current_capacity
fill_amount = (timestamp - self._last_timestamp) * self._fill_rate
new_capacity = min(self._max_capacity, current_capacity + fill_amount)
self._current_capacity = new_capacity
self._last_timestamp = timestamp
| 4,020 | Python | 33.965217 | 78 | 0.590299 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/standard.py | """Standard retry behavior.
This contains the default standard retry behavior.
It provides consistent behavior with other AWS SDKs.
The key base classes uses for retries:
* ``BaseRetryableChecker`` - Use to check a specific condition that
indicates a retry should happen. This can include things like
max attempts, HTTP status code checks, error code checks etc.
* ``RetryBackoff`` - Use to determine how long we should backoff until
we retry a request. This is the class that will implement delay such
as exponential backoff.
* ``RetryPolicy`` - Main class that determines if a retry should
happen. It can combine data from a various BaseRetryableCheckers
to make a final call as to whether or not a retry should happen.
It then uses a ``BaseRetryBackoff`` to determine how long to delay.
* ``RetryHandler`` - The bridge between botocore's event system
used by endpoint.py to manage retries and the interfaces defined
in this module.
This allows us to define an API that has minimal coupling to the event
based API used by botocore.
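
As a rough sketch, the pieces are typically wired together the way
``register_retry_handler`` below does it::

    handler = RetryHandler(
        retry_policy=RetryPolicy(
            retry_checker=StandardRetryConditions(max_attempts=3),
            retry_backoff=ExponentialBackoff(),
        ),
        retry_event_adapter=RetryEventAdapter(),
        retry_quota=RetryQuotaChecker(quota.RetryQuota()),
    )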
"""
import random
import logging
from botocore.exceptions import ConnectionError, HTTPClientError
from botocore.exceptions import ReadTimeoutError, ConnectTimeoutError
from botocore.retries import quota
from botocore.retries import special
from botocore.retries.base import BaseRetryBackoff, BaseRetryableChecker
DEFAULT_MAX_ATTEMPTS = 3
logger = logging.getLogger(__name__)
def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
retry_quota = RetryQuotaChecker(quota.RetryQuota())
service_id = client.meta.service_model.service_id
service_event_name = service_id.hyphenize()
client.meta.events.register('after-call.%s' % service_event_name,
retry_quota.release_retry_quota)
handler = RetryHandler(
retry_policy=RetryPolicy(
retry_checker=StandardRetryConditions(max_attempts=max_attempts),
retry_backoff=ExponentialBackoff(),
),
retry_event_adapter=RetryEventAdapter(),
retry_quota=retry_quota,
)
unique_id = 'retry-config-%s' % service_event_name
client.meta.events.register(
'needs-retry.%s' % service_event_name, handler.needs_retry,
unique_id=unique_id
)
return handler
class RetryHandler(object):
"""Bridge between botocore's event system and this module.
This class is intended to be hooked to botocore's event system
as an event handler.
"""
def __init__(self, retry_policy, retry_event_adapter, retry_quota):
self._retry_policy = retry_policy
self._retry_event_adapter = retry_event_adapter
self._retry_quota = retry_quota
def needs_retry(self, **kwargs):
"""Connect as a handler to the needs-retry event."""
retry_delay = None
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._retry_policy.should_retry(context):
# Before we can retry we need to ensure we have sufficient
# capacity in our retry quota.
if self._retry_quota.acquire_retry_quota(context):
retry_delay = self._retry_policy.compute_retry_delay(context)
logger.debug("Retry needed, retrying request after "
"delay of: %s", retry_delay)
else:
logger.debug("Retry needed but retry quota reached, "
"not retrying request.")
else:
logger.debug("Not retrying request.")
self._retry_event_adapter.adapt_retry_response_from_context(
context)
return retry_delay
class RetryEventAdapter(object):
"""Adapter to existing retry interface used in the endpoints layer.
This existing interface for determining if a retry needs to happen
is event based and used in ``botocore.endpoint``. The interface has
grown organically over the years and could use some cleanup. This
adapter converts that interface into the interface used by the
new retry strategies.
"""
def create_retry_context(self, **kwargs):
"""Create context based on needs-retry kwargs."""
response = kwargs['response']
if response is None:
# If response is None it means that an exception was raised
# because we never received a response from the service. This
# could be something like a ConnectionError we get from our
# http layer.
http_response = None
parsed_response = None
else:
http_response, parsed_response = response
# This provides isolation between the kwargs emitted in the
# needs-retry event, and what this module uses to check for
# retries.
context = RetryContext(
attempt_number=kwargs['attempts'],
operation_model=kwargs['operation'],
http_response=http_response,
parsed_response=parsed_response,
caught_exception=kwargs['caught_exception'],
request_context=kwargs['request_dict']['context'],
)
return context
def adapt_retry_response_from_context(self, context):
"""Modify response back to user back from context."""
# This will mutate attributes that are returned back to the end
# user. We do it this way so that all the various retry classes
# don't mutate any input parameters from the needs-retry event.
metadata = context.get_retry_metadata()
if context.parsed_response is not None:
context.parsed_response.setdefault(
'ResponseMetadata', {}).update(metadata)
# Implementation note: this is meant to encapsulate all the misc stuff
# that gets sent in the needs-retry event. This is mapped so that params
# are more clear and explicit.
class RetryContext(object):
"""Normalize a response that we use to check if a retry should occur.
This class smoothes over the different types of responses we may get
from a service including:
* A modeled error response from the service that contains a service
code and error message.
* A raw HTTP response that doesn't contain service protocol specific
error keys.
* An exception received while attempting to retrieve a response.
This could be a ConnectionError we receive from our HTTP layer which
could represent that we weren't able to receive a response from
the service.
This class guarantees that at least one of the above attributes will be
non None.
This class is meant to provide a read-only view into the properties
associated with a possible retryable response. None of the properties
are meant to be modified directly.
"""
def __init__(self, attempt_number, operation_model=None,
parsed_response=None, http_response=None,
caught_exception=None, request_context=None):
# 1-based attempt number.
self.attempt_number = attempt_number
self.operation_model = operation_model
# This is the parsed response dictionary we get from parsing
# the HTTP response from the service.
self.parsed_response = parsed_response
# This is an instance of botocore.awsrequest.AWSResponse.
self.http_response = http_response
# This is a subclass of Exception that will be non None if
# an exception was raised when retrying to retrieve a response.
self.caught_exception = caught_exception
# This is the request context dictionary that's added to the
        # request dict. This is used to store any additional state
# about the request. We use this for storing retry quota
# capacity.
if request_context is None:
request_context = {}
self.request_context = request_context
self._retry_metadata = {}
# These are misc helper methods to avoid duplication in the various
# checkers.
def get_error_code(self):
"""Check if there was a parsed response with an error code.
If we could not find any error codes, ``None`` is returned.
"""
if self.parsed_response is None:
return
error = self.parsed_response.get('Error', {})
if not isinstance(error, dict):
return
return error.get('Code')
def add_retry_metadata(self, **kwargs):
"""Add key/value pairs to the retry metadata.
This allows any objects during the retry process to add
metadata about any checks/validations that happened.
This gets added to the response metadata in the retry handler.
"""
self._retry_metadata.update(**kwargs)
def get_retry_metadata(self):
return self._retry_metadata.copy()
class RetryPolicy(object):
def __init__(self, retry_checker, retry_backoff):
self._retry_checker = retry_checker
self._retry_backoff = retry_backoff
def should_retry(self, context):
return self._retry_checker.is_retryable(context)
def compute_retry_delay(self, context):
return self._retry_backoff.delay_amount(context)
class ExponentialBackoff(BaseRetryBackoff):
_BASE = 2
_MAX_BACKOFF = 20
def __init__(self, max_backoff=20, random=random.random):
self._base = self._BASE
self._max_backoff = max_backoff
self._random = random
def delay_amount(self, context):
"""Calculates delay based on exponential backoff.
This class implements truncated binary exponential backoff
with jitter::
t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)
where ``i`` is the request attempt (0 based).
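
        For illustration, with the default base of 2 and a max backoff of 20
        seconds, the successive upper bounds on the delay (before the random
        jitter factor is applied) are::

            attempt:  1  2  3  4  5   6   7  ...
            bound:    1  2  4  8  16  20  20 ...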
"""
# The context.attempt_number is a 1-based value, but we have
        # to calculate the delay based on a 0-based value. We
# want the first delay to just be ``rand(0, 1)``.
return min(
self._random() * (self._base ** (context.attempt_number - 1)),
self._max_backoff
)
class MaxAttemptsChecker(BaseRetryableChecker):
def __init__(self, max_attempts):
self._max_attempts = max_attempts
def is_retryable(self, context):
under_max_attempts = context.attempt_number < self._max_attempts
if not under_max_attempts:
logger.debug("Max attempts of %s reached.", self._max_attempts)
context.add_retry_metadata(MaxAttemptsReached=True)
return under_max_attempts
class TransientRetryableChecker(BaseRetryableChecker):
_TRANSIENT_ERROR_CODES = [
'RequestTimeout',
'RequestTimeoutException',
'PriorRequestNotComplete',
]
_TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
_TRANSIENT_EXCEPTION_CLS = (
ConnectionError,
HTTPClientError,
)
def __init__(self, transient_error_codes=None,
transient_status_codes=None,
transient_exception_cls=None):
if transient_error_codes is None:
transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
if transient_status_codes is None:
transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
if transient_exception_cls is None:
transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
self._transient_error_codes = transient_error_codes
self._transient_status_codes = transient_status_codes
self._transient_exception_cls = transient_exception_cls
def is_retryable(self, context):
if context.get_error_code() in self._transient_error_codes:
return True
if context.http_response is not None:
if context.http_response.status_code in \
self._transient_status_codes:
return True
if context.caught_exception is not None:
return isinstance(context.caught_exception,
self._transient_exception_cls)
return False
class ThrottledRetryableChecker(BaseRetryableChecker):
# This is the union of all error codes we've seen that represent
# a throttled error.
_THROTTLED_ERROR_CODES = [
'Throttling',
'ThrottlingException',
'ThrottledException',
'RequestThrottledException',
'TooManyRequestsException',
'ProvisionedThroughputExceededException',
'TransactionInProgressException',
'RequestLimitExceeded',
'BandwidthLimitExceeded',
'LimitExceededException',
'RequestThrottled',
'SlowDown',
'PriorRequestNotComplete',
'EC2ThrottledException',
]
def __init__(self, throttled_error_codes=None):
if throttled_error_codes is None:
throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
self._throttled_error_codes = throttled_error_codes
def is_retryable(self, context):
# Only the error code from a parsed service response is used
# to determine if the response is a throttled response.
return context.get_error_code() in self._throttled_error_codes
class ModeledRetryableChecker(BaseRetryableChecker):
"""Check if an error has been modeled as retryable."""
def __init__(self):
self._error_detector = ModeledRetryErrorDetector()
def is_retryable(self, context):
error_code = context.get_error_code()
if error_code is None:
return False
return self._error_detector.detect_error_type(context) is not None
class ModeledRetryErrorDetector(object):
"""Checks whether or not an error is a modeled retryable error."""
    # These are the return values from the detect_error_type() method.
TRANSIENT_ERROR = 'TRANSIENT_ERROR'
THROTTLING_ERROR = 'THROTTLING_ERROR'
# This class is lower level than ModeledRetryableChecker, which
# implements BaseRetryableChecker. This object allows you to distinguish
# between the various types of retryable errors.
def detect_error_type(self, context):
"""Detect the error type associated with an error code and model.
This will either return:
* ``self.TRANSIENT_ERROR`` - If the error is a transient error
* ``self.THROTTLING_ERROR`` - If the error is a throttling error
* ``None`` - If the error is neither type of error.
"""
error_code = context.get_error_code()
op_model = context.operation_model
if op_model is None or not op_model.error_shapes:
return
for shape in op_model.error_shapes:
if shape.metadata.get('retryable') is not None:
# Check if this error code matches the shape. This can
# be either by name or by a modeled error code.
error_code_to_check = (
shape.metadata.get('error', {}).get('code')
or shape.name
)
if error_code == error_code_to_check:
if shape.metadata['retryable'].get('throttling'):
return self.THROTTLING_ERROR
return self.TRANSIENT_ERROR
class ThrottlingErrorDetector(object):
def __init__(self, retry_event_adapter):
self._modeled_error_detector = ModeledRetryErrorDetector()
self._fixed_error_code_detector = ThrottledRetryableChecker()
self._retry_event_adapter = retry_event_adapter
# This expects the kwargs from needs-retry to be passed through.
def is_throttling_error(self, **kwargs):
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._fixed_error_code_detector.is_retryable(context):
return True
error_type = self._modeled_error_detector.detect_error_type(context)
return error_type == self._modeled_error_detector.THROTTLING_ERROR
class StandardRetryConditions(BaseRetryableChecker):
"""Concrete class that implements the standard retry policy checks.
Specifically:
not max_attempts and (transient or throttled or modeled_retry)
"""
def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
# Note: This class is for convenience so you can have the
# standard retry condition in a single class.
self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
self._additional_checkers = OrRetryChecker([
TransientRetryableChecker(),
ThrottledRetryableChecker(),
ModeledRetryableChecker(),
OrRetryChecker([
special.RetryIDPCommunicationError(),
special.RetryDDBChecksumError(),
])
])
def is_retryable(self, context):
return (self._max_attempts_checker.is_retryable(context) and
self._additional_checkers.is_retryable(context))
class OrRetryChecker(BaseRetryableChecker):
def __init__(self, checkers):
self._checkers = checkers
def is_retryable(self, context):
return any(checker.is_retryable(context) for checker in self._checkers)
class RetryQuotaChecker(object):
_RETRY_COST = 5
_NO_RETRY_INCREMENT = 1
_TIMEOUT_RETRY_REQUEST = 10
_TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
# Implementation note: We're not making this a BaseRetryableChecker
# because this isn't just a check if we can retry. This also changes
    # state so we have to be careful when/how we call this. Making it
# a BaseRetryableChecker implies you can call .is_retryable(context)
# as many times as you want and not affect anything.
def __init__(self, quota):
self._quota = quota
        # This tracks the last amount of capacity acquired.
self._last_amount_acquired = None
def acquire_retry_quota(self, context):
if self._is_timeout_error(context):
capacity_amount = self._TIMEOUT_RETRY_REQUEST
else:
capacity_amount = self._RETRY_COST
success = self._quota.acquire(capacity_amount)
if success:
# We add the capacity amount to the request context so we know
# how much to release later. The capacity amount can vary based
# on the error.
context.request_context['retry_quota_capacity'] = capacity_amount
return True
context.add_retry_metadata(RetryQuotaReached=True)
return False
def _is_timeout_error(self, context):
return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
# This is intended to be hooked up to ``after-call``.
def release_retry_quota(self, context, http_response, **kwargs):
# There's three possible options.
# 1. The HTTP response did not have a 2xx response. In that case we
# give no quota back.
# 2. The HTTP request was successful and was never retried. In
# that case we give _NO_RETRY_INCREMENT back.
# 3. The API call had retries, and we eventually receive an HTTP
# response with a 2xx status code. In that case we give back
# whatever quota was associated with the last acquisition.
if http_response is None:
return
status_code = http_response.status_code
if 200 <= status_code < 300:
if 'retry_quota_capacity' not in context:
self._quota.release(self._NO_RETRY_INCREMENT)
else:
capacity_amount = context['retry_quota_capacity']
self._quota.release(capacity_amount)
| 19,655 | Python | 38.390781 | 79 | 0.651793 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/special.py | """Special cased retries.
These are additional retry cases we still have to handle from the legacy
retry handler. They don't make sense as part of the standard mode retry
module. Ideally we should be able to remove this module.
"""
import logging
from binascii import crc32
from botocore.retries.base import BaseRetryableChecker
logger = logging.getLogger(__name__)
# TODO: This is an ideal candidate for the retryable trait once that's
# available.
class RetryIDPCommunicationError(BaseRetryableChecker):
_SERVICE_NAME = 'sts'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
error_code = context.get_error_code()
return error_code == 'IDPCommunicationError'
class RetryDDBChecksumError(BaseRetryableChecker):
_CHECKSUM_HEADER = 'x-amz-crc32'
_SERVICE_NAME = 'dynamodb'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
if context.http_response is None:
return False
checksum = context.http_response.headers.get(self._CHECKSUM_HEADER)
if checksum is None:
return False
actual_crc32 = crc32(context.http_response.content) & 0xffffffff
if actual_crc32 != int(checksum):
logger.debug("DynamoDB crc32 checksum does not match, "
"expected: %s, actual: %s", checksum, actual_crc32)
return True
| 1,611 | Python | 31.897959 | 76 | 0.680323 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/sharedexample.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from botocore.docs.utils import escape_controls
from botocore.compat import six
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example.get('input', {})
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.style.new_line()
output_section.write('Expected Output:')
output_section.style.new_line()
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
in the comments dictionary.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
# Shape could be none if there is no output besides ResponseMetadata
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be none if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
safe_value = escape_controls(value)
section.write(u"'%s'," % six.text_type(safe_value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
| 9,326 | Python | 40.638393 | 80 | 0.615698 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/method.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from botocore.docs.params import RequestParamsDocumenter
from botocore.docs.params import ResponseParamsDocumenter
from botocore.docs.example import ResponseExampleDocumenter
from botocore.docs.example import RequestExampleDocumenter
AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI'
def get_instance_public_methods(instance):
"""Retrieves an objects public methods
:param instance: The instance of the class to inspect
:rtype: dict
:returns: A dictionary that represents an instance's methods where
        the keys are the names of the methods and the
        values are the handlers to the methods.
"""
instance_members = inspect.getmembers(instance)
instance_methods = {}
for name, member in instance_members:
if not name.startswith('_'):
if inspect.ismethod(member):
instance_methods[name] = member
return instance_methods
def document_model_driven_signature(section, name, operation_model,
include=None, exclude=None):
"""Documents the signature of a model-driven method
:param section: The section to write the documentation to.
:param name: The name of the method
:param operation_model: The operation model for the method
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
params = {}
if operation_model.input_shape:
params = operation_model.input_shape.members
parameter_names = list(params.keys())
if include is not None:
for member in include:
parameter_names.append(member.name)
if exclude is not None:
for member in exclude:
if member in parameter_names:
parameter_names.remove(member)
signature_params = ''
if parameter_names:
signature_params = '**kwargs'
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_signature(section, name, method,
include=None, exclude=None):
"""Documents the signature of a custom method
:param section: The section to write the documentation to.
:param name: The name of the method
:param method: The handle to the method being documented
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
args, varargs, keywords, defaults = inspect.getargspec(method)
args = args[1:]
signature_params = inspect.formatargspec(
args, varargs, keywords, defaults)
signature_params = signature_params.lstrip('(')
signature_params = signature_params.rstrip(')')
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_method(section, method_name, method):
"""Documents a non-data driven method
:param section: The section to write the documentation to.
:param method_name: The name of the method
:param method: The handle to the method being documented
"""
document_custom_signature(
section, method_name, method)
method_intro_section = section.add_new_section('method-intro')
method_intro_section.writeln('')
doc_string = inspect.getdoc(method)
if doc_string is not None:
method_intro_section.style.write_py_doc_string(doc_string)
def document_model_driven_method(section, method_name, operation_model,
event_emitter, method_description=None,
example_prefix=None, include_input=None,
include_output=None, exclude_input=None,
exclude_output=None, document_output=True,
include_signature=True):
"""Documents an individual method
:param section: The section to write to
:param method_name: The name of the method
:param operation_model: The model of the operation
:param event_emitter: The event emitter to use to emit events
:param example_prefix: The prefix to use in the method example.
:type include_input: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_input: The parameter shapes to include in the
input documentation.
:type include_output: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
    :param include_output: The parameter shapes to include in the
output documentation.
:type exclude_input: List of the names of the parameters to exclude.
:param exclude_input: The names of the parameters to exclude from
input documentation.
:type exclude_output: List of the names of the parameters to exclude.
    :param exclude_output: The names of the parameters to exclude from
output documentation.
:param document_output: A boolean flag to indicate whether to
document the output.
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
# Add the signature if specified.
if include_signature:
document_model_driven_signature(
section, method_name, operation_model, include=include_input,
exclude=exclude_input)
# Add the description for the method.
method_intro_section = section.add_new_section('method-intro')
method_intro_section.include_doc_string(method_description)
if operation_model.deprecated:
method_intro_section.style.start_danger()
method_intro_section.writeln(
'This operation is deprecated and may not function as '
'expected. This operation should not be used going forward '
            'and is only kept for the purpose of backwards compatibility.')
method_intro_section.style.end_danger()
service_uid = operation_model.service_model.metadata.get('uid')
if service_uid is not None:
method_intro_section.style.new_paragraph()
method_intro_section.write("See also: ")
link = '%s/%s/%s' % (AWS_DOC_BASE, service_uid,
operation_model.name)
method_intro_section.style.external_link(title="AWS API Documentation",
link=link)
method_intro_section.writeln('')
# Add the example section.
example_section = section.add_new_section('example')
example_section.style.new_paragraph()
example_section.style.bold('Request Syntax')
context = {
'special_shape_types': {
'streaming_input_shape': operation_model.get_streaming_input(),
'streaming_output_shape': operation_model.get_streaming_output(),
'eventstream_output_shape': operation_model.get_event_stream_output(),
},
}
if operation_model.input_shape:
RequestExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_example(
example_section, operation_model.input_shape,
prefix=example_prefix, include=include_input,
exclude=exclude_input)
else:
example_section.style.new_paragraph()
example_section.style.start_codeblock()
example_section.write(example_prefix + '()')
# Add the request parameter documentation.
request_params_section = section.add_new_section('request-params')
if operation_model.input_shape:
RequestParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_params(
request_params_section, operation_model.input_shape,
include=include_input, exclude=exclude_input)
# Add the return value documentation
return_section = section.add_new_section('return')
return_section.style.new_line()
if operation_model.output_shape is not None and document_output:
return_section.write(':rtype: dict')
return_section.style.new_line()
return_section.write(':returns: ')
return_section.style.indent()
return_section.style.new_line()
# If the operation is an event stream, describe the tagged union
event_stream_output = operation_model.get_event_stream_output()
if event_stream_output:
event_section = return_section.add_new_section('event-stream')
event_section.style.new_paragraph()
event_section.write(
'The response of this operation contains an '
':class:`.EventStream` member. When iterated the '
':class:`.EventStream` will yield events based on the '
'structure below, where only one of the top level keys '
'will be present for any given event.'
)
event_section.style.new_line()
# Add an example return value
return_example_section = return_section.add_new_section('example')
return_example_section.style.new_line()
return_example_section.style.bold('Response Syntax')
return_example_section.style.new_paragraph()
ResponseExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_example(
return_example_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
# Add a description for the return value
return_description_section = return_section.add_new_section(
'description')
return_description_section.style.new_line()
return_description_section.style.bold('Response Structure')
return_description_section.style.new_paragraph()
ResponseParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_params(
return_description_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
else:
return_section.write(':returns: None')
| 11,548 | Python | 39.953901 | 82 | 0.668081 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/service.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import DataNotFoundError
from botocore.docs.utils import get_official_service_name
from botocore.docs.client import ClientDocumenter
from botocore.docs.client import ClientExceptionsDocumenter
from botocore.docs.waiter import WaiterDocumenter
from botocore.docs.paginator import PaginatorDocumenter
from botocore.docs.bcdoc.restdoc import DocumentStructure
class ServiceDocumenter(object):
def __init__(self, service_name, session):
self._session = session
self._service_name = service_name
self._client = self._session.create_client(
service_name, region_name='us-east-1', aws_access_key_id='foo',
aws_secret_access_key='bar')
self._event_emitter = self._client.meta.events
self.sections = [
'title',
'table-of-contents',
'client-api',
'client-exceptions',
'paginator-api',
'waiter-api'
]
def document_service(self):
"""Documents an entire service.
:returns: The reStructured text of the documented service.
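
        A typical invocation might look like this (the session setup is
        illustrative)::

            import botocore.session
            session = botocore.session.get_session()
            rst_docs = ServiceDocumenter('s3', session).document_service()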
"""
doc_structure = DocumentStructure(
self._service_name, section_names=self.sections,
target='html')
self.title(doc_structure.get_section('title'))
self.table_of_contents(doc_structure.get_section('table-of-contents'))
self.client_api(doc_structure.get_section('client-api'))
self.client_exceptions(doc_structure.get_section('client-exceptions'))
self.paginator_api(doc_structure.get_section('paginator-api'))
self.waiter_api(doc_structure.get_section('waiter-api'))
return doc_structure.flush_structure()
def title(self, section):
section.style.h1(self._client.__class__.__name__)
self._event_emitter.emit(
'docs.%s.%s' % ('title',
self._service_name),
section=section
)
def table_of_contents(self, section):
section.style.table_of_contents(title='Table of Contents', depth=2)
def client_api(self, section):
examples = None
try:
examples = self.get_examples(self._service_name)
except DataNotFoundError:
pass
ClientDocumenter(self._client, examples).document_client(section)
def client_exceptions(self, section):
ClientExceptionsDocumenter(self._client).document_exceptions(section)
def paginator_api(self, section):
try:
service_paginator_model = self._session.get_paginator_model(
self._service_name)
except DataNotFoundError:
return
paginator_documenter = PaginatorDocumenter(
self._client, service_paginator_model)
paginator_documenter.document_paginators(section)
def waiter_api(self, section):
if self._client.waiter_names:
service_waiter_model = self._session.get_waiter_model(
self._service_name)
waiter_documenter = WaiterDocumenter(
self._client, service_waiter_model)
waiter_documenter.document_waiters(section)
def get_examples(self, service_name, api_version=None):
loader = self._session.get_component('data_loader')
examples = loader.load_service_model(
service_name, 'examples-1', api_version)
return examples['examples']
| 3,986 | Python | 37.708737 | 78 | 0.655294 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/__init__.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from botocore.docs.service import ServiceDocumenter
def generate_docs(root_dir, session):
"""Generates the reference documentation for botocore
This will go through every available AWS service and output ReSTructured
text files documenting each service.
:param root_dir: The directory to write the reference files to. Each
                     service's reference documentation is located at
root_dir/reference/services/service-name.rst
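
    For example (the output directory here is illustrative)::

        import botocore.session
        from botocore.docs import generate_docs

        generate_docs('/tmp/botocore-docs', botocore.session.get_session())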
"""
services_doc_path = os.path.join(root_dir, 'reference', 'services')
if not os.path.exists(services_doc_path):
os.makedirs(services_doc_path)
# Generate reference docs and write them out.
for service_name in session.get_available_services():
docs = ServiceDocumenter(service_name, session).document_service()
service_doc_path = os.path.join(
services_doc_path, service_name + '.rst')
with open(service_doc_path, 'wb') as f:
f.write(docs)
| 1,543 | Python | 38.589743 | 76 | 0.71873 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/waiter.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore import xform_name
from botocore.compat import OrderedDict
from botocore.docs.utils import DocumentedShape
from botocore.utils import get_service_module_name
from botocore.docs.method import document_model_driven_method
class WaiterDocumenter(object):
def __init__(self, client, service_waiter_model):
self._client = client
self._service_name = self._client.meta.service_model.service_name
self._service_waiter_model = service_waiter_model
def document_waiters(self, section):
"""Documents the various waiters for a service.
:param section: The section to write to.
"""
section.style.h2('Waiters')
section.style.new_line()
section.writeln('The available waiters are:')
for waiter_name in self._service_waiter_model.waiter_names:
section.style.li(
':py:class:`%s.Waiter.%s`' % (
self._client.__class__.__name__, waiter_name))
self._add_single_waiter(section, waiter_name)
def _add_single_waiter(self, section, waiter_name):
section = section.add_new_section(waiter_name)
section.style.start_sphinx_py_class(
class_name='%s.Waiter.%s' % (
self._client.__class__.__name__, waiter_name))
# Add example on how to instantiate waiter.
section.style.start_codeblock()
section.style.new_line()
section.write(
'waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name)
)
section.style.end_codeblock()
# Add information on the wait() method
section.style.new_line()
document_wait_method(
section=section,
waiter_name=waiter_name,
event_emitter=self._client.meta.events,
service_model=self._client.meta.service_model,
service_waiter_model=self._service_waiter_model
)
def document_wait_method(section, waiter_name, event_emitter,
service_model, service_waiter_model,
include_signature=True):
"""Documents a the wait method of a waiter
:param section: The section to write to
:param waiter_name: The name of the waiter
:param event_emitter: The event emitter to use to emit events
:param service_model: The service model
:param service_waiter_model: The waiter model associated to the service
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
waiter_model = service_waiter_model.get_waiter(waiter_name)
operation_model = service_model.operation_model(
waiter_model.operation)
waiter_config_members = OrderedDict()
waiter_config_members['Delay'] = DocumentedShape(
name='Delay', type_name='integer',
documentation=(
'<p>The amount of time in seconds to wait between '
'attempts. Default: {0}</p>'.format(waiter_model.delay)))
waiter_config_members['MaxAttempts'] = DocumentedShape(
name='MaxAttempts', type_name='integer',
documentation=(
'<p>The maximum number of attempts to be made. '
'Default: {0}</p>'.format(waiter_model.max_attempts)))
botocore_waiter_params = [
DocumentedShape(
name='WaiterConfig', type_name='structure',
documentation=(
'<p>A dictionary that provides parameters to control '
'waiting behavior.</p>'),
members=waiter_config_members)
]
wait_description = (
'Polls :py:meth:`{0}.Client.{1}` every {2} '
'seconds until a successful state is reached. An error is '
'returned after {3} failed checks.'.format(
get_service_module_name(service_model),
xform_name(waiter_model.operation),
waiter_model.delay, waiter_model.max_attempts)
)
document_model_driven_method(
section, 'wait', operation_model,
event_emitter=event_emitter,
method_description=wait_description,
example_prefix='waiter.wait',
include_input=botocore_waiter_params,
document_output=False,
include_signature=include_signature
)
| 4,823 | Python | 36.6875 | 75 | 0.642131 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/example.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.shape import ShapeDocumenter
from botocore.docs.utils import py_default
class BaseExampleDocumenter(ShapeDocumenter):
def document_example(self, section, shape, prefix=None, include=None,
exclude=None):
"""Generates an example based on a shape
:param section: The section to write the documentation to.
:param shape: The shape of the operation.
:param prefix: Anything to be included before the example
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
history = []
section.style.new_line()
section.style.start_codeblock()
if prefix is not None:
section.write(prefix)
self.traverse_and_document_shape(
section=section, shape=shape, history=history,
include=include, exclude=exclude)
def document_recursive_shape(self, section, shape, **kwargs):
section.write('{\'... recursive ...\'}')
def document_shape_default(self, section, shape, history, include=None,
exclude=None, **kwargs):
py_type = self._get_special_py_default(shape)
if py_type is None:
py_type = py_default(shape.type_name)
if self._context.get('streaming_shape') == shape:
py_type = 'StreamingBody()'
section.write(py_type)
def document_shape_type_string(self, section, shape, history,
include=None, exclude=None, **kwargs):
if 'enum' in shape.metadata:
for i, enum in enumerate(shape.metadata['enum']):
section.write('\'%s\'' % enum)
if i < len(shape.metadata['enum']) - 1:
section.write('|')
else:
self.document_shape_default(section, shape, history)
def document_shape_type_list(self, section, shape, history, include=None,
exclude=None, **kwargs):
param_shape = shape.member
list_section = section.add_new_section('list-value')
self._start_nested_param(list_section, '[')
param_section = list_section.add_new_section(
'member', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_section, shape=param_shape, history=history)
ending_comma_section = list_section.add_new_section('ending-comma')
ending_comma_section.write(',')
ending_bracket_section = list_section.add_new_section(
'ending-bracket')
self._end_nested_param(ending_bracket_section, ']')
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
if not shape.members:
section.write('{}')
return
section = section.add_new_section('structure-value')
self._start_nested_param(section, '{')
input_members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(input_members):
if exclude and param in exclude:
continue
param_section = section.add_new_section(param)
param_section.write('\'%s\': ' % param)
param_shape = input_members[param]
param_value_section = param_section.add_new_section(
'member-value', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_value_section, shape=param_shape,
history=history, name=param)
if i < len(input_members) - 1:
ending_comma_section = param_section.add_new_section(
'ending-comma')
ending_comma_section.write(',')
ending_comma_section.style.new_line()
        self._end_structure(section, '{', '}')

    def document_shape_type_map(self, section, shape, history,
include=None, exclude=None, **kwargs):
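        # Maps are illustrated with a single 'string' key mapped to an example
        # of the value shape.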
map_section = section.add_new_section('map-value')
self._start_nested_param(map_section, '{')
value_shape = shape.value
key_section = map_section.add_new_section(
'key', context={'shape': shape.key.name})
key_section.write('\'string\': ')
value_section = map_section.add_new_section(
'value', context={'shape': value_shape.name})
self.traverse_and_document_shape(
section=value_section, shape=value_shape, history=history)
end_bracket_section = map_section.add_new_section('ending-bracket')
        self._end_nested_param(end_bracket_section, '}')

    def _add_members_to_shape(self, members, include):
if include:
members = members.copy()
for param in include:
members[param.name] = param
        return members

    def _start_nested_param(self, section, start=None):
if start is not None:
section.write(start)
section.style.indent()
section.style.indent()
        section.style.new_line()

    def _end_nested_param(self, section, end=None):
section.style.dedent()
section.style.dedent()
section.style.new_line()
if end is not None:
            section.write(end)

    def _end_structure(self, section, start, end):
        # If there are no members in the structure, then make sure the
# start and the end bracket are on the same line, by removing all
# previous text and writing the start and end.
if not section.available_sections:
section.clear_text()
section.write(start + end)
self._end_nested_param(section)
else:
end_bracket_section = section.add_new_section('ending-bracket')
            self._end_nested_param(end_bracket_section, end)


class ResponseExampleDocumenter(BaseExampleDocumenter):
    EVENT_NAME = 'response-example'

    def document_shape_type_event_stream(self, section, shape, history,
**kwargs):
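        # Event streams are documented as a structure wrapped in an
        # EventStream(...) marker so the example reflects the streaming
        # response object.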
section.write('EventStream(')
self.document_shape_type_structure(section, shape, history, **kwargs)
end_section = section.add_new_section('event-stream-end')
        end_section.write(')')


class RequestExampleDocumenter(BaseExampleDocumenter):
    EVENT_NAME = 'request-example'

    def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
param_format = '\'%s\''
operator = ': '
start = '{'
end = '}'
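        # At the top level (history holds only the root shape) the example is
        # rendered as keyword arguments inside parentheses, e.g.
        # Param='string'; nested structures keep the dict-literal syntax.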
if len(history) <= 1:
operator = '='
start = '('
end = ')'
param_format = '%s'
section = section.add_new_section('structure-value')
self._start_nested_param(section, start)
input_members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(input_members):
if exclude and param in exclude:
continue
param_section = section.add_new_section(param)
param_section.write(param_format % param)
param_section.write(operator)
param_shape = input_members[param]
param_value_section = param_section.add_new_section(
'member-value', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_value_section, shape=param_shape,
history=history, name=param)
if i < len(input_members) - 1:
ending_comma_section = param_section.add_new_section(
'ending-comma')
ending_comma_section.write(',')
ending_comma_section.style.new_line()
self._end_structure(section, start, end)
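

# Illustrative sketch (editorial, not part of botocore): how these documenters
# are typically driven. The constructor arguments come from ShapeDocumenter,
# and the service/operation names below are placeholders, so treat this as an
# assumption rather than a verbatim call site.
#
#   documenter = RequestExampleDocumenter(
#       service_name='myservice', operation_name='MyOperation',
#       event_emitter=event_emitter)
#   documenter.document_example(
#       section, operation_model.input_shape,
#       prefix='response = client.my_operation')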
| 8,751 | Python | 40.875598 | 77 | 0.597989 |