# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/client.py
from __future__ import annotations
import asyncio
import functools
import logging
import random
import urllib.parse
import warnings
from types import TracebackType
from typing import (
Any,
AsyncIterator,
Callable,
Generator,
List,
Optional,
Sequence,
Tuple,
Type,
cast,
)
from ..datastructures import Headers, HeadersLike
from ..exceptions import (
InvalidHandshake,
InvalidHeader,
InvalidMessage,
InvalidStatusCode,
NegotiationError,
RedirectHandshake,
SecurityError,
)
from ..extensions import ClientExtensionFactory, Extension
from ..extensions.permessage_deflate import enable_client_permessage_deflate
from ..headers import (
build_authorization_basic,
build_extension,
build_host,
build_subprotocol,
parse_extension,
parse_subprotocol,
validate_subprotocols,
)
from ..http import USER_AGENT
from ..typing import ExtensionHeader, LoggerLike, Origin, Subprotocol
from ..uri import WebSocketURI, parse_uri
from .handshake import build_request, check_response
from .http import read_response
from .protocol import WebSocketCommonProtocol
__all__ = ["connect", "unix_connect", "WebSocketClientProtocol"]
class WebSocketClientProtocol(WebSocketCommonProtocol):
"""
WebSocket client connection.
:class:`WebSocketClientProtocol` provides :meth:`recv` and :meth:`send`
coroutines for receiving and sending messages.
It supports asynchronous iteration to receive incoming messages::
async for message in websocket:
await process(message)
The iterator exits normally when the connection is closed with close code
1000 (OK) or 1001 (going away). It raises
a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection
is closed with any other code.
See :func:`connect` for the documentation of ``logger``, ``origin``,
``extensions``, ``subprotocols``, and ``extra_headers``.
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
"""
is_client = True
side = "client"
def __init__(
self,
*,
logger: Optional[LoggerLike] = None,
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
**kwargs: Any,
) -> None:
if logger is None:
logger = logging.getLogger("websockets.client")
super().__init__(logger=logger, **kwargs)
self.origin = origin
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.extra_headers = extra_headers
def write_http_request(self, path: str, headers: Headers) -> None:
"""
Write request line and headers to the HTTP request.
"""
self.path = path
self.request_headers = headers
if self.debug:
self.logger.debug("> GET %s HTTP/1.1", path)
for key, value in headers.raw_items():
self.logger.debug("> %s: %s", key, value)
# Since the path and headers only contain ASCII characters,
# we can keep this simple.
request = f"GET {path} HTTP/1.1\r\n"
request += str(headers)
self.transport.write(request.encode())
async def read_http_response(self) -> Tuple[int, Headers]:
"""
Read status line and headers from the HTTP response.
If the response contains a body, it may be read from ``self.reader``
after this coroutine returns.
Raises:
InvalidMessage: if the HTTP message is malformed or isn't an
HTTP/1.1 GET response.
"""
try:
status_code, reason, headers = await read_response(self.reader)
# Remove this branch when dropping support for Python < 3.8
# because CancelledError no longer inherits Exception.
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as exc:
raise InvalidMessage("did not receive a valid HTTP response") from exc
if self.debug:
self.logger.debug("< HTTP/1.1 %d %s", status_code, reason)
for key, value in headers.raw_items():
self.logger.debug("< %s: %s", key, value)
self.response_headers = headers
return status_code, self.response_headers
@staticmethod
def process_extensions(
headers: Headers,
available_extensions: Optional[Sequence[ClientExtensionFactory]],
) -> List[Extension]:
"""
Handle the Sec-WebSocket-Extensions HTTP response header.
Check that each extension is supported, as well as its parameters.
Return the list of accepted extensions.
Raise :exc:`~websockets.exceptions.InvalidHandshake` to abort the
connection.
:rfc:`6455` leaves the rules up to the specification of each
extension.
To provide this level of flexibility, for each extension accepted by
the server, we check for a match with each extension available in the
client configuration. If no match is found, an exception is raised.
If several variants of the same extension are accepted by the server,
it may be configured several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
"""
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values:
if available_extensions is None:
raise InvalidHandshake("no extensions supported")
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, response_params in parsed_header_values:
for extension_factory in available_extensions:
# Skip non-matching extensions based on their name.
if extension_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
extension = extension_factory.process_response_params(
response_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the server sent. Fail the connection.
else:
raise NegotiationError(
f"Unsupported extension: "
f"name = {name}, params = {response_params}"
)
return accepted_extensions
@staticmethod
def process_subprotocol(
headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP response header.
Check that it contains exactly one supported subprotocol.
Return the selected subprotocol.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values:
if available_subprotocols is None:
raise InvalidHandshake("no subprotocols supported")
parsed_header_values: Sequence[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
if len(parsed_header_values) > 1:
subprotocols = ", ".join(parsed_header_values)
raise InvalidHandshake(f"multiple subprotocols: {subprotocols}")
subprotocol = parsed_header_values[0]
if subprotocol not in available_subprotocols:
raise NegotiationError(f"unsupported subprotocol: {subprotocol}")
return subprotocol
async def handshake(
self,
wsuri: WebSocketURI,
origin: Optional[Origin] = None,
available_extensions: Optional[Sequence[ClientExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
) -> None:
"""
Perform the client side of the opening handshake.
Args:
wsuri: URI of the WebSocket server.
origin: value of the ``Origin`` header.
available_extensions: list of supported extensions, in order in
which they should be tried.
available_subprotocols: list of supported subprotocols, in order
of decreasing preference.
extra_headers: arbitrary HTTP headers to add to the request.
Raises:
InvalidHandshake: if the handshake fails.
"""
request_headers = Headers()
request_headers["Host"] = build_host(wsuri.host, wsuri.port, wsuri.secure)
if wsuri.user_info:
request_headers["Authorization"] = build_authorization_basic(
*wsuri.user_info
)
if origin is not None:
request_headers["Origin"] = origin
key = build_request(request_headers)
if available_extensions is not None:
extensions_header = build_extension(
[
(extension_factory.name, extension_factory.get_request_params())
for extension_factory in available_extensions
]
)
request_headers["Sec-WebSocket-Extensions"] = extensions_header
if available_subprotocols is not None:
protocol_header = build_subprotocol(available_subprotocols)
request_headers["Sec-WebSocket-Protocol"] = protocol_header
if self.extra_headers is not None:
request_headers.update(self.extra_headers)
request_headers.setdefault("User-Agent", USER_AGENT)
self.write_http_request(wsuri.resource_name, request_headers)
status_code, response_headers = await self.read_http_response()
if status_code in (301, 302, 303, 307, 308):
if "Location" not in response_headers:
raise InvalidHeader("Location")
raise RedirectHandshake(response_headers["Location"])
elif status_code != 101:
raise InvalidStatusCode(status_code, response_headers)
check_response(response_headers, key)
self.extensions = self.process_extensions(
response_headers, available_extensions
)
self.subprotocol = self.process_subprotocol(
response_headers, available_subprotocols
)
self.connection_open()
class Connect:
"""
Connect to the WebSocket server at ``uri``.
Awaiting :func:`connect` yields a :class:`WebSocketClientProtocol` which
can then be used to send and receive messages.
:func:`connect` can be used as an asynchronous context manager::
async with websockets.connect(...) as websocket:
...
The connection is closed automatically when exiting the context.
:func:`connect` can be used as an infinite asynchronous iterator to
reconnect automatically on errors::
async for websocket in websockets.connect(...):
try:
...
except websockets.ConnectionClosed:
continue
The connection is closed automatically after each iteration of the loop.
If an error occurs while establishing the connection, :func:`connect`
retries with exponential backoff. The backoff delay starts at three
seconds and increases up to one minute.
If an error occurs in the body of the loop, you can handle the exception
and :func:`connect` will reconnect with the next iteration; or you can
let the exception bubble up and break out of the loop. This lets you
decide which errors trigger a reconnection and which errors are fatal.
Args:
uri: URI of the WebSocket server.
create_protocol: factory for the :class:`asyncio.Protocol` managing
the connection; defaults to :class:`WebSocketClientProtocol`; may
be set to a wrapper or a subclass to customize connection handling.
logger: logger for this connection;
defaults to ``logging.getLogger("websockets.client")``;
see the :doc:`logging guide <../topics/logging>` for details.
compression: shortcut that enables the "permessage-deflate" extension
by default; may be set to :obj:`None` to disable compression;
see the :doc:`compression guide <../topics/compression>` for details.
origin: value of the ``Origin`` header. This is useful when connecting
to a server that validates the ``Origin`` header to defend against
Cross-Site WebSocket Hijacking attacks.
extensions: list of supported extensions, in order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of decreasing
preference.
extra_headers: arbitrary HTTP headers to add to the request.
open_timeout: timeout for opening the connection in seconds;
:obj:`None` to disable the timeout
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
Any other keyword arguments are passed to the event loop's
:meth:`~asyncio.loop.create_connection` method.
For example:
* You can set ``ssl`` to a :class:`~ssl.SSLContext` to enforce TLS
settings. When connecting to a ``wss://`` URI, if ``ssl`` isn't
provided, a TLS context is created
with :func:`~ssl.create_default_context`.
* You can set ``host`` and ``port`` to connect to a different host and
port from those found in ``uri``. This only changes the destination of
the TCP connection. The host name from ``uri`` is still used in the TLS
handshake for secure connections and in the ``Host`` header.
Returns:
WebSocketClientProtocol: WebSocket connection.
Raises:
InvalidURI: if ``uri`` isn't a valid WebSocket URI.
InvalidHandshake: if the opening handshake fails.
~asyncio.TimeoutError: if the opening handshake times out.
"""
MAX_REDIRECTS_ALLOWED = 10
def __init__(
self,
uri: str,
*,
create_protocol: Optional[Callable[[Any], WebSocketClientProtocol]] = None,
logger: Optional[LoggerLike] = None,
compression: Optional[str] = "deflate",
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
open_timeout: Optional[float] = 10,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2**20,
max_queue: Optional[int] = 2**5,
read_limit: int = 2**16,
write_limit: int = 2**16,
**kwargs: Any,
) -> None:
# Backwards compatibility: close_timeout used to be called timeout.
timeout: Optional[float] = kwargs.pop("timeout", None)
if timeout is None:
timeout = 10
else:
warnings.warn("rename timeout to close_timeout", DeprecationWarning)
# If both are specified, timeout is ignored.
if close_timeout is None:
close_timeout = timeout
# Backwards compatibility: create_protocol used to be called klass.
klass: Optional[Type[WebSocketClientProtocol]] = kwargs.pop("klass", None)
if klass is None:
klass = WebSocketClientProtocol
else:
warnings.warn("rename klass to create_protocol", DeprecationWarning)
# If both are specified, klass is ignored.
if create_protocol is None:
create_protocol = klass
# Backwards compatibility: recv() used to return None on closed connections
legacy_recv: bool = kwargs.pop("legacy_recv", False)
# Backwards compatibility: the loop parameter used to be supported.
_loop: Optional[asyncio.AbstractEventLoop] = kwargs.pop("loop", None)
if _loop is None:
loop = asyncio.get_event_loop()
else:
loop = _loop
warnings.warn("remove loop argument", DeprecationWarning)
wsuri = parse_uri(uri)
if wsuri.secure:
kwargs.setdefault("ssl", True)
elif kwargs.get("ssl") is not None:
raise ValueError(
"connect() received a ssl argument for a ws:// URI, "
"use a wss:// URI to enable TLS"
)
if compression == "deflate":
extensions = enable_client_permessage_deflate(extensions)
elif compression is not None:
raise ValueError(f"unsupported compression: {compression}")
if subprotocols is not None:
validate_subprotocols(subprotocols)
factory = functools.partial(
create_protocol,
logger=logger,
origin=origin,
extensions=extensions,
subprotocols=subprotocols,
extra_headers=extra_headers,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
max_size=max_size,
max_queue=max_queue,
read_limit=read_limit,
write_limit=write_limit,
host=wsuri.host,
port=wsuri.port,
secure=wsuri.secure,
legacy_recv=legacy_recv,
loop=_loop,
)
if kwargs.pop("unix", False):
path: Optional[str] = kwargs.pop("path", None)
create_connection = functools.partial(
loop.create_unix_connection, factory, path, **kwargs
)
else:
host: Optional[str]
port: Optional[int]
if kwargs.get("sock") is None:
host, port = wsuri.host, wsuri.port
else:
# If sock is given, host and port shouldn't be specified.
host, port = None, None
# If host and port are given, override values from the URI.
host = kwargs.pop("host", host)
port = kwargs.pop("port", port)
create_connection = functools.partial(
loop.create_connection, factory, host, port, **kwargs
)
self.open_timeout = open_timeout
if logger is None:
logger = logging.getLogger("websockets.client")
self.logger = logger
# This is a coroutine function.
self._create_connection = create_connection
self._uri = uri
self._wsuri = wsuri
def handle_redirect(self, uri: str) -> None:
# Update the state of this instance to connect to a new URI.
old_uri = self._uri
old_wsuri = self._wsuri
new_uri = urllib.parse.urljoin(old_uri, uri)
new_wsuri = parse_uri(new_uri)
# Forbid TLS downgrade.
if old_wsuri.secure and not new_wsuri.secure:
raise SecurityError("redirect from WSS to WS")
same_origin = (
old_wsuri.host == new_wsuri.host and old_wsuri.port == new_wsuri.port
)
# Rewrite the host and port arguments for cross-origin redirects.
# This preserves connection overrides with the host and port
# arguments if the redirect points to the same host and port.
if not same_origin:
# Replace the host and port argument passed to the protocol factory.
factory = self._create_connection.args[0]
factory = functools.partial(
factory.func,
*factory.args,
**dict(factory.keywords, host=new_wsuri.host, port=new_wsuri.port),
)
# Replace the host and port argument passed to create_connection.
self._create_connection = functools.partial(
self._create_connection.func,
*(factory, new_wsuri.host, new_wsuri.port),
**self._create_connection.keywords,
)
# Set the new WebSocket URI. This suffices for same-origin redirects.
self._uri = new_uri
self._wsuri = new_wsuri
# async for ... in connect(...):
BACKOFF_MIN = 1.92
BACKOFF_MAX = 60.0
BACKOFF_FACTOR = 1.618
BACKOFF_INITIAL = 5
async def __aiter__(self) -> AsyncIterator[WebSocketClientProtocol]:
backoff_delay = self.BACKOFF_MIN
while True:
try:
async with self as protocol:
yield protocol
# Remove this branch when dropping support for Python < 3.8
# because CancelledError no longer inherits Exception.
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
# Add a random initial delay between 0 and 5 seconds.
# See 7.2.3. Recovering from Abnormal Closure in RFC 6455.
if backoff_delay == self.BACKOFF_MIN:
initial_delay = random.random() * self.BACKOFF_INITIAL
self.logger.info(
"! connect failed; reconnecting in %.1f seconds",
initial_delay,
exc_info=True,
)
await asyncio.sleep(initial_delay)
else:
self.logger.info(
"! connect failed again; retrying in %d seconds",
int(backoff_delay),
exc_info=True,
)
await asyncio.sleep(int(backoff_delay))
# Increase delay with truncated exponential backoff.
backoff_delay = backoff_delay * self.BACKOFF_FACTOR
backoff_delay = min(backoff_delay, self.BACKOFF_MAX)
continue
else:
# Connection succeeded - reset backoff delay
backoff_delay = self.BACKOFF_MIN
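# Editor's note: with the constants above, the retry delays after the initial
# random(0..5) second wait are roughly 3, 5, 8, 13, 21, 34, 55, then 60
# seconds (BACKOFF_MIN * BACKOFF_FACTOR**n, truncated to int and capped at
# BACKOFF_MAX; the golden-ratio factor makes the sequence Fibonacci-like).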
# async with connect(...) as ...:
async def __aenter__(self) -> WebSocketClientProtocol:
return await self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
await self.protocol.close()
# ... = await connect(...)
def __await__(self) -> Generator[Any, None, WebSocketClientProtocol]:
# Create a suitable iterator by calling __await__ on a coroutine.
return self.__await_impl_timeout__().__await__()
async def __await_impl_timeout__(self) -> WebSocketClientProtocol:
return await asyncio.wait_for(self.__await_impl__(), self.open_timeout)
async def __await_impl__(self) -> WebSocketClientProtocol:
for redirects in range(self.MAX_REDIRECTS_ALLOWED):
transport, protocol = await self._create_connection()
protocol = cast(WebSocketClientProtocol, protocol)
try:
await protocol.handshake(
self._wsuri,
origin=protocol.origin,
available_extensions=protocol.available_extensions,
available_subprotocols=protocol.available_subprotocols,
extra_headers=protocol.extra_headers,
)
except RedirectHandshake as exc:
protocol.fail_connection()
await protocol.wait_closed()
self.handle_redirect(exc.uri)
# Avoid leaking a connected socket when the handshake fails.
except (Exception, asyncio.CancelledError):
protocol.fail_connection()
await protocol.wait_closed()
raise
else:
self.protocol = protocol
return protocol
else:
raise SecurityError("too many redirects")
# ... = yield from connect(...) - remove when dropping Python < 3.10
__iter__ = __await__
connect = Connect
def unix_connect(
path: Optional[str] = None,
uri: str = "ws://localhost/",
**kwargs: Any,
) -> Connect:
"""
Similar to :func:`connect`, but for connecting to a Unix socket.
This function builds upon the event loop's
:meth:`~asyncio.loop.create_unix_connection` method.
It is only available on Unix.
It's mainly useful for debugging servers listening on Unix sockets.
Args:
path: file system path to the Unix socket.
uri: URI of the WebSocket server; the host is used in the TLS
handshake for secure connections and in the ``Host`` header.
"""
return connect(uri=uri, path=path, unix=True, **kwargs)
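
# Editor's usage sketch (not part of the vendored module): the two connection
# patterns documented on Connect, against a hypothetical server URL.
async def _example_once() -> None:
    # One-shot connection; closed automatically when the block exits.
    async with connect("ws://localhost:8765") as ws:
        await ws.send("hello")
        print(await ws.recv())

async def _example_reconnect() -> None:
    # Reconnecting loop; connect() retries with the backoff described above.
    from websockets.exceptions import ConnectionClosed
    async for ws in connect("ws://localhost:8765"):
        try:
            async for message in ws:
                print(message)
        except ConnectionClosed:
            continue  # reconnect on the next iteration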

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/compatibility.py
from __future__ import annotations
import asyncio
import sys
from typing import Any, Dict
def loop_if_py_lt_38(loop: asyncio.AbstractEventLoop) -> Dict[str, Any]:
"""
Helper for the removal of the loop argument in Python 3.10.
"""
return {"loop": loop} if sys.version_info[:2] < (3, 8) else {}

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cchardet/__init__.py
from cchardet import _cchardet
from .version import __version__
def detect(msg):
"""
Args:
msg: str
Returns:
{
"encoding": str,
"confidence": float
}
"""
encoding, confidence = _cchardet.detect_with_confidence(msg)
if isinstance(encoding, bytes):
encoding = encoding.decode()
return {"encoding": encoding, "confidence": confidence}
class UniversalDetector(object):
def __init__(self):
self._detector = _cchardet.UniversalDetector()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
return False
def reset(self):
self._detector.reset()
def feed(self, data):
self._detector.feed(data)
def close(self):
self._detector.close()
@property
def done(self):
return self._detector.done
@property
def result(self):
encoding, confidence = self._detector.result
if isinstance(encoding, bytes):
encoding = encoding.decode()
return {"encoding": encoding, "confidence": confidence}

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cchardet/version.py
__version__ = '2.1.6'

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/error_wrappers.py
import json
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union
from .json import pydantic_encoder
from .utils import Representation
if TYPE_CHECKING:
from typing_extensions import TypedDict
from .config import BaseConfig
from .types import ModelOrDc
from .typing import ReprArgs
Loc = Tuple[Union[int, str], ...]
class _ErrorDictRequired(TypedDict):
loc: Loc
msg: str
type: str
class ErrorDict(_ErrorDictRequired, total=False):
ctx: Dict[str, Any]
__all__ = 'ErrorWrapper', 'ValidationError'
class ErrorWrapper(Representation):
__slots__ = 'exc', '_loc'
def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None:
self.exc = exc
self._loc = loc
def loc_tuple(self) -> 'Loc':
if isinstance(self._loc, tuple):
return self._loc
else:
return (self._loc,)
def __repr_args__(self) -> 'ReprArgs':
return [('exc', self.exc), ('loc', self.loc_tuple())]
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
ErrorList = Union[Sequence[Any], ErrorWrapper]
class ValidationError(Representation, ValueError):
__slots__ = 'raw_errors', 'model', '_error_cache'
def __init__(self, errors: Sequence[ErrorList], model: 'ModelOrDc') -> None:
self.raw_errors = errors
self.model = model
self._error_cache: Optional[List['ErrorDict']] = None
def errors(self) -> List['ErrorDict']:
if self._error_cache is None:
try:
config = self.model.__config__ # type: ignore
except AttributeError:
config = self.model.__pydantic_model__.__config__ # type: ignore
self._error_cache = list(flatten_errors(self.raw_errors, config))
return self._error_cache
def json(self, *, indent: Union[None, int, str] = 2) -> str:
return json.dumps(self.errors(), indent=indent, default=pydantic_encoder)
def __str__(self) -> str:
errors = self.errors()
no_errors = len(errors)
return (
f'{no_errors} validation error{"" if no_errors == 1 else "s"} for {self.model.__name__}\n'
f'{display_errors(errors)}'
)
def __repr_args__(self) -> 'ReprArgs':
return [('model', self.model.__name__), ('errors', self.errors())]
def display_errors(errors: List['ErrorDict']) -> str:
return '\n'.join(f'{_display_error_loc(e)}\n {e["msg"]} ({_display_error_type_and_ctx(e)})' for e in errors)
def _display_error_loc(error: 'ErrorDict') -> str:
return ' -> '.join(str(e) for e in error['loc'])
def _display_error_type_and_ctx(error: 'ErrorDict') -> str:
t = 'type=' + error['type']
ctx = error.get('ctx')
if ctx:
return t + ''.join(f'; {k}={v}' for k, v in ctx.items())
else:
return t
def flatten_errors(
errors: Sequence[Any], config: Type['BaseConfig'], loc: Optional['Loc'] = None
) -> Generator['ErrorDict', None, None]:
for error in errors:
if isinstance(error, ErrorWrapper):
if loc:
error_loc = loc + error.loc_tuple()
else:
error_loc = error.loc_tuple()
if isinstance(error.exc, ValidationError):
yield from flatten_errors(error.exc.raw_errors, config, error_loc)
else:
yield error_dict(error.exc, config, error_loc)
elif isinstance(error, list):
yield from flatten_errors(error, config, loc=loc)
else:
raise RuntimeError(f'Unknown error object: {error}')
def error_dict(exc: Exception, config: Type['BaseConfig'], loc: 'Loc') -> 'ErrorDict':
type_ = get_exc_type(exc.__class__)
msg_template = config.error_msg_templates.get(type_) or getattr(exc, 'msg_template', None)
ctx = exc.__dict__
if msg_template:
msg = msg_template.format(**ctx)
else:
msg = str(exc)
d: 'ErrorDict' = {'loc': loc, 'msg': msg, 'type': type_}
if ctx:
d['ctx'] = ctx
return d
_EXC_TYPE_CACHE: Dict[Type[Exception], str] = {}
def get_exc_type(cls: Type[Exception]) -> str:
# slightly more efficient than using lru_cache since we don't need to worry about the cache filling up
try:
return _EXC_TYPE_CACHE[cls]
except KeyError:
r = _get_exc_type(cls)
_EXC_TYPE_CACHE[cls] = r
return r
def _get_exc_type(cls: Type[Exception]) -> str:
if issubclass(cls, AssertionError):
return 'assertion_error'
base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'
if cls in (TypeError, ValueError):
# just TypeError or ValueError, no extra code
return base_name
# if it's not a TypeError or ValueError, we just take the lowercase of the exception name
# no chaining or snake case logic, use "code" for more complex error types.
code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()
return base_name + '.' + code
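
# Editor's sketch: how an ErrorWrapper flattens into the error dicts above;
# _Config is a hypothetical stand-in for a model's __config__.
def _example() -> None:
    class _Config:
        error_msg_templates = {}

    wrapper = ErrorWrapper(ValueError("value is bad"), loc=("body", "x"))
    errors = list(flatten_errors([wrapper], _Config))
    # [{'loc': ('body', 'x'), 'msg': 'value is bad', 'type': 'value_error'}]
    print(display_errors(errors))
    # body -> x
    #   value is bad (type=value_error)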

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/parse.py
import json
import pickle
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Union
from .types import StrBytes
class Protocol(str, Enum):
json = 'json'
pickle = 'pickle'
def load_str_bytes(
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
) -> Any:
if proto is None and content_type:
if content_type.endswith(('json', 'javascript')):
pass
elif allow_pickle and content_type.endswith('pickle'):
proto = Protocol.pickle
else:
raise TypeError(f'Unknown content-type: {content_type}')
proto = proto or Protocol.json
if proto == Protocol.json:
if isinstance(b, bytes):
b = b.decode(encoding)
return json_loads(b)
elif proto == Protocol.pickle:
if not allow_pickle:
raise RuntimeError('Trying to decode with pickle with allow_pickle=False')
bb = b if isinstance(b, bytes) else b.encode()
return pickle.loads(bb)
else:
raise TypeError(f'Unknown protocol: {proto}')
def load_file(
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
) -> Any:
path = Path(path)
b = path.read_bytes()
if content_type is None:
if path.suffix in ('.js', '.json'):
proto = Protocol.json
elif path.suffix == '.pkl':
proto = Protocol.pickle
return load_str_bytes(
b, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle, json_loads=json_loads
)
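
# Editor's usage sketch: protocol selection in load_str_bytes, with a tiny
# hypothetical JSON payload.
def _example() -> None:
    obj = load_str_bytes(b'{"a": 1}', content_type="application/json")
    assert obj == {"a": 1}
    try:
        # Pickle is refused unless allow_pickle=True is passed explicitly.
        load_str_bytes(b"\x80\x04N.", proto=Protocol.pickle)
    except RuntimeError:
        pass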

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/validators.py
import re
from collections import OrderedDict, deque
from collections.abc import Hashable as CollectionsHashable
from datetime import date, datetime, time, timedelta
from decimal import Decimal, DecimalException
from enum import Enum, IntEnum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Deque,
Dict,
FrozenSet,
Generator,
Hashable,
List,
NamedTuple,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from uuid import UUID
from . import errors
from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
from .typing import (
AnyCallable,
ForwardRef,
all_literal_values,
display_as_type,
get_class,
is_callable_type,
is_literal_type,
is_namedtuple,
is_none_type,
is_typeddict,
)
from .utils import almost_equal_floats, lenient_issubclass, sequence_like
if TYPE_CHECKING:
from typing_extensions import Literal, TypedDict
from .config import BaseConfig
from .fields import ModelField
from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt
ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt]
AnyOrderedDict = OrderedDict[Any, Any]
Number = Union[int, float, Decimal]
StrBytes = Union[str, bytes]
def str_validator(v: Any) -> Union[str]:
if isinstance(v, str):
if isinstance(v, Enum):
return v.value
else:
return v
elif isinstance(v, (float, int, Decimal)):
# is there anything else we want to add here? If you think so, create an issue.
return str(v)
elif isinstance(v, (bytes, bytearray)):
return v.decode()
else:
raise errors.StrError()
def strict_str_validator(v: Any) -> Union[str]:
if isinstance(v, str) and not isinstance(v, Enum):
return v
raise errors.StrError()
def bytes_validator(v: Any) -> Union[bytes]:
if isinstance(v, bytes):
return v
elif isinstance(v, bytearray):
return bytes(v)
elif isinstance(v, str):
return v.encode()
elif isinstance(v, (float, int, Decimal)):
return str(v).encode()
else:
raise errors.BytesError()
def strict_bytes_validator(v: Any) -> Union[bytes]:
if isinstance(v, bytes):
return v
elif isinstance(v, bytearray):
return bytes(v)
else:
raise errors.BytesError()
BOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'}
BOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'}
def bool_validator(v: Any) -> bool:
if v is True or v is False:
return v
if isinstance(v, bytes):
v = v.decode()
if isinstance(v, str):
v = v.lower()
try:
if v in BOOL_TRUE:
return True
if v in BOOL_FALSE:
return False
except TypeError:
raise errors.BoolError()
raise errors.BoolError()
def int_validator(v: Any) -> int:
if isinstance(v, int) and not (v is True or v is False):
return v
try:
return int(v)
except (TypeError, ValueError):
raise errors.IntegerError()
def strict_int_validator(v: Any) -> int:
if isinstance(v, int) and not (v is True or v is False):
return v
raise errors.IntegerError()
def float_validator(v: Any) -> float:
if isinstance(v, float):
return v
try:
return float(v)
except (TypeError, ValueError):
raise errors.FloatError()
def strict_float_validator(v: Any) -> float:
if isinstance(v, float):
return v
raise errors.FloatError()
def number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number':
field_type: ConstrainedNumber = field.type_
if field_type.multiple_of is not None:
mod = float(v) / float(field_type.multiple_of) % 1
if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0):
raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of)
return v
def number_size_validator(v: 'Number', field: 'ModelField') -> 'Number':
field_type: ConstrainedNumber = field.type_
if field_type.gt is not None and not v > field_type.gt:
raise errors.NumberNotGtError(limit_value=field_type.gt)
elif field_type.ge is not None and not v >= field_type.ge:
raise errors.NumberNotGeError(limit_value=field_type.ge)
if field_type.lt is not None and not v < field_type.lt:
raise errors.NumberNotLtError(limit_value=field_type.lt)
if field_type.le is not None and not v <= field_type.le:
raise errors.NumberNotLeError(limit_value=field_type.le)
return v
def constant_validator(v: 'Any', field: 'ModelField') -> 'Any':
"""Validate ``const`` fields.
The value provided for a ``const`` field must be equal to the default value
of the field. This is to support the keyword of the same name in JSON
Schema.
"""
if v != field.default:
raise errors.WrongConstantError(given=v, permitted=[field.default])
return v
def anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes':
v_len = len(v)
min_length = config.min_anystr_length
if v_len < min_length:
raise errors.AnyStrMinLengthError(limit_value=min_length)
max_length = config.max_anystr_length
if max_length is not None and v_len > max_length:
raise errors.AnyStrMaxLengthError(limit_value=max_length)
return v
def anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes':
return v.strip()
def anystr_lower(v: 'StrBytes') -> 'StrBytes':
return v.lower()
def ordered_dict_validator(v: Any) -> 'AnyOrderedDict':
if isinstance(v, OrderedDict):
return v
try:
return OrderedDict(v)
except (TypeError, ValueError):
raise errors.DictError()
def dict_validator(v: Any) -> Dict[Any, Any]:
if isinstance(v, dict):
return v
try:
return dict(v)
except (TypeError, ValueError):
raise errors.DictError()
def list_validator(v: Any) -> List[Any]:
if isinstance(v, list):
return v
elif sequence_like(v):
return list(v)
else:
raise errors.ListError()
def tuple_validator(v: Any) -> Tuple[Any, ...]:
if isinstance(v, tuple):
return v
elif sequence_like(v):
return tuple(v)
else:
raise errors.TupleError()
def set_validator(v: Any) -> Set[Any]:
if isinstance(v, set):
return v
elif sequence_like(v):
return set(v)
else:
raise errors.SetError()
def frozenset_validator(v: Any) -> FrozenSet[Any]:
if isinstance(v, frozenset):
return v
elif sequence_like(v):
return frozenset(v)
else:
raise errors.FrozenSetError()
def deque_validator(v: Any) -> Deque[Any]:
if isinstance(v, deque):
return v
elif sequence_like(v):
return deque(v)
else:
raise errors.DequeError()
def enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum:
try:
enum_v = field.type_(v)
except ValueError:
# field.type_ should be an enum, so will be iterable
raise errors.EnumMemberError(enum_values=list(field.type_))
return enum_v.value if config.use_enum_values else enum_v
def uuid_validator(v: Any, field: 'ModelField') -> UUID:
try:
if isinstance(v, str):
v = UUID(v)
elif isinstance(v, (bytes, bytearray)):
try:
v = UUID(v.decode())
except ValueError:
# 16 bytes in big-endian order as the bytes argument fail
# the above check
v = UUID(bytes=v)
except ValueError:
raise errors.UUIDError()
if not isinstance(v, UUID):
raise errors.UUIDError()
required_version = getattr(field.type_, '_required_version', None)
if required_version and v.version != required_version:
raise errors.UUIDVersionError(required_version=required_version)
return v
def decimal_validator(v: Any) -> Decimal:
if isinstance(v, Decimal):
return v
elif isinstance(v, (bytes, bytearray)):
v = v.decode()
v = str(v).strip()
try:
v = Decimal(v)
except DecimalException:
raise errors.DecimalError()
if not v.is_finite():
raise errors.DecimalIsNotFiniteError()
return v
def hashable_validator(v: Any) -> Hashable:
if isinstance(v, Hashable):
return v
raise errors.HashableError()
def ip_v4_address_validator(v: Any) -> IPv4Address:
if isinstance(v, IPv4Address):
return v
try:
return IPv4Address(v)
except ValueError:
raise errors.IPv4AddressError()
def ip_v6_address_validator(v: Any) -> IPv6Address:
if isinstance(v, IPv6Address):
return v
try:
return IPv6Address(v)
except ValueError:
raise errors.IPv6AddressError()
def ip_v4_network_validator(v: Any) -> IPv4Network:
"""
Assume IPv4Network initialised with a default ``strict`` argument
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network
"""
if isinstance(v, IPv4Network):
return v
try:
return IPv4Network(v)
except ValueError:
raise errors.IPv4NetworkError()
def ip_v6_network_validator(v: Any) -> IPv6Network:
"""
Assume IPv6Network initialised with a default ``strict`` argument
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network
"""
if isinstance(v, IPv6Network):
return v
try:
return IPv6Network(v)
except ValueError:
raise errors.IPv6NetworkError()
def ip_v4_interface_validator(v: Any) -> IPv4Interface:
if isinstance(v, IPv4Interface):
return v
try:
return IPv4Interface(v)
except ValueError:
raise errors.IPv4InterfaceError()
def ip_v6_interface_validator(v: Any) -> IPv6Interface:
if isinstance(v, IPv6Interface):
return v
try:
return IPv6Interface(v)
except ValueError:
raise errors.IPv6InterfaceError()
def path_validator(v: Any) -> Path:
if isinstance(v, Path):
return v
try:
return Path(v)
except TypeError:
raise errors.PathError()
def path_exists_validator(v: Any) -> Path:
if not v.exists():
raise errors.PathNotExistsError(path=v)
return v
def callable_validator(v: Any) -> AnyCallable:
"""
Perform a simple check if the value is callable.
Note: complete matching of argument type hints and return types is not performed
"""
if callable(v):
return v
raise errors.CallableError(value=v)
def enum_validator(v: Any) -> Enum:
if isinstance(v, Enum):
return v
raise errors.EnumError(value=v)
def int_enum_validator(v: Any) -> IntEnum:
if isinstance(v, IntEnum):
return v
raise errors.IntEnumError(value=v)
def make_literal_validator(type_: Any) -> Callable[[Any], Any]:
permitted_choices = all_literal_values(type_)
# To have a O(1) complexity and still return one of the values set inside the `Literal`,
# we create a dict with the set values (a set causes some problems with the way intersection works).
# In some cases the set value and checked value can indeed be different (see `test_literal_validator_str_enum`)
allowed_choices = {v: v for v in permitted_choices}
def literal_validator(v: Any) -> Any:
try:
return allowed_choices[v]
except KeyError:
raise errors.WrongConstantError(given=v, permitted=permitted_choices)
return literal_validator
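# Editor's sketch of the lookup above (assuming ``from typing import Literal``):
#
#   validator = make_literal_validator(Literal['a', 'b'])
#   validator('a')  # -> 'a'
#   validator('c')  # -> raises WrongConstantError(given='c', permitted=('a', 'b'))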
def constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
v_len = len(v)
min_length = field.type_.min_length if field.type_.min_length is not None else config.min_anystr_length
if v_len < min_length:
raise errors.AnyStrMinLengthError(limit_value=min_length)
max_length = field.type_.max_length if field.type_.max_length is not None else config.max_anystr_length
if max_length is not None and v_len > max_length:
raise errors.AnyStrMaxLengthError(limit_value=max_length)
return v
def constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace
if strip_whitespace:
v = v.strip()
return v
def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
lower = field.type_.to_lower or config.anystr_lower
if lower:
v = v.lower()
return v
def validate_json(v: Any, config: 'BaseConfig') -> Any:
if v is None:
# pass None through to other validators
return v
try:
return config.json_loads(v) # type: ignore
except ValueError:
raise errors.JsonError()
except TypeError:
raise errors.JsonTypeError()
T = TypeVar('T')
def make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]:
def arbitrary_type_validator(v: Any) -> T:
if isinstance(v, type_):
return v
raise errors.ArbitraryTypeError(expected_arbitrary_type=type_)
return arbitrary_type_validator
def make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]:
def class_validator(v: Any) -> Type[T]:
if lenient_issubclass(v, type_):
return v
raise errors.SubclassError(expected_class=type_)
return class_validator
def any_class_validator(v: Any) -> Type[T]:
if isinstance(v, type):
return v
raise errors.ClassError()
def none_validator(v: Any) -> 'Literal[None]':
if v is None:
return v
raise errors.NotNoneError()
def pattern_validator(v: Any) -> Pattern[str]:
if isinstance(v, Pattern):
return v
str_value = str_validator(v)
try:
return re.compile(str_value)
except re.error:
raise errors.PatternError()
NamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple)
def make_namedtuple_validator(namedtuple_cls: Type[NamedTupleT]) -> Callable[[Tuple[Any, ...]], NamedTupleT]:
from .annotated_types import create_model_from_namedtuple
NamedTupleModel = create_model_from_namedtuple(
namedtuple_cls,
__module__=namedtuple_cls.__module__,
)
namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined]
def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT:
annotations = NamedTupleModel.__annotations__
if len(values) > len(annotations):
raise errors.ListMaxLengthError(limit_value=len(annotations))
dict_values: Dict[str, Any] = dict(zip(annotations, values))
validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values))
return namedtuple_cls(**validated_dict_values)
return namedtuple_validator
def make_typeddict_validator(
typeddict_cls: Type['TypedDict'], config: Type['BaseConfig'] # type: ignore[valid-type]
) -> Callable[[Any], Dict[str, Any]]:
from .annotated_types import create_model_from_typeddict
TypedDictModel = create_model_from_typeddict(
typeddict_cls,
__config__=config,
__module__=typeddict_cls.__module__,
)
typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined]
def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]: # type: ignore[valid-type]
return TypedDictModel.parse_obj(values).dict(exclude_unset=True)
return typeddict_validator
class IfConfig:
def __init__(self, validator: AnyCallable, *config_attr_names: str) -> None:
self.validator = validator
self.config_attr_names = config_attr_names
def check(self, config: Type['BaseConfig']) -> bool:
return any(getattr(config, name) not in {None, False} for name in self.config_attr_names)
# order is important here, for example: bool is a subclass of int so it has to come first; for the same reason,
# datetime comes before date, IPv4Interface before IPv4Address, etc.
_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [
(IntEnum, [int_validator, enum_member_validator]),
(Enum, [enum_member_validator]),
(
str,
[
str_validator,
IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
IfConfig(anystr_lower, 'anystr_lower'),
IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
],
),
(
bytes,
[
bytes_validator,
IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
IfConfig(anystr_lower, 'anystr_lower'),
IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
],
),
(bool, [bool_validator]),
(int, [int_validator]),
(float, [float_validator]),
(Path, [path_validator]),
(datetime, [parse_datetime]),
(date, [parse_date]),
(time, [parse_time]),
(timedelta, [parse_duration]),
(OrderedDict, [ordered_dict_validator]),
(dict, [dict_validator]),
(list, [list_validator]),
(tuple, [tuple_validator]),
(set, [set_validator]),
(frozenset, [frozenset_validator]),
(deque, [deque_validator]),
(UUID, [uuid_validator]),
(Decimal, [decimal_validator]),
(IPv4Interface, [ip_v4_interface_validator]),
(IPv6Interface, [ip_v6_interface_validator]),
(IPv4Address, [ip_v4_address_validator]),
(IPv6Address, [ip_v6_address_validator]),
(IPv4Network, [ip_v4_network_validator]),
(IPv6Network, [ip_v6_network_validator]),
]
def find_validators( # noqa: C901 (ignore complexity)
type_: Type[Any], config: Type['BaseConfig']
) -> Generator[AnyCallable, None, None]:
from .dataclasses import is_builtin_dataclass, make_dataclass_validator
if type_ is Any or type_ is object:
return
type_type = type_.__class__
if type_type == ForwardRef or type_type == TypeVar:
return
if is_none_type(type_):
yield none_validator
return
if type_ is Pattern:
yield pattern_validator
return
if type_ is Hashable or type_ is CollectionsHashable:
yield hashable_validator
return
if is_callable_type(type_):
yield callable_validator
return
if is_literal_type(type_):
yield make_literal_validator(type_)
return
if is_builtin_dataclass(type_):
yield from make_dataclass_validator(type_, config)
return
if type_ is Enum:
yield enum_validator
return
if type_ is IntEnum:
yield int_enum_validator
return
if is_namedtuple(type_):
yield tuple_validator
yield make_namedtuple_validator(type_)
return
if is_typeddict(type_):
yield make_typeddict_validator(type_, config)
return
class_ = get_class(type_)
if class_ is not None:
if isinstance(class_, type):
yield make_class_validator(class_)
else:
yield any_class_validator
return
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
for v in validators:
if isinstance(v, IfConfig):
if v.check(config):
yield v.validator
else:
yield v
return
except TypeError:
raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})')
if config.arbitrary_types_allowed:
yield make_arbitrary_type_validator(type_)
else:
raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config')
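
# Editor's sketch: why _VALIDATORS ordering matters, using bool (a subclass
# of int) as the example; BaseConfig is imported lazily to avoid import cycles.
def _example() -> None:
    from pydantic import BaseConfig

    # bool_validator normalises the usual truthy spellings:
    for candidate in (True, 1, "yes", b"on"):
        assert bool_validator(candidate) is True

    # Because (bool, ...) precedes (int, ...) in _VALIDATORS, a bool
    # annotation resolves to bool_validator, not int_validator.
    assert next(find_validators(bool, BaseConfig)) is bool_validator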

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/mypy.py
from configparser import ConfigParser
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type as TypingType, Union
from mypy.errorcodes import ErrorCode
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR2,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Context,
Decorator,
EllipsisExpr,
FuncBase,
FuncDef,
JsonDict,
MemberExpr,
NameExpr,
PassStmt,
PlaceholderNode,
RefExpr,
StrExpr,
SymbolNode,
SymbolTableNode,
TempNode,
TypeInfo,
TypeVarExpr,
Var,
)
from mypy.options import Options
from mypy.plugin import CheckerPluginInterface, ClassDefContext, MethodContext, Plugin, SemanticAnalyzerPluginInterface
from mypy.plugins import dataclasses
from mypy.semanal import set_callable_name # type: ignore
from mypy.server.trigger import make_wildcard_trigger
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
Type,
TypeOfAny,
TypeType,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
from mypy.util import get_unique_redefinition_name
from mypy.version import __version__ as mypy_version
from pydantic.utils import is_valid_field
try:
from mypy.types import TypeVarDef # type: ignore[attr-defined]
except ImportError: # pragma: no cover
# Backward-compatible with TypeVarDef from Mypy 0.910.
from mypy.types import TypeVarType as TypeVarDef
CONFIGFILE_KEY = 'pydantic-mypy'
METADATA_KEY = 'pydantic-mypy-metadata'
BASEMODEL_FULLNAME = 'pydantic.main.BaseModel'
BASESETTINGS_FULLNAME = 'pydantic.env_settings.BaseSettings'
FIELD_FULLNAME = 'pydantic.fields.Field'
DATACLASS_FULLNAME = 'pydantic.dataclasses.dataclass'
BUILTINS_NAME = 'builtins' if float(mypy_version) >= 0.930 else '__builtins__'
def plugin(version: str) -> 'TypingType[Plugin]':
"""
`version` is the mypy version string
We might want to use this to print a warning if the mypy version being used is
newer, or especially older, than we expect (or need).
"""
return PydanticPlugin
class PydanticPlugin(Plugin):
def __init__(self, options: Options) -> None:
self.plugin_config = PydanticPluginConfig(options)
super().__init__(options)
def get_base_class_hook(self, fullname: str) -> 'Optional[Callable[[ClassDefContext], None]]':
sym = self.lookup_fully_qualified(fullname)
if sym and isinstance(sym.node, TypeInfo): # pragma: no branch
# No branching may occur if the mypy cache has not been cleared
if any(get_fullname(base) == BASEMODEL_FULLNAME for base in sym.node.mro):
return self._pydantic_model_class_maker_callback
return None
def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]:
if fullname.endswith('.from_orm'):
return from_orm_callback
return None
def get_class_decorator_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
if fullname == DATACLASS_FULLNAME:
return dataclasses.dataclass_class_maker_callback
return None
def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None:
transformer = PydanticModelTransformer(ctx, self.plugin_config)
transformer.transform()
class PydanticPluginConfig:
__slots__ = ('init_forbid_extra', 'init_typed', 'warn_required_dynamic_aliases', 'warn_untyped_fields')
init_forbid_extra: bool
init_typed: bool
warn_required_dynamic_aliases: bool
warn_untyped_fields: bool
def __init__(self, options: Options) -> None:
if options.config_file is None: # pragma: no cover
return
toml_config = parse_toml(options.config_file)
if toml_config is not None:
config = toml_config.get('tool', {}).get('pydantic-mypy', {})
for key in self.__slots__:
setting = config.get(key, False)
if not isinstance(setting, bool):
raise ValueError(f'Configuration value must be a boolean for key: {key}')
setattr(self, key, setting)
else:
plugin_config = ConfigParser()
plugin_config.read(options.config_file)
for key in self.__slots__:
setting = plugin_config.getboolean(CONFIGFILE_KEY, key, fallback=False)
setattr(self, key, setting)
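# Editor's sketch: the settings this class reads. In mypy.ini / setup.cfg
# form ([pydantic-mypy] is CONFIGFILE_KEY above):
#
#   [mypy]
#   plugins = pydantic.mypy
#
#   [pydantic-mypy]
#   init_forbid_extra = True
#   init_typed = True
#   warn_required_dynamic_aliases = True
#   warn_untyped_fields = True
#
# In pyproject.toml, the same keys live under [tool.pydantic-mypy].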
def from_orm_callback(ctx: MethodContext) -> Type:
"""
Raise an error if orm_mode is not enabled
"""
model_type: Instance
if isinstance(ctx.type, CallableType) and isinstance(ctx.type.ret_type, Instance):
model_type = ctx.type.ret_type # called on the class
elif isinstance(ctx.type, Instance):
model_type = ctx.type # called on an instance (unusual, but still valid)
else: # pragma: no cover
detail = f'ctx.type: {ctx.type} (of type {ctx.type.__class__.__name__})'
error_unexpected_behavior(detail, ctx.api, ctx.context)
return ctx.default_return_type
pydantic_metadata = model_type.type.metadata.get(METADATA_KEY)
if pydantic_metadata is None:
return ctx.default_return_type
orm_mode = pydantic_metadata.get('config', {}).get('orm_mode')
if orm_mode is not True:
error_from_orm(get_name(model_type.type), ctx.api, ctx.context)
return ctx.default_return_type
class PydanticModelTransformer:
tracked_config_fields: Set[str] = {
'extra',
'allow_mutation',
'frozen',
'orm_mode',
'allow_population_by_field_name',
'alias_generator',
}
def __init__(self, ctx: ClassDefContext, plugin_config: PydanticPluginConfig) -> None:
self._ctx = ctx
self.plugin_config = plugin_config
def transform(self) -> None:
"""
Configures the BaseModel subclass according to the plugin settings.
In particular:
* determines the model config and fields,
* adds a fields-aware signature for the initializer and construct methods
* freezes the class if allow_mutation = False or frozen = True
* stores the fields, config, and if the class is settings in the mypy metadata for access by subclasses
"""
ctx = self._ctx
info = self._ctx.cls.info
config = self.collect_config()
fields = self.collect_fields(config)
for field in fields:
if info[field.name].type is None:
if not ctx.api.final_iteration:
ctx.api.defer()
is_settings = any(get_fullname(base) == BASESETTINGS_FULLNAME for base in info.mro[:-1])
self.add_initializer(fields, config, is_settings)
self.add_construct_method(fields)
self.set_frozen(fields, frozen=config.allow_mutation is False or config.frozen is True)
info.metadata[METADATA_KEY] = {
'fields': {field.name: field.serialize() for field in fields},
'config': config.set_values_dict(),
}
def collect_config(self) -> 'ModelConfigData':
"""
Collects the values of the config attributes that are used by the plugin, accounting for parent classes.
"""
ctx = self._ctx
cls = ctx.cls
config = ModelConfigData()
for stmt in cls.defs.body:
if not isinstance(stmt, ClassDef):
continue
if stmt.name == 'Config':
for substmt in stmt.defs.body:
if not isinstance(substmt, AssignmentStmt):
continue
config.update(self.get_config_update(substmt))
if (
config.has_alias_generator
and not config.allow_population_by_field_name
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(ctx.api, stmt)
for info in cls.info.mro[1:]: # 0 is the current class
if METADATA_KEY not in info.metadata:
continue
# Each class depends on the set of fields in its ancestors
ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
for name, value in info.metadata[METADATA_KEY]['config'].items():
config.setdefault(name, value)
return config
def collect_fields(self, model_config: 'ModelConfigData') -> List['PydanticModelField']:
"""
Collects the fields for the model, accounting for parent classes
"""
# First, collect fields belonging to the current class.
ctx = self._ctx
cls = self._ctx.cls
fields = [] # type: List[PydanticModelField]
known_fields = set() # type: Set[str]
for stmt in cls.defs.body:
if not isinstance(stmt, AssignmentStmt): # `and stmt.new_syntax` to require annotation
continue
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr) or not is_valid_field(lhs.name):
continue
if not stmt.new_syntax and self.plugin_config.warn_untyped_fields:
error_untyped_fields(ctx.api, stmt)
# if lhs.name == '__config__': # BaseConfig not well handled; I'm not sure why yet
# continue
sym = cls.info.names.get(lhs.name)
if sym is None: # pragma: no cover
# This is likely due to a star import (see the dataclasses plugin for a more detailed explanation)
# This is the same logic used in the dataclasses plugin
continue
node = sym.node
if isinstance(node, PlaceholderNode): # pragma: no cover
# See the PlaceholderNode docstring for more detail about how this can occur
# Basically, it is an edge case when dealing with complex import logic
# This is the same logic used in the dataclasses plugin
continue
if not isinstance(node, Var): # pragma: no cover
# Don't know if this edge case still happens with the `is_valid_field` check above
# but better safe than sorry
continue
# x: ClassVar[int] is ignored by dataclasses.
if node.is_classvar:
continue
is_required = self.get_is_required(cls, stmt, lhs)
alias, has_dynamic_alias = self.get_alias_info(stmt)
if (
has_dynamic_alias
and not model_config.allow_population_by_field_name
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(ctx.api, stmt)
fields.append(
PydanticModelField(
name=lhs.name,
is_required=is_required,
alias=alias,
has_dynamic_alias=has_dynamic_alias,
line=stmt.line,
column=stmt.column,
)
)
known_fields.add(lhs.name)
all_fields = fields.copy()
for info in cls.info.mro[1:]: # 0 is the current class, -2 is BaseModel, -1 is object
if METADATA_KEY not in info.metadata:
continue
superclass_fields = []
# Each class depends on the set of fields in its ancestors
ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info)))
for name, data in info.metadata[METADATA_KEY]['fields'].items():
if name not in known_fields:
field = PydanticModelField.deserialize(info, data)
known_fields.add(name)
superclass_fields.append(field)
else:
(field,) = [a for a in all_fields if a.name == name]
all_fields.remove(field)
superclass_fields.append(field)
all_fields = superclass_fields + all_fields
return all_fields
def add_initializer(self, fields: List['PydanticModelField'], config: 'ModelConfigData', is_settings: bool) -> None:
"""
Adds a fields-aware `__init__` method to the class.
The added `__init__` will be annotated with types vs. all `Any` depending on the plugin settings.
"""
ctx = self._ctx
typed = self.plugin_config.init_typed
use_alias = config.allow_population_by_field_name is not True
force_all_optional = is_settings or bool(
config.has_alias_generator and not config.allow_population_by_field_name
)
init_arguments = self.get_field_arguments(
fields, typed=typed, force_all_optional=force_all_optional, use_alias=use_alias
)
if not self.should_init_forbid_extra(fields, config):
var = Var('kwargs')
init_arguments.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2))
add_method(ctx, '__init__', init_arguments, NoneType())
def add_construct_method(self, fields: List['PydanticModelField']) -> None:
"""
Adds a fully typed `construct` classmethod to the class.
Similar to the fields-aware __init__ method, but always uses the field names (not aliases),
and does not treat settings fields as optional.
"""
ctx = self._ctx
set_str = ctx.api.named_type(f'{BUILTINS_NAME}.set', [ctx.api.named_type(f'{BUILTINS_NAME}.str')])
optional_set_str = UnionType([set_str, NoneType()])
fields_set_argument = Argument(Var('_fields_set', optional_set_str), optional_set_str, None, ARG_OPT)
construct_arguments = self.get_field_arguments(fields, typed=True, force_all_optional=False, use_alias=False)
construct_arguments = [fields_set_argument] + construct_arguments
obj_type = ctx.api.named_type(f'{BUILTINS_NAME}.object')
self_tvar_name = '_PydanticBaseModel' # Make sure it does not conflict with other names in the class
tvar_fullname = ctx.cls.fullname + '.' + self_tvar_name
tvd = TypeVarDef(self_tvar_name, tvar_fullname, -1, [], obj_type)
self_tvar_expr = TypeVarExpr(self_tvar_name, tvar_fullname, [], obj_type)
ctx.cls.info.names[self_tvar_name] = SymbolTableNode(MDEF, self_tvar_expr)
# Backward-compatible with TypeVarDef from Mypy 0.910.
if isinstance(tvd, TypeVarType):
self_type = tvd
else:
self_type = TypeVarType(tvd) # type: ignore[call-arg]
add_method(
ctx,
'construct',
construct_arguments,
return_type=self_type,
self_type=self_type,
tvar_def=tvd,
is_classmethod=True,
)
def set_frozen(self, fields: List['PydanticModelField'], frozen: bool) -> None:
"""
Marks all fields as properties so that attempts to set them trigger mypy errors.
This is the same approach used by the attrs and dataclasses plugins.
"""
info = self._ctx.cls.info
for field in fields:
sym_node = info.names.get(field.name)
if sym_node is not None:
var = sym_node.node
assert isinstance(var, Var)
var.is_property = frozen
else:
var = field.to_var(info, use_alias=False)
var.info = info
var.is_property = frozen
var._fullname = get_fullname(info) + '.' + get_name(var)
info.names[get_name(var)] = SymbolTableNode(MDEF, var)
def get_config_update(self, substmt: AssignmentStmt) -> Optional['ModelConfigData']:
"""
Determines the config update due to a single statement in the Config class definition.
        Warns if a tracked config attribute is set to a value the plugin doesn't know how to interpret (e.g., an int).
"""
lhs = substmt.lvalues[0]
if not (isinstance(lhs, NameExpr) and lhs.name in self.tracked_config_fields):
return None
if lhs.name == 'extra':
if isinstance(substmt.rvalue, StrExpr):
forbid_extra = substmt.rvalue.value == 'forbid'
elif isinstance(substmt.rvalue, MemberExpr):
forbid_extra = substmt.rvalue.name == 'forbid'
else:
error_invalid_config_value(lhs.name, self._ctx.api, substmt)
return None
return ModelConfigData(forbid_extra=forbid_extra)
if lhs.name == 'alias_generator':
has_alias_generator = True
if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname == 'builtins.None':
has_alias_generator = False
return ModelConfigData(has_alias_generator=has_alias_generator)
if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname in ('builtins.True', 'builtins.False'):
return ModelConfigData(**{lhs.name: substmt.rvalue.fullname == 'builtins.True'})
error_invalid_config_value(lhs.name, self._ctx.api, substmt)
return None
@staticmethod
def get_is_required(cls: ClassDef, stmt: AssignmentStmt, lhs: NameExpr) -> bool:
"""
Returns a boolean indicating whether the field defined in `stmt` is a required field.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only, so only non-required if Optional
value_type = get_proper_type(cls.info[lhs.name].type)
if isinstance(value_type, UnionType) and any(isinstance(item, NoneType) for item in value_type.items):
# Annotated as Optional, or otherwise having NoneType in the union
return False
return True
if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
# The "default value" is a call to `Field`; at this point, the field is
# only required if default is Ellipsis (i.e., `field_name: Annotation = Field(...)`)
return len(expr.args) > 0 and expr.args[0].__class__ is EllipsisExpr
# Only required if the "default value" is Ellipsis (i.e., `field_name: Annotation = ...`)
return isinstance(expr, EllipsisExpr)
@staticmethod
def get_alias_info(stmt: AssignmentStmt) -> Tuple[Optional[str], bool]:
"""
Returns a pair (alias, has_dynamic_alias), extracted from the declaration of the field defined in `stmt`.
`has_dynamic_alias` is True if and only if an alias is provided, but not as a string literal.
If `has_dynamic_alias` is True, `alias` will be None.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only
return None, False
if not (
isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME
):
# Assigned value is not a call to pydantic.fields.Field
return None, False
for i, arg_name in enumerate(expr.arg_names):
if arg_name != 'alias':
continue
arg = expr.args[i]
if isinstance(arg, StrExpr):
return arg.value, False
else:
return None, True
return None, False
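    # A minimal sketch of what `get_alias_info` distinguishes (illustrative only;
    # `make_alias` is a hypothetical helper, not part of pydantic):
    #
    #     class Model(BaseModel):
    #         a: int = Field(..., alias='A')           # -> ('A', False): static alias
    #         b: int = Field(..., alias=make_alias())  # -> (None, True): dynamic alias
    #         c: int = 1                               # -> (None, False): no alias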
def get_field_arguments(
self, fields: List['PydanticModelField'], typed: bool, force_all_optional: bool, use_alias: bool
) -> List[Argument]:
"""
Helper function used during the construction of the `__init__` and `construct` method signatures.
Returns a list of mypy Argument instances for use in the generated signatures.
"""
info = self._ctx.cls.info
arguments = [
field.to_argument(info, typed=typed, force_optional=force_all_optional, use_alias=use_alias)
for field in fields
if not (use_alias and field.has_dynamic_alias)
]
return arguments
def should_init_forbid_extra(self, fields: List['PydanticModelField'], config: 'ModelConfigData') -> bool:
"""
        Indicates whether the generated `__init__` should forbid extra kwargs (i.e., omit the trailing `**kwargs`).
        We disallow arbitrary kwargs if the extra config setting is "forbid", or if the plugin config says to,
        *unless* a required dynamic alias is present (since then we can't determine a valid signature).
"""
if not config.allow_population_by_field_name:
if self.is_dynamic_alias_present(fields, bool(config.has_alias_generator)):
return False
if config.forbid_extra:
return True
return self.plugin_config.init_forbid_extra
@staticmethod
def is_dynamic_alias_present(fields: List['PydanticModelField'], has_alias_generator: bool) -> bool:
"""
Returns whether any fields on the model have a "dynamic alias", i.e., an alias that cannot be
determined during static analysis.
"""
for field in fields:
if field.has_dynamic_alias:
return True
if has_alias_generator:
for field in fields:
if field.alias is None:
return True
return False
class PydanticModelField:
def __init__(
self, name: str, is_required: bool, alias: Optional[str], has_dynamic_alias: bool, line: int, column: int
):
self.name = name
self.is_required = is_required
self.alias = alias
self.has_dynamic_alias = has_dynamic_alias
self.line = line
self.column = column
def to_var(self, info: TypeInfo, use_alias: bool) -> Var:
name = self.name
if use_alias and self.alias is not None:
name = self.alias
return Var(name, info[self.name].type)
def to_argument(self, info: TypeInfo, typed: bool, force_optional: bool, use_alias: bool) -> Argument:
if typed and info[self.name].type is not None:
type_annotation = info[self.name].type
else:
type_annotation = AnyType(TypeOfAny.explicit)
return Argument(
variable=self.to_var(info, use_alias),
type_annotation=type_annotation,
initializer=None,
kind=ARG_NAMED_OPT if force_optional or not self.is_required else ARG_NAMED,
)
def serialize(self) -> JsonDict:
return self.__dict__
@classmethod
def deserialize(cls, info: TypeInfo, data: JsonDict) -> 'PydanticModelField':
return cls(**data)
class ModelConfigData:
def __init__(
self,
forbid_extra: Optional[bool] = None,
allow_mutation: Optional[bool] = None,
frozen: Optional[bool] = None,
orm_mode: Optional[bool] = None,
allow_population_by_field_name: Optional[bool] = None,
has_alias_generator: Optional[bool] = None,
):
self.forbid_extra = forbid_extra
self.allow_mutation = allow_mutation
self.frozen = frozen
self.orm_mode = orm_mode
self.allow_population_by_field_name = allow_population_by_field_name
self.has_alias_generator = has_alias_generator
def set_values_dict(self) -> Dict[str, Any]:
return {k: v for k, v in self.__dict__.items() if v is not None}
def update(self, config: Optional['ModelConfigData']) -> None:
if config is None:
return
for k, v in config.set_values_dict().items():
setattr(self, k, v)
def setdefault(self, key: str, value: Any) -> None:
if getattr(self, key) is None:
setattr(self, key, value)
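# A minimal sketch of how the merge helpers above behave (illustrative only):
#
#     base = ModelConfigData(forbid_extra=True)
#     base.update(ModelConfigData(frozen=True))  # only non-None values override
#     base.setdefault('orm_mode', False)         # fills unset keys, keeps set ones
#     base.set_values_dict()  # -> {'forbid_extra': True, 'frozen': True, 'orm_mode': False}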
ERROR_ORM = ErrorCode('pydantic-orm', 'Invalid from_orm call', 'Pydantic')
ERROR_CONFIG = ErrorCode('pydantic-config', 'Invalid config value', 'Pydantic')
ERROR_ALIAS = ErrorCode('pydantic-alias', 'Dynamic alias disallowed', 'Pydantic')
ERROR_UNEXPECTED = ErrorCode('pydantic-unexpected', 'Unexpected behavior', 'Pydantic')
ERROR_UNTYPED = ErrorCode('pydantic-field', 'Untyped field disallowed', 'Pydantic')
def error_from_orm(model_name: str, api: CheckerPluginInterface, context: Context) -> None:
api.fail(f'"{model_name}" does not have orm_mode=True', context, code=ERROR_ORM)
def error_invalid_config_value(name: str, api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail(f'Invalid value for "Config.{name}"', context, code=ERROR_CONFIG)
def error_required_dynamic_aliases(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail('Required dynamic aliases disallowed', context, code=ERROR_ALIAS)
def error_unexpected_behavior(detail: str, api: CheckerPluginInterface, context: Context) -> None: # pragma: no cover
# Can't think of a good way to test this, but I confirmed it renders as desired by adding to a non-error path
link = 'https://github.com/samuelcolvin/pydantic/issues/new/choose'
full_message = f'The pydantic mypy plugin ran into unexpected behavior: {detail}\n'
full_message += f'Please consider reporting this bug at {link} so we can try to fix it!'
api.fail(full_message, context, code=ERROR_UNEXPECTED)
def error_untyped_fields(api: SemanticAnalyzerPluginInterface, context: Context) -> None:
api.fail('Untyped fields disallowed', context, code=ERROR_UNTYPED)
def add_method(
ctx: ClassDefContext,
name: str,
args: List[Argument],
return_type: Type,
self_type: Optional[Type] = None,
tvar_def: Optional[TypeVarDef] = None,
is_classmethod: bool = False,
is_new: bool = False,
# is_staticmethod: bool = False,
) -> None:
"""
Adds a new method to a class.
This can be dropped if/when https://github.com/python/mypy/issues/7301 is merged
"""
info = ctx.cls.info
# First remove any previously generated methods with the same name
# to avoid clashes and problems in the semantic analyzer.
if name in info.names:
sym = info.names[name]
if sym.plugin_generated and isinstance(sym.node, FuncDef):
ctx.cls.defs.body.remove(sym.node)
self_type = self_type or fill_typevars(info)
if is_classmethod or is_new:
first = [Argument(Var('_cls'), TypeType.make_normalized(self_type), None, ARG_POS)]
# elif is_staticmethod:
# first = []
else:
first = [Argument(Var('__pydantic_self__'), self_type, None, ARG_POS)]
args = first + args
arg_types, arg_names, arg_kinds = [], [], []
for arg in args:
assert arg.type_annotation, 'All arguments must be fully typed.'
arg_types.append(arg.type_annotation)
arg_names.append(get_name(arg.variable))
arg_kinds.append(arg.kind)
function_type = ctx.api.named_type(f'{BUILTINS_NAME}.function')
signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type)
if tvar_def:
signature.variables = [tvar_def]
func = FuncDef(name, args, Block([PassStmt()]))
func.info = info
func.type = set_callable_name(signature, func)
func.is_class = is_classmethod
# func.is_static = is_staticmethod
func._fullname = get_fullname(info) + '.' + name
func.line = info.line
# NOTE: we would like the plugin generated node to dominate, but we still
# need to keep any existing definitions so they get semantically analyzed.
if name in info.names:
# Get a nice unique name instead.
r_name = get_unique_redefinition_name(name, info.names)
info.names[r_name] = info.names[name]
if is_classmethod: # or is_staticmethod:
func.is_decorated = True
v = Var(name, func.type)
v.info = info
v._fullname = func._fullname
# if is_classmethod:
v.is_classmethod = True
dec = Decorator(func, [NameExpr('classmethod')], v)
# else:
# v.is_staticmethod = True
# dec = Decorator(func, [NameExpr('staticmethod')], v)
dec.line = info.line
sym = SymbolTableNode(MDEF, dec)
else:
sym = SymbolTableNode(MDEF, func)
sym.plugin_generated = True
info.names[name] = sym
info.defn.defs.body.append(func)
def get_fullname(x: Union[FuncBase, SymbolNode]) -> str:
"""
Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
"""
fn = x.fullname
if callable(fn): # pragma: no cover
return fn()
return fn
def get_name(x: Union[FuncBase, SymbolNode]) -> str:
"""
Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped.
"""
fn = x.name
if callable(fn): # pragma: no cover
return fn()
return fn
def parse_toml(config_file: str) -> Optional[Dict[str, Any]]:
if not config_file.endswith('.toml'):
return None
read_mode = 'rb'
try:
import tomli as toml_
except ImportError:
# older versions of mypy have toml as a dependency, not tomli
read_mode = 'r'
try:
import toml as toml_ # type: ignore[no-redef]
except ImportError: # pragma: no cover
import warnings
warnings.warn('No TOML parser installed, cannot read configuration from `pyproject.toml`.')
return None
with open(config_file, read_mode) as rf:
return toml_.load(rf) # type: ignore[arg-type]
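# A sketch of the `pyproject.toml` section `parse_toml` is typically used to read;
# the key names correspond to the plugin settings referenced above (illustrative):
#
#     [tool.pydantic-mypy]
#     init_typed = true
#     init_forbid_extra = true
#     warn_required_dynamic_aliases = true
#     warn_untyped_fields = true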
| 29,726 | Python | 39.171622 | 120 | 0.619559 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/dataclasses.py | from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Optional, Type, TypeVar, Union, overload
from .class_validators import gather_all_validators
from .error_wrappers import ValidationError
from .errors import DataclassTypeError
from .fields import Field, FieldInfo, Required, Undefined
from .main import __dataclass_transform__, create_model, validate_model
from .typing import resolve_annotations
from .utils import ClassAttribute
if TYPE_CHECKING:
from .config import BaseConfig
from .main import BaseModel
from .typing import CallableGenerator, NoArgAnyCallable
DataclassT = TypeVar('DataclassT', bound='Dataclass')
class Dataclass:
__pydantic_model__: ClassVar[Type[BaseModel]]
__initialised__: ClassVar[bool]
__post_init_original__: ClassVar[Optional[Callable[..., None]]]
__processed__: ClassVar[Optional[ClassAttribute]]
__has_field_info_default__: ClassVar[bool] # whether or not a `pydantic.Field` is used as default value
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
@classmethod
def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator':
pass
@classmethod
def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
pass
def __call__(self: 'DataclassT', *args: Any, **kwargs: Any) -> 'DataclassT':
pass
def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
if isinstance(v, cls):
return v
elif isinstance(v, (list, tuple)):
return cls(*v)
elif isinstance(v, dict):
return cls(**v)
# In nested dataclasses, v can be of type `dataclasses.dataclass`.
# But to validate fields `cls` will be in fact a `pydantic.dataclasses.dataclass`,
# which inherits directly from the class of `v`.
elif is_builtin_dataclass(v) and cls.__bases__[0] is type(v):
import dataclasses
return cls(**dataclasses.asdict(v))
else:
raise DataclassTypeError(class_name=cls.__name__)
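# A minimal sketch of the coercion paths above, assuming a pydantic dataclass
# `Item` with fields `x: int` and `y: str` (illustrative only):
#
#     _validate_dataclass(Item, item)                # instance -> returned as-is
#     _validate_dataclass(Item, (1, 'a'))            # sequence -> Item(1, 'a')
#     _validate_dataclass(Item, {'x': 1, 'y': 'a'})  # mapping  -> Item(x=1, y='a')
#     _validate_dataclass(Item, stdlib_item)         # builtin dataclass -> Item(**asdict(stdlib_item))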
def _get_validators(cls: Type['Dataclass']) -> 'CallableGenerator':
yield cls.__validate__
def setattr_validate_assignment(self: 'Dataclass', name: str, value: Any) -> None:
if self.__initialised__:
d = dict(self.__dict__)
d.pop(name, None)
known_field = self.__pydantic_model__.__fields__.get(name, None)
if known_field:
value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
object.__setattr__(self, name, value)
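# A minimal sketch of when the setattr hook above is installed (illustrative;
# `MyConfig` is a hypothetical config class):
#
#     class MyConfig:
#         validate_assignment = True
#
#     @dataclass(config=MyConfig)
#     class Point:
#         x: int
#
#     p = Point(x=1)
#     p.x = 'not an int'  # raises ValidationError via setattr_validate_assignment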
def is_builtin_dataclass(_cls: Type[Any]) -> bool:
"""
    `dataclasses.is_dataclass` returns True if any of the class's parents is a dataclass.
    This is why we also add a class attribute `__processed__`, so that only 'direct' built-in dataclasses are considered.
"""
import dataclasses
return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)
def _generate_pydantic_post_init(
post_init_original: Optional[Callable[..., None]], post_init_post_parse: Optional[Callable[..., None]]
) -> Callable[..., None]:
def _pydantic_post_init(self: 'Dataclass', *initvars: Any) -> None:
if post_init_original is not None:
post_init_original(self, *initvars)
if getattr(self, '__has_field_info_default__', False):
# We need to remove `FieldInfo` values since they are not valid as input
# It's ok to do that because they are obviously the default values!
input_data = {k: v for k, v in self.__dict__.items() if not isinstance(v, FieldInfo)}
else:
input_data = self.__dict__
d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
if validation_error:
raise validation_error
object.__setattr__(self, '__dict__', {**getattr(self, '__dict__', {}), **d})
object.__setattr__(self, '__initialised__', True)
if post_init_post_parse is not None:
post_init_post_parse(self, *initvars)
return _pydantic_post_init
def _process_class(
_cls: Type[Any],
init: bool,
repr: bool,
eq: bool,
order: bool,
unsafe_hash: bool,
frozen: bool,
config: Optional[Type[Any]],
) -> Type['Dataclass']:
import dataclasses
post_init_original = getattr(_cls, '__post_init__', None)
if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
post_init_original = None
if not post_init_original:
post_init_original = getattr(_cls, '__post_init_original__', None)
post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
_pydantic_post_init = _generate_pydantic_post_init(post_init_original, post_init_post_parse)
# If the class is already a dataclass, __post_init__ will not be called automatically
# so no validation will be added.
# We hence create dynamically a new dataclass:
# ```
# @dataclasses.dataclass
# class NewClass(_cls):
# __post_init__ = _pydantic_post_init
# ```
# with the exact same fields as the base dataclass
# and register it on module level to address pickle problem:
# https://github.com/samuelcolvin/pydantic/issues/2111
if is_builtin_dataclass(_cls):
uniq_class_name = f'_Pydantic_{_cls.__name__}_{id(_cls)}'
_cls = type(
# for pretty output new class will have the name as original
_cls.__name__,
(_cls,),
{
'__annotations__': resolve_annotations(_cls.__annotations__, _cls.__module__),
'__post_init__': _pydantic_post_init,
# attrs for pickle to find this class
'__module__': __name__,
'__qualname__': uniq_class_name,
},
)
globals()[uniq_class_name] = _cls
else:
_cls.__post_init__ = _pydantic_post_init
cls: Type['Dataclass'] = dataclasses.dataclass( # type: ignore
_cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
cls.__processed__ = ClassAttribute('__processed__', True)
field_definitions: Dict[str, Any] = {}
for field in dataclasses.fields(cls):
default: Any = Undefined
default_factory: Optional['NoArgAnyCallable'] = None
field_info: FieldInfo
if field.default is not dataclasses.MISSING:
default = field.default
elif field.default_factory is not dataclasses.MISSING:
default_factory = field.default_factory
else:
default = Required
if isinstance(default, FieldInfo):
field_info = default
cls.__has_field_info_default__ = True
else:
field_info = Field(default=default, default_factory=default_factory, **field.metadata)
field_definitions[field.name] = (field.type, field_info)
validators = gather_all_validators(cls)
cls.__pydantic_model__ = create_model(
cls.__name__,
__config__=config,
__module__=_cls.__module__,
__validators__=validators,
__cls_kwargs__={'__resolve_forward_refs__': False},
**field_definitions,
)
cls.__initialised__ = False
cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment]
cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment]
if post_init_original:
cls.__post_init_original__ = post_init_original
if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment]
cls.__pydantic_model__.__try_update_forward_refs__(**{cls.__name__: cls})
return cls
@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo))
@overload
def dataclass(
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Callable[[Type[Any]], Type['Dataclass']]:
...
@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo))
@overload
def dataclass(
_cls: Type[Any],
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Type['Dataclass']:
...
@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo))
def dataclass(
_cls: Optional[Type[Any]] = None,
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Union[Callable[[Type[Any]], Type['Dataclass']], Type['Dataclass']]:
"""
Like the python standard lib dataclasses but with type validation.
    Arguments are the same as for standard dataclasses, with the addition of `config`,
    which accepts a pydantic config class and has the same meaning as `Config` on a `BaseModel`.
"""
def wrap(cls: Type[Any]) -> Type['Dataclass']:
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)
if _cls is None:
return wrap
return wrap(_cls)
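# A minimal usage sketch of the decorator above (illustrative only):
#
#     from pydantic.dataclasses import dataclass
#
#     @dataclass
#     class User:
#         id: int
#         name: str = 'John Doe'
#
#     user = User(id='42')     # '42' is validated and coerced to int
#     User.__pydantic_model__  # the BaseModel generated by _process_class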
def make_dataclass_validator(_cls: Type[Any], config: Type['BaseConfig']) -> 'CallableGenerator':
"""
Create a pydantic.dataclass from a builtin dataclass to add type validation
and yield the validators
It retrieves the parameters of the dataclass and forwards them to the newly created dataclass
"""
dataclass_params = _cls.__dataclass_params__
stdlib_dataclass_parameters = {param: getattr(dataclass_params, param) for param in dataclass_params.__slots__}
cls = dataclass(_cls, config=config, **stdlib_dataclass_parameters)
yield from _get_validators(cls)
| 10,007 | Python | 35 | 115 | 0.624663 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/tools.py | import json
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union
from .parse import Protocol, load_file, load_str_bytes
from .types import StrBytes
from .typing import display_as_type
__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')
NameFactory = Union[str, Callable[[Type[Any]], str]]
if TYPE_CHECKING:
from .typing import DictStrAny
def _generate_parsing_type_name(type_: Any) -> str:
return f'ParsingModel[{display_as_type(type_)}]'
@lru_cache(maxsize=2048)
def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
from pydantic.main import create_model
if type_name is None:
type_name = _generate_parsing_type_name
if not isinstance(type_name, str):
type_name = type_name(type_)
return create_model(type_name, __root__=(type_, ...))
T = TypeVar('T')
def parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:
model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]
return model_type(__root__=obj).__root__
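# A minimal usage sketch (illustrative only):
#
#     from typing import List
#     parse_obj_as(List[int], ['1', 2, b'3'])  # -> [1, 2, 3], validated via a transient root model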
def parse_file_as(
type_: Type[T],
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
type_name: Optional[NameFactory] = None,
) -> T:
obj = load_file(
path,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=json_loads,
)
return parse_obj_as(type_, obj, type_name=type_name)
def parse_raw_as(
type_: Type[T],
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
json_loads: Callable[[str], Any] = json.loads,
type_name: Optional[NameFactory] = None,
) -> T:
obj = load_str_bytes(
b,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=json_loads,
)
return parse_obj_as(type_, obj, type_name=type_name)
def schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':
"""Generate a JSON schema (as dict) for the passed model or dynamically generated one"""
return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)
def schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:
"""Generate a JSON schema (as JSON) for the passed model or dynamically generated one"""
return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)
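# A minimal usage sketch (illustrative only):
#
#     from typing import List
#     schema_of(List[int], title='IntList')  # dict JSON schema for the wrapped type
#     schema_json_of(List[int], indent=2)    # same schema, serialized with json.dumps kwargs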
| 2,834 | Python | 29.483871 | 105 | 0.645025 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/main.py | import warnings
from abc import ABCMeta
from copy import deepcopy
from enum import Enum
from functools import partial
from pathlib import Path
from types import FunctionType
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
no_type_check,
overload,
)
from .class_validators import ValidatorGroup, extract_root_validators, extract_validators, inherit_validators
from .config import BaseConfig, Extra, inherit_config, prepare_config
from .error_wrappers import ErrorWrapper, ValidationError
from .errors import ConfigError, DictError, ExtraError, MissingError
from .fields import MAPPING_LIKE_SHAPES, Field, FieldInfo, ModelField, ModelPrivateAttr, PrivateAttr, Undefined
from .json import custom_pydantic_encoder, pydantic_encoder
from .parse import Protocol, load_file, load_str_bytes
from .schema import default_ref_template, model_schema
from .types import PyObject, StrBytes
from .typing import (
AnyCallable,
get_args,
get_origin,
is_classvar,
is_namedtuple,
is_union,
resolve_annotations,
update_model_forward_refs,
)
from .utils import (
ROOT_KEY,
ClassAttribute,
GetterDict,
Representation,
ValueItems,
generate_model_signature,
is_valid_field,
is_valid_private_name,
lenient_issubclass,
sequence_like,
smart_deepcopy,
unique_list,
validate_field_name,
)
if TYPE_CHECKING:
from inspect import Signature
from .class_validators import ValidatorListDict
from .types import ModelOrDc
from .typing import (
AbstractSetIntStr,
AnyClassMethod,
CallableGenerator,
DictAny,
DictStrAny,
MappingIntStrAny,
ReprArgs,
SetStr,
TupleGenerator,
)
Model = TypeVar('Model', bound='BaseModel')
try:
import cython # type: ignore
except ImportError:
compiled: bool = False
else: # pragma: no cover
try:
compiled = cython.compiled
except AttributeError:
compiled = False
__all__ = 'BaseModel', 'compiled', 'create_model', 'validate_model'
_T = TypeVar('_T')
def __dataclass_transform__(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (),
) -> Callable[[_T], _T]:
return lambda a: a
def validate_custom_root_type(fields: Dict[str, ModelField]) -> None:
if len(fields) > 1:
raise ValueError(f'{ROOT_KEY} cannot be mixed with other fields')
def generate_hash_function(frozen: bool) -> Optional[Callable[[Any], int]]:
def hash_function(self_: Any) -> int:
return hash(self_.__class__) + hash(tuple(self_.__dict__.values()))
return hash_function if frozen else None
# If a field is of type `Callable`, its default value should be a function and cannot be ignored.
ANNOTATED_FIELD_UNTOUCHED_TYPES: Tuple[Any, ...] = (property, type, classmethod, staticmethod)
# When creating a `BaseModel` instance, we bypass all the methods, properties... added to the model
UNTOUCHED_TYPES: Tuple[Any, ...] = (FunctionType,) + ANNOTATED_FIELD_UNTOUCHED_TYPES
# Note `ModelMetaclass` refers to `BaseModel`, but is also used to *create* `BaseModel`, so we need to add this extra
# (somewhat hacky) boolean to keep track of whether we've created the `BaseModel` class yet, and therefore whether it's
# safe to refer to it. If it *hasn't* been created, we assume that the `__new__` call we're in the middle of is for
# the `BaseModel` class, since that's defined immediately after the metaclass.
_is_base_model_class_defined = False
@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo))
class ModelMetaclass(ABCMeta):
@no_type_check # noqa C901
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901
fields: Dict[str, ModelField] = {}
config = BaseConfig
validators: 'ValidatorListDict' = {}
pre_root_validators, post_root_validators = [], []
private_attributes: Dict[str, ModelPrivateAttr] = {}
base_private_attributes: Dict[str, ModelPrivateAttr] = {}
slots: SetStr = namespace.get('__slots__', ())
slots = {slots} if isinstance(slots, str) else set(slots)
class_vars: SetStr = set()
hash_func: Optional[Callable[[Any], int]] = None
for base in reversed(bases):
if _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel:
fields.update(smart_deepcopy(base.__fields__))
config = inherit_config(base.__config__, config)
validators = inherit_validators(base.__validators__, validators)
pre_root_validators += base.__pre_root_validators__
post_root_validators += base.__post_root_validators__
base_private_attributes.update(base.__private_attributes__)
class_vars.update(base.__class_vars__)
hash_func = base.__hash__
resolve_forward_refs = kwargs.pop('__resolve_forward_refs__', True)
allowed_config_kwargs: SetStr = {
key
for key in dir(config)
if not (key.startswith('__') and key.endswith('__')) # skip dunder methods and attributes
}
config_kwargs = {key: kwargs.pop(key) for key in kwargs.keys() & allowed_config_kwargs}
config_from_namespace = namespace.get('Config')
if config_kwargs and config_from_namespace:
raise TypeError('Specifying config in two places is ambiguous, use either Config attribute or class kwargs')
config = inherit_config(config_from_namespace, config, **config_kwargs)
validators = inherit_validators(extract_validators(namespace), validators)
vg = ValidatorGroup(validators)
for f in fields.values():
f.set_config(config)
extra_validators = vg.get_validators(f.name)
if extra_validators:
f.class_validators.update(extra_validators)
# re-run prepare to add extra validators
f.populate_validators()
prepare_config(config, name)
untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES
def is_untouched(v: Any) -> bool:
return isinstance(v, untouched_types) or v.__class__.__name__ == 'cython_function_or_method'
if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):
annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))
            # annotation-only fields need to come first in fields
for ann_name, ann_type in annotations.items():
if is_classvar(ann_type):
class_vars.add(ann_name)
elif is_valid_field(ann_name):
validate_field_name(bases, ann_name)
value = namespace.get(ann_name, Undefined)
allowed_types = get_args(ann_type) if is_union(get_origin(ann_type)) else (ann_type,)
if (
is_untouched(value)
and ann_type != PyObject
and not any(
lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types
)
):
continue
fields[ann_name] = ModelField.infer(
name=ann_name,
value=value,
annotation=ann_type,
class_validators=vg.get_validators(ann_name),
config=config,
)
elif ann_name not in namespace and config.underscore_attrs_are_private:
private_attributes[ann_name] = PrivateAttr()
untouched_types = UNTOUCHED_TYPES + config.keep_untouched
for var_name, value in namespace.items():
can_be_changed = var_name not in class_vars and not is_untouched(value)
if isinstance(value, ModelPrivateAttr):
if not is_valid_private_name(var_name):
raise NameError(
                            f'Private attributes "{var_name}" must not be a valid field name; '
                            f'use sunder or dunder names, e.g. "_{var_name}" or "__{var_name}__"'
)
private_attributes[var_name] = value
elif config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed:
private_attributes[var_name] = PrivateAttr(default=value)
elif is_valid_field(var_name) and var_name not in annotations and can_be_changed:
validate_field_name(bases, var_name)
inferred = ModelField.infer(
name=var_name,
value=value,
annotation=annotations.get(var_name, Undefined),
class_validators=vg.get_validators(var_name),
config=config,
)
if var_name in fields:
if lenient_issubclass(inferred.type_, fields[var_name].type_):
inferred.type_ = fields[var_name].type_
else:
raise TypeError(
f'The type of {name}.{var_name} differs from the new default value; '
f'if you wish to change the type of this field, please use a type annotation'
)
fields[var_name] = inferred
_custom_root_type = ROOT_KEY in fields
if _custom_root_type:
validate_custom_root_type(fields)
vg.check_for_unused()
if config.json_encoders:
json_encoder = partial(custom_pydantic_encoder, config.json_encoders)
else:
json_encoder = pydantic_encoder
pre_rv_new, post_rv_new = extract_root_validators(namespace)
if hash_func is None:
hash_func = generate_hash_function(config.frozen)
exclude_from_namespace = fields | private_attributes.keys() | {'__slots__'}
new_namespace = {
'__config__': config,
'__fields__': fields,
'__exclude_fields__': {
name: field.field_info.exclude for name, field in fields.items() if field.field_info.exclude is not None
}
or None,
'__include_fields__': {
name: field.field_info.include for name, field in fields.items() if field.field_info.include is not None
}
or None,
'__validators__': vg.validators,
'__pre_root_validators__': unique_list(
pre_root_validators + pre_rv_new,
name_factory=lambda v: v.__name__,
),
'__post_root_validators__': unique_list(
post_root_validators + post_rv_new,
name_factory=lambda skip_on_failure_and_v: skip_on_failure_and_v[1].__name__,
),
'__schema_cache__': {},
'__json_encoder__': staticmethod(json_encoder),
'__custom_root_type__': _custom_root_type,
'__private_attributes__': {**base_private_attributes, **private_attributes},
'__slots__': slots | private_attributes.keys(),
'__hash__': hash_func,
'__class_vars__': class_vars,
**{n: v for n, v in namespace.items() if n not in exclude_from_namespace},
}
cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)
# set __signature__ attr only for model class, but not for its instances
cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config))
if resolve_forward_refs:
cls.__try_update_forward_refs__()
return cls
def __instancecheck__(self, instance: Any) -> bool:
"""
Avoid calling ABC _abc_subclasscheck unless we're pretty sure.
See #3829 and python/cpython#92810
"""
return hasattr(instance, '__fields__') and super().__instancecheck__(instance)
object_setattr = object.__setattr__
class BaseModel(Representation, metaclass=ModelMetaclass):
if TYPE_CHECKING:
# populated by the metaclass, defined here to help IDEs only
__fields__: ClassVar[Dict[str, ModelField]] = {}
__include_fields__: ClassVar[Optional[Mapping[str, Any]]] = None
__exclude_fields__: ClassVar[Optional[Mapping[str, Any]]] = None
__validators__: ClassVar[Dict[str, AnyCallable]] = {}
__pre_root_validators__: ClassVar[List[AnyCallable]]
__post_root_validators__: ClassVar[List[Tuple[bool, AnyCallable]]]
__config__: ClassVar[Type[BaseConfig]] = BaseConfig
__json_encoder__: ClassVar[Callable[[Any], Any]] = lambda x: x
__schema_cache__: ClassVar['DictAny'] = {}
__custom_root_type__: ClassVar[bool] = False
__signature__: ClassVar['Signature']
__private_attributes__: ClassVar[Dict[str, ModelPrivateAttr]]
__class_vars__: ClassVar[SetStr]
__fields_set__: ClassVar[SetStr] = set()
Config = BaseConfig
__slots__ = ('__dict__', '__fields_set__')
__doc__ = '' # Null out the Representation docstring
def __init__(__pydantic_self__, **data: Any) -> None:
"""
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be parsed to form a valid model.
"""
        # Uses something other than `self` as the first arg to allow "self" as a settable attribute
values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
if validation_error:
raise validation_error
try:
object_setattr(__pydantic_self__, '__dict__', values)
except TypeError as e:
raise TypeError(
'Model values must be a dict; you may not have returned a dictionary from a root validator'
) from e
object_setattr(__pydantic_self__, '__fields_set__', fields_set)
__pydantic_self__._init_private_attributes()
@no_type_check
def __setattr__(self, name, value): # noqa: C901 (ignore complexity)
if name in self.__private_attributes__:
return object_setattr(self, name, value)
if self.__config__.extra is not Extra.allow and name not in self.__fields__:
raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"')
elif not self.__config__.allow_mutation or self.__config__.frozen:
raise TypeError(f'"{self.__class__.__name__}" is immutable and does not support item assignment')
elif self.__config__.validate_assignment:
new_values = {**self.__dict__, name: value}
for validator in self.__pre_root_validators__:
try:
new_values = validator(self.__class__, new_values)
except (ValueError, TypeError, AssertionError) as exc:
raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], self.__class__)
known_field = self.__fields__.get(name, None)
if known_field:
# We want to
# - make sure validators are called without the current value for this field inside `values`
# - keep other values (e.g. submodels) untouched (using `BaseModel.dict()` will change them into dicts)
# - keep the order of the fields
if not known_field.field_info.allow_mutation:
raise TypeError(f'"{known_field.name}" has allow_mutation set to False and cannot be assigned')
dict_without_original_value = {k: v for k, v in self.__dict__.items() if k != name}
value, error_ = known_field.validate(value, dict_without_original_value, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
else:
new_values[name] = value
errors = []
for skip_on_failure, validator in self.__post_root_validators__:
if skip_on_failure and errors:
continue
try:
new_values = validator(self.__class__, new_values)
except (ValueError, TypeError, AssertionError) as exc:
errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
if errors:
raise ValidationError(errors, self.__class__)
            # update the whole __dict__, as values other than just `value`
            # may have been changed (e.g. by a `root_validator`)
object_setattr(self, '__dict__', new_values)
else:
self.__dict__[name] = value
self.__fields_set__.add(name)
def __getstate__(self) -> 'DictAny':
private_attrs = ((k, getattr(self, k, Undefined)) for k in self.__private_attributes__)
return {
'__dict__': self.__dict__,
'__fields_set__': self.__fields_set__,
'__private_attribute_values__': {k: v for k, v in private_attrs if v is not Undefined},
}
def __setstate__(self, state: 'DictAny') -> None:
object_setattr(self, '__dict__', state['__dict__'])
object_setattr(self, '__fields_set__', state['__fields_set__'])
for name, value in state.get('__private_attribute_values__', {}).items():
object_setattr(self, name, value)
def _init_private_attributes(self) -> None:
for name, private_attr in self.__private_attributes__.items():
default = private_attr.get_default()
if default is not Undefined:
object_setattr(self, name, default)
def dict(
self,
*,
include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> 'DictStrAny':
"""
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
"""
if skip_defaults is not None:
warnings.warn(
f'{self.__class__.__name__}.dict(): "skip_defaults" is deprecated and replaced by "exclude_unset"',
DeprecationWarning,
)
exclude_unset = skip_defaults
return dict(
self._iter(
to_dict=True,
by_alias=by_alias,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
)
def json(
self,
*,
include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
encoder: Optional[Callable[[Any], Any]] = None,
models_as_dict: bool = True,
**dumps_kwargs: Any,
) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
"""
if skip_defaults is not None:
warnings.warn(
f'{self.__class__.__name__}.json(): "skip_defaults" is deprecated and replaced by "exclude_unset"',
DeprecationWarning,
)
exclude_unset = skip_defaults
encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)
        # We don't directly call `self.dict()`, which does exactly this with `to_dict=True`,
        # because we want to be able to keep raw `BaseModel` instances rather than converting them to `dict`.
        # This allows users to write custom JSON encoders for given `BaseModel` classes.
data = dict(
self._iter(
to_dict=models_as_dict,
by_alias=by_alias,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
)
if self.__custom_root_type__:
data = data[ROOT_KEY]
return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)
@classmethod
def _enforce_dict_if_root(cls, obj: Any) -> Any:
if cls.__custom_root_type__ and (
not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY})
or cls.__fields__[ROOT_KEY].shape in MAPPING_LIKE_SHAPES
):
return {ROOT_KEY: obj}
else:
return obj
@classmethod
def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':
obj = cls._enforce_dict_if_root(obj)
if not isinstance(obj, dict):
try:
obj = dict(obj)
except (TypeError, ValueError) as e:
exc = TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}')
raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e
return cls(**obj)
@classmethod
def parse_raw(
cls: Type['Model'],
b: StrBytes,
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
) -> 'Model':
try:
obj = load_str_bytes(
b,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=cls.__config__.json_loads,
)
except (ValueError, TypeError, UnicodeDecodeError) as e:
raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)
return cls.parse_obj(obj)
@classmethod
def parse_file(
cls: Type['Model'],
path: Union[str, Path],
*,
content_type: str = None,
encoding: str = 'utf8',
proto: Protocol = None,
allow_pickle: bool = False,
) -> 'Model':
obj = load_file(
path,
proto=proto,
content_type=content_type,
encoding=encoding,
allow_pickle=allow_pickle,
json_loads=cls.__config__.json_loads,
)
return cls.parse_obj(obj)
@classmethod
def from_orm(cls: Type['Model'], obj: Any) -> 'Model':
if not cls.__config__.orm_mode:
raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')
obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj)
m = cls.__new__(cls)
values, fields_set, validation_error = validate_model(cls, obj)
if validation_error:
raise validation_error
object_setattr(m, '__dict__', values)
object_setattr(m, '__fields_set__', fields_set)
m._init_private_attributes()
return m
@classmethod
def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':
"""
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if `Config.extra = 'allow'` was set since it adds all passed values
"""
m = cls.__new__(cls)
fields_values: Dict[str, Any] = {}
for name, field in cls.__fields__.items():
if name in values:
fields_values[name] = values[name]
elif not field.required:
fields_values[name] = field.get_default()
fields_values.update(values)
object_setattr(m, '__dict__', fields_values)
if _fields_set is None:
_fields_set = set(values.keys())
object_setattr(m, '__fields_set__', _fields_set)
m._init_private_attributes()
return m
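    # A minimal sketch of `construct` vs. `__init__` (illustrative only):
    #
    #     class User(BaseModel):
    #         id: int
    #
    #     User(id='1').id            # -> 1, input is validated and coerced
    #     User.construct(id='1').id  # -> '1', values are trusted verbatim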
def _copy_and_set_values(self: 'Model', values: 'DictStrAny', fields_set: 'SetStr', *, deep: bool) -> 'Model':
if deep:
            # the chances of `values` being an empty dict are low, so using smart_deepcopy is not worthwhile here
values = deepcopy(values)
cls = self.__class__
m = cls.__new__(cls)
object_setattr(m, '__dict__', values)
object_setattr(m, '__fields_set__', fields_set)
for name in self.__private_attributes__:
value = getattr(self, name, Undefined)
if value is not Undefined:
if deep:
value = deepcopy(value)
object_setattr(m, name, value)
return m
def copy(
self: 'Model',
*,
include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
update: 'DictStrAny' = None,
deep: bool = False,
) -> 'Model':
"""
Duplicate a model, optionally choose which fields to include, exclude and change.
:param include: fields to include in new model
:param exclude: fields to exclude from new model, as with values this takes precedence over include
:param update: values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
:param deep: set to `True` to make a deep copy of the model
:return: new model instance
"""
values = dict(
self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False),
**(update or {}),
)
# new `__fields_set__` can have unset optional fields with a set value in `update` kwarg
if update:
fields_set = self.__fields_set__ | update.keys()
else:
fields_set = set(self.__fields_set__)
return self._copy_and_set_values(values, fields_set, deep=deep)
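    # A minimal sketch of `copy` with `update` (illustrative only; note that
    # updated values are *not* validated):
    #
    #     m2 = m.copy(update={'id': 'oops'}, deep=True)
    #     m2.id  # -> 'oops', accepted without validation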
@classmethod
def schema(cls, by_alias: bool = True, ref_template: str = default_ref_template) -> 'DictStrAny':
cached = cls.__schema_cache__.get((by_alias, ref_template))
if cached is not None:
return cached
s = model_schema(cls, by_alias=by_alias, ref_template=ref_template)
cls.__schema_cache__[(by_alias, ref_template)] = s
return s
@classmethod
def schema_json(
cls, *, by_alias: bool = True, ref_template: str = default_ref_template, **dumps_kwargs: Any
) -> str:
from .json import pydantic_encoder
return cls.__config__.json_dumps(
cls.schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls: Type['Model'], value: Any) -> 'Model':
if isinstance(value, cls):
copy_on_model_validation = cls.__config__.copy_on_model_validation
# whether to deep or shallow copy the model on validation, None means do not copy
deep_copy: Optional[bool] = None
if copy_on_model_validation not in {'deep', 'shallow', 'none'}:
# Warn about deprecated behavior
warnings.warn(
"`copy_on_model_validation` should be a string: 'deep', 'shallow' or 'none'", DeprecationWarning
)
if copy_on_model_validation:
deep_copy = False
if copy_on_model_validation == 'shallow':
# shallow copy
deep_copy = False
elif copy_on_model_validation == 'deep':
# deep copy
deep_copy = True
if deep_copy is None:
return value
else:
return value._copy_and_set_values(value.__dict__, value.__fields_set__, deep=deep_copy)
value = cls._enforce_dict_if_root(value)
if isinstance(value, dict):
return cls(**value)
elif cls.__config__.orm_mode:
return cls.from_orm(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
@classmethod
def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:
if isinstance(obj, GetterDict):
return obj
return cls.__config__.getter_dict(obj)
@classmethod
@no_type_check
def _get_value(
cls,
v: Any,
to_dict: bool,
by_alias: bool,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],
exclude_unset: bool,
exclude_defaults: bool,
exclude_none: bool,
) -> Any:
if isinstance(v, BaseModel):
if to_dict:
v_dict = v.dict(
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=include,
exclude=exclude,
exclude_none=exclude_none,
)
if ROOT_KEY in v_dict:
return v_dict[ROOT_KEY]
return v_dict
else:
return v.copy(include=include, exclude=exclude)
value_exclude = ValueItems(v, exclude) if exclude else None
value_include = ValueItems(v, include) if include else None
if isinstance(v, dict):
return {
k_: cls._get_value(
v_,
to_dict=to_dict,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=value_include and value_include.for_element(k_),
exclude=value_exclude and value_exclude.for_element(k_),
exclude_none=exclude_none,
)
for k_, v_ in v.items()
if (not value_exclude or not value_exclude.is_excluded(k_))
and (not value_include or value_include.is_included(k_))
}
elif sequence_like(v):
seq_args = (
cls._get_value(
v_,
to_dict=to_dict,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
include=value_include and value_include.for_element(i),
exclude=value_exclude and value_exclude.for_element(i),
exclude_none=exclude_none,
)
for i, v_ in enumerate(v)
if (not value_exclude or not value_exclude.is_excluded(i))
and (not value_include or value_include.is_included(i))
)
return v.__class__(*seq_args) if is_namedtuple(v.__class__) else v.__class__(seq_args)
elif isinstance(v, Enum) and getattr(cls.Config, 'use_enum_values', False):
return v.value
else:
return v
@classmethod
def __try_update_forward_refs__(cls, **localns: Any) -> None:
"""
Same as update_forward_refs but will not raise exception
when forward references are not defined.
"""
update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns, (NameError,))
@classmethod
def update_forward_refs(cls, **localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this Model, globalns and localns.
"""
update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns)
def __iter__(self) -> 'TupleGenerator':
"""
so `dict(model)` works
"""
yield from self.__dict__.items()
def _iter(
self,
to_dict: bool = False,
by_alias: bool = False,
include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> 'TupleGenerator':
        # Merge field-level include/exclude settings with the explicit parameters; explicit parameters override field-level options.
        # The extra "is not None" guards are not logically necessary but optimize performance for the simple case.
if exclude is not None or self.__exclude_fields__ is not None:
exclude = ValueItems.merge(self.__exclude_fields__, exclude)
if include is not None or self.__include_fields__ is not None:
include = ValueItems.merge(self.__include_fields__, include, intersect=True)
allowed_keys = self._calculate_keys(
include=include, exclude=exclude, exclude_unset=exclude_unset # type: ignore
)
if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none):
# huge boost for plain _iter()
yield from self.__dict__.items()
return
value_exclude = ValueItems(self, exclude) if exclude is not None else None
value_include = ValueItems(self, include) if include is not None else None
for field_key, v in self.__dict__.items():
if (allowed_keys is not None and field_key not in allowed_keys) or (exclude_none and v is None):
continue
if exclude_defaults:
model_field = self.__fields__.get(field_key)
if not getattr(model_field, 'required', True) and getattr(model_field, 'default', _missing) == v:
continue
if by_alias and field_key in self.__fields__:
dict_key = self.__fields__[field_key].alias
else:
dict_key = field_key
if to_dict or value_include or value_exclude:
v = self._get_value(
v,
to_dict=to_dict,
by_alias=by_alias,
include=value_include and value_include.for_element(field_key),
exclude=value_exclude and value_exclude.for_element(field_key),
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
yield dict_key, v
def _calculate_keys(
self,
include: Optional['MappingIntStrAny'],
exclude: Optional['MappingIntStrAny'],
exclude_unset: bool,
update: Optional['DictStrAny'] = None,
) -> Optional[AbstractSet[str]]:
if include is None and exclude is None and exclude_unset is False:
return None
keys: AbstractSet[str]
if exclude_unset:
keys = self.__fields_set__.copy()
else:
keys = self.__dict__.keys()
if include is not None:
keys &= include.keys()
if update:
keys -= update.keys()
if exclude:
keys -= {k for k, v in exclude.items() if ValueItems.is_true(v)}
return keys
def __eq__(self, other: Any) -> bool:
if isinstance(other, BaseModel):
return self.dict() == other.dict()
else:
return self.dict() == other
def __repr_args__(self) -> 'ReprArgs':
return [
(k, v) for k, v in self.__dict__.items() if k not in self.__fields__ or self.__fields__[k].field_info.repr
]
_is_base_model_class_defined = True
@overload
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: None = None,
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
**field_definitions: Any,
) -> Type['BaseModel']:
...
@overload
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Union[Type['Model'], Tuple[Type['Model'], ...]],
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
**field_definitions: Any,
) -> Type['Model']:
...
def create_model(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Union[None, Type['Model'], Tuple[Type['Model'], ...]] = None,
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None,
__cls_kwargs__: Dict[str, Any] = None,
**field_definitions: Any,
) -> Type['Model']:
"""
Dynamically create a model.
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param field_definitions: fields of the model (or extra fields if a base is supplied)
        in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`, e.g.
`foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format
`<name>=<FieldInfo>`, e.g. `foo=Field(default_factory=datetime.utcnow, alias='bar')`
"""
if __base__ is not None:
if __config__ is not None:
raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')
if not isinstance(__base__, tuple):
__base__ = (__base__,)
else:
__base__ = (cast(Type['Model'], BaseModel),)
__cls_kwargs__ = __cls_kwargs__ or {}
fields = {}
annotations = {}
for f_name, f_def in field_definitions.items():
if not is_valid_field(f_name):
warnings.warn(f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning)
if isinstance(f_def, tuple):
try:
f_annotation, f_value = f_def
except ValueError as e:
raise ConfigError(
'field definitions should either be a tuple of (<type>, <default>) or just a '
'default value, unfortunately this means tuples as '
'default values are not allowed'
) from e
else:
f_annotation, f_value = None, f_def
if f_annotation:
annotations[f_name] = f_annotation
fields[f_name] = f_value
namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}
if __validators__:
namespace.update(__validators__)
namespace.update(fields)
if __config__:
namespace['Config'] = inherit_config(__config__, BaseConfig)
return type(__model_name, __base__, namespace, **__cls_kwargs__)
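# A minimal usage sketch of `create_model` (illustrative only):
#
#     DynamicUser = create_model('DynamicUser', id=(int, ...), name='John Doe')
#     DynamicUser(id='1')  # equivalent to declaring the class statically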
_missing = object()
def validate_model( # noqa: C901 (ignore complexity)
model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None
) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:
"""
    Validate data against a model.
"""
values = {}
errors = []
# input_data names, possibly alias
names_used = set()
# field names, never aliases
fields_set = set()
config = model.__config__
check_extra = config.extra is not Extra.ignore
cls_ = cls or model
for validator in model.__pre_root_validators__:
try:
input_data = validator(cls_, input_data)
except (ValueError, TypeError, AssertionError) as exc:
return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)
for name, field in model.__fields__.items():
value = input_data.get(field.alias, _missing)
using_name = False
if value is _missing and config.allow_population_by_field_name and field.alt_alias:
value = input_data.get(field.name, _missing)
using_name = True
if value is _missing:
if field.required:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
continue
value = field.get_default()
if not config.validate_all and not field.validate_always:
values[name] = value
continue
else:
fields_set.add(name)
if check_extra:
names_used.add(field.name if using_name else field.alias)
v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[name] = v_
if check_extra:
if isinstance(input_data, GetterDict):
extra = input_data.extra_keys() - names_used
else:
extra = input_data.keys() - names_used
if extra:
fields_set |= extra
if config.extra is Extra.allow:
for f in extra:
values[f] = input_data[f]
else:
for f in sorted(extra):
errors.append(ErrorWrapper(ExtraError(), loc=f))
for skip_on_failure, validator in model.__post_root_validators__:
if skip_on_failure and errors:
continue
try:
values = validator(cls_, values)
except (ValueError, TypeError, AssertionError) as exc:
errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
if errors:
return values, fields_set, ValidationError(errors, cls_)
else:
return values, fields_set, None
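# Illustrative usage sketch (not part of the upstream module): validate_model
# returns the coerced values, the set of explicitly provided field names, and
# either a ValidationError or None.
#
#     >>> class Item(BaseModel):
#     ...     x: int
#     ...     y: int = 0
#     >>> validate_model(Item, {'x': '1'})
#     ({'x': 1, 'y': 0}, {'x'}, None)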
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/config.py
import json
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union
from typing_extensions import Literal, Protocol
from .typing import AnyCallable
from .utils import GetterDict
if TYPE_CHECKING:
from typing import overload
from .fields import ModelField
from .main import BaseModel
ConfigType = Type['BaseConfig']
class SchemaExtraCallable(Protocol):
@overload
def __call__(self, schema: Dict[str, Any]) -> None:
pass
@overload
def __call__(self, schema: Dict[str, Any], model_class: Type[BaseModel]) -> None:
pass
else:
SchemaExtraCallable = Callable[..., None]
__all__ = 'BaseConfig', 'Extra', 'inherit_config', 'prepare_config'
class Extra(str, Enum):
allow = 'allow'
ignore = 'ignore'
forbid = 'forbid'
class BaseConfig:
title: Optional[str] = None
anystr_lower: bool = False
anystr_strip_whitespace: bool = False
min_anystr_length: int = 0
max_anystr_length: Optional[int] = None
validate_all: bool = False
extra: Extra = Extra.ignore
allow_mutation: bool = True
frozen: bool = False
allow_population_by_field_name: bool = False
use_enum_values: bool = False
fields: Dict[str, Union[str, Dict[str, str]]] = {}
validate_assignment: bool = False
error_msg_templates: Dict[str, str] = {}
arbitrary_types_allowed: bool = False
orm_mode: bool = False
getter_dict: Type[GetterDict] = GetterDict
alias_generator: Optional[Callable[[str], str]] = None
keep_untouched: Tuple[type, ...] = ()
schema_extra: Union[Dict[str, Any], 'SchemaExtraCallable'] = {}
json_loads: Callable[[str], Any] = json.loads
json_dumps: Callable[..., str] = json.dumps
# key type should include ForwardRef, but that breaks with python3.6
json_encoders: Dict[Union[Type[Any], str], AnyCallable] = {}
underscore_attrs_are_private: bool = False
# whether inherited models as fields should be reconstructed as base model,
# and whether such a copy should be shallow or deep
copy_on_model_validation: Literal['none', 'deep', 'shallow'] = 'shallow'
# whether `Union` should check all allowed types before even trying to coerce
smart_union: bool = False
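    # Illustrative sketch (not part of the upstream module): with smart_union
    # enabled, an exact type match wins before any coercion is attempted, so a
    # str stays a str even when int is listed first in the Union.
    #
    #     >>> class Model(BaseModel):
    #     ...     x: Union[int, str]
    #     ...     class Config:
    #     ...         smart_union = True
    #     >>> Model(x='1').x
    #     '1'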
@classmethod
def get_field_info(cls, name: str) -> Dict[str, Any]:
"""
Get properties of FieldInfo from the `fields` property of the config class.
"""
fields_value = cls.fields.get(name)
if isinstance(fields_value, str):
field_info: Dict[str, Any] = {'alias': fields_value}
elif isinstance(fields_value, dict):
field_info = fields_value
else:
field_info = {}
if 'alias' in field_info:
field_info.setdefault('alias_priority', 2)
if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:
alias = cls.alias_generator(name)
if not isinstance(alias, str):
raise TypeError(f'Config.alias_generator must return str, not {alias.__class__}')
field_info.update(alias=alias, alias_priority=1)
return field_info
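    # Illustrative sketch (not part of the upstream module): an entry in
    # Config.fields may be a bare alias string (shorthand) or a dict of
    # FieldInfo properties; both feed into get_field_info above.
    #
    #     >>> class Model(BaseModel):
    #     ...     metadata: Dict[str, str]
    #     ...     class Config:
    #     ...         fields = {'metadata': 'metadata_'}   # alias shorthand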
@classmethod
def prepare_field(cls, field: 'ModelField') -> None:
"""
Optional hook to check or modify fields during model creation.
"""
pass
def inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':
if not self_config:
base_classes: Tuple['ConfigType', ...] = (parent_config,)
elif self_config == parent_config:
base_classes = (self_config,)
else:
base_classes = self_config, parent_config
namespace['json_encoders'] = {
**getattr(parent_config, 'json_encoders', {}),
**getattr(self_config, 'json_encoders', {}),
**namespace.get('json_encoders', {}),
}
return type('Config', base_classes, namespace)
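# Illustrative sketch (not part of the upstream module): inherit_config merges a
# model's own Config with its parent's, keeping json_encoders from both so that
# encoders declared on the parent survive unless overridden.
#
#     >>> class Parent: json_encoders = {bytes: bytes.hex}
#     >>> class Child: json_encoders = {set: list}
#     >>> merged = inherit_config(Child, Parent)
#     >>> sorted(t.__name__ for t in merged.json_encoders)
#     ['bytes', 'set']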
def prepare_config(config: Type[BaseConfig], cls_name: str) -> None:
if not isinstance(config.extra, Extra):
try:
config.extra = Extra(config.extra)
except ValueError:
raise ValueError(f'"{cls_name}": {config.extra} is not a valid value for "extra"')
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/fields.py
import copy
from collections import Counter as CollectionCounter, defaultdict, deque
from collections.abc import Hashable as CollectionsHashable, Iterable as CollectionsIterable
from typing import (
TYPE_CHECKING,
Any,
Counter,
DefaultDict,
Deque,
Dict,
FrozenSet,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from typing_extensions import Annotated
from . import errors as errors_
from .class_validators import Validator, make_generic_validator, prep_validators
from .error_wrappers import ErrorWrapper
from .errors import ConfigError, InvalidDiscriminator, MissingDiscriminator, NoneIsNotAllowedError
from .types import Json, JsonWrapper
from .typing import (
Callable,
ForwardRef,
NoArgAnyCallable,
convert_generics,
display_as_type,
get_args,
get_origin,
is_literal_type,
is_new_type,
is_none_type,
is_typeddict,
is_union,
new_type_supertype,
)
from .utils import (
PyObjectStr,
Representation,
ValueItems,
get_discriminator_alias_and_values,
get_unique_discriminator_alias,
lenient_isinstance,
lenient_issubclass,
sequence_like,
smart_deepcopy,
)
from .validators import constant_validator, dict_validator, find_validators, validate_json
Required: Any = Ellipsis
T = TypeVar('T')
class UndefinedType:
def __repr__(self) -> str:
return 'PydanticUndefined'
def __copy__(self: T) -> T:
return self
def __reduce__(self) -> str:
return 'Undefined'
def __deepcopy__(self: T, _: Any) -> T:
return self
Undefined = UndefinedType()
if TYPE_CHECKING:
from .class_validators import ValidatorsList
from .config import BaseConfig
from .error_wrappers import ErrorList
from .types import ModelOrDc
from .typing import AbstractSetIntStr, MappingIntStrAny, ReprArgs
ValidateReturn = Tuple[Optional[Any], Optional[ErrorList]]
LocStr = Union[Tuple[Union[int, str], ...], str]
BoolUndefined = Union[bool, UndefinedType]
class FieldInfo(Representation):
"""
Captures extra information about a field.
"""
__slots__ = (
'default',
'default_factory',
'alias',
'alias_priority',
'title',
'description',
'exclude',
'include',
'const',
'gt',
'ge',
'lt',
'le',
'multiple_of',
'max_digits',
'decimal_places',
'min_items',
'max_items',
'unique_items',
'min_length',
'max_length',
'allow_mutation',
'repr',
'regex',
'discriminator',
'extra',
)
    # field constraints with their default values; also used in update_from_config below
__field_constraints__ = {
'min_length': None,
'max_length': None,
'regex': None,
'gt': None,
'lt': None,
'ge': None,
'le': None,
'multiple_of': None,
'max_digits': None,
'decimal_places': None,
'min_items': None,
'max_items': None,
'unique_items': None,
'allow_mutation': True,
}
def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
self.default = default
self.default_factory = kwargs.pop('default_factory', None)
self.alias = kwargs.pop('alias', None)
self.alias_priority = kwargs.pop('alias_priority', 2 if self.alias else None)
self.title = kwargs.pop('title', None)
self.description = kwargs.pop('description', None)
self.exclude = kwargs.pop('exclude', None)
self.include = kwargs.pop('include', None)
self.const = kwargs.pop('const', None)
self.gt = kwargs.pop('gt', None)
self.ge = kwargs.pop('ge', None)
self.lt = kwargs.pop('lt', None)
self.le = kwargs.pop('le', None)
self.multiple_of = kwargs.pop('multiple_of', None)
self.max_digits = kwargs.pop('max_digits', None)
self.decimal_places = kwargs.pop('decimal_places', None)
self.min_items = kwargs.pop('min_items', None)
self.max_items = kwargs.pop('max_items', None)
self.unique_items = kwargs.pop('unique_items', None)
self.min_length = kwargs.pop('min_length', None)
self.max_length = kwargs.pop('max_length', None)
self.allow_mutation = kwargs.pop('allow_mutation', True)
self.regex = kwargs.pop('regex', None)
self.discriminator = kwargs.pop('discriminator', None)
self.repr = kwargs.pop('repr', True)
self.extra = kwargs
def __repr_args__(self) -> 'ReprArgs':
field_defaults_to_hide: Dict[str, Any] = {
'repr': True,
**self.__field_constraints__,
}
attrs = ((s, getattr(self, s)) for s in self.__slots__)
return [(a, v) for a, v in attrs if v != field_defaults_to_hide.get(a, None)]
def get_constraints(self) -> Set[str]:
"""
Gets the constraints set on the field by comparing the constraint value with its default value
:return: the constraints set on field_info
"""
return {attr for attr, default in self.__field_constraints__.items() if getattr(self, attr) != default}
def update_from_config(self, from_config: Dict[str, Any]) -> None:
"""
        Update this FieldInfo based on a dict from get_field_info, only fields which have not been set are updated.
"""
for attr_name, value in from_config.items():
try:
current_value = getattr(self, attr_name)
except AttributeError:
# attr_name is not an attribute of FieldInfo, it should therefore be added to extra
self.extra[attr_name] = value
else:
if current_value is self.__field_constraints__.get(attr_name, None):
setattr(self, attr_name, value)
elif attr_name == 'exclude':
self.exclude = ValueItems.merge(value, current_value)
elif attr_name == 'include':
self.include = ValueItems.merge(value, current_value, intersect=True)
def _validate(self) -> None:
if self.default is not Undefined and self.default_factory is not None:
raise ValueError('cannot specify both default and default_factory')
def Field(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclude: Union['AbstractSetIntStr', 'MappingIntStrAny', Any] = None,
include: Union['AbstractSetIntStr', 'MappingIntStrAny', Any] = None,
const: bool = None,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
max_digits: int = None,
decimal_places: int = None,
min_items: int = None,
max_items: int = None,
unique_items: bool = None,
min_length: int = None,
max_length: int = None,
allow_mutation: bool = True,
regex: str = None,
discriminator: str = None,
repr: bool = True,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclude: exclude this field while dumping.
Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method.
:param include: include this field while dumping.
Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method.
    :param const: this field is required and *must* take its default value
:param gt: only applies to numbers, requires the field to be "greater than". The schema
will have an ``exclusiveMinimum`` validation keyword
:param ge: only applies to numbers, requires the field to be "greater than or equal to". The
schema will have a ``minimum`` validation keyword
:param lt: only applies to numbers, requires the field to be "less than". The schema
will have an ``exclusiveMaximum`` validation keyword
:param le: only applies to numbers, requires the field to be "less than or equal to". The
schema will have a ``maximum`` validation keyword
:param multiple_of: only applies to numbers, requires the field to be "a multiple of". The
schema will have a ``multipleOf`` validation keyword
:param max_digits: only applies to Decimals, requires the field to have a maximum number
of digits within the decimal. It does not include a zero before the decimal point or trailing decimal zeroes.
:param decimal_places: only applies to Decimals, requires the field to have at most a number of decimal places
allowed. It does not include trailing decimal zeroes.
:param min_items: only applies to lists, requires the field to have a minimum number of
elements. The schema will have a ``minItems`` validation keyword
:param max_items: only applies to lists, requires the field to have a maximum number of
elements. The schema will have a ``maxItems`` validation keyword
    :param unique_items: only applies to lists, requires the field not to have duplicated
elements. The schema will have a ``uniqueItems`` validation keyword
:param min_length: only applies to strings, requires the field to have a minimum length. The
      schema will have a ``minLength`` validation keyword
:param max_length: only applies to strings, requires the field to have a maximum length. The
schema will have a ``maxLength`` validation keyword
:param allow_mutation: a boolean which defaults to True. When False, the field raises a TypeError if the field is
assigned on an instance. The BaseModel Config must set validate_assignment to True
:param regex: only applies to strings, requires the field match against a regular expression
pattern string. The schema will have a ``pattern`` validation keyword
:param discriminator: only useful with a (discriminated a.k.a. tagged) `Union` of sub models with a common field.
The `discriminator` is the name of this common field to shorten validation and improve generated schema
:param repr: show this field in the representation
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = FieldInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclude=exclude,
include=include,
const=const,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
max_digits=max_digits,
decimal_places=decimal_places,
min_items=min_items,
max_items=max_items,
unique_items=unique_items,
min_length=min_length,
max_length=max_length,
allow_mutation=allow_mutation,
regex=regex,
discriminator=discriminator,
repr=repr,
**extra,
)
field_info._validate()
return field_info
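# Illustrative usage sketch (not part of the upstream module): Field attaches
# constraints and schema metadata to a model attribute.
#
#     >>> class User(BaseModel):
#     ...     name: str = Field(..., min_length=1, description='display name')
#     ...     age: int = Field(0, ge=0, le=150)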
# used to be an enum but changed to ints for a small performance improvement (less attribute access overhead)
SHAPE_SINGLETON = 1
SHAPE_LIST = 2
SHAPE_SET = 3
SHAPE_MAPPING = 4
SHAPE_TUPLE = 5
SHAPE_TUPLE_ELLIPSIS = 6
SHAPE_SEQUENCE = 7
SHAPE_FROZENSET = 8
SHAPE_ITERABLE = 9
SHAPE_GENERIC = 10
SHAPE_DEQUE = 11
SHAPE_DICT = 12
SHAPE_DEFAULTDICT = 13
SHAPE_COUNTER = 14
SHAPE_NAME_LOOKUP = {
SHAPE_LIST: 'List[{}]',
SHAPE_SET: 'Set[{}]',
SHAPE_TUPLE_ELLIPSIS: 'Tuple[{}, ...]',
SHAPE_SEQUENCE: 'Sequence[{}]',
SHAPE_FROZENSET: 'FrozenSet[{}]',
SHAPE_ITERABLE: 'Iterable[{}]',
SHAPE_DEQUE: 'Deque[{}]',
SHAPE_DICT: 'Dict[{}]',
SHAPE_DEFAULTDICT: 'DefaultDict[{}]',
SHAPE_COUNTER: 'Counter[{}]',
}
MAPPING_LIKE_SHAPES: Set[int] = {SHAPE_DEFAULTDICT, SHAPE_DICT, SHAPE_MAPPING, SHAPE_COUNTER}
class ModelField(Representation):
__slots__ = (
'type_',
'outer_type_',
'sub_fields',
'sub_fields_mapping',
'key_field',
'validators',
'pre_validators',
'post_validators',
'default',
'default_factory',
'required',
'model_config',
'name',
'alias',
'has_alias',
'field_info',
'discriminator_key',
'discriminator_alias',
'validate_always',
'allow_none',
'shape',
'class_validators',
'parse_json',
)
def __init__(
self,
*,
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]],
model_config: Type['BaseConfig'],
default: Any = None,
default_factory: Optional[NoArgAnyCallable] = None,
required: 'BoolUndefined' = Undefined,
alias: str = None,
field_info: Optional[FieldInfo] = None,
) -> None:
self.name: str = name
self.has_alias: bool = bool(alias)
self.alias: str = alias or name
self.type_: Any = convert_generics(type_)
self.outer_type_: Any = type_
self.class_validators = class_validators or {}
self.default: Any = default
self.default_factory: Optional[NoArgAnyCallable] = default_factory
self.required: 'BoolUndefined' = required
self.model_config = model_config
self.field_info: FieldInfo = field_info or FieldInfo(default)
self.discriminator_key: Optional[str] = self.field_info.discriminator
self.discriminator_alias: Optional[str] = self.discriminator_key
self.allow_none: bool = False
self.validate_always: bool = False
self.sub_fields: Optional[List[ModelField]] = None
self.sub_fields_mapping: Optional[Dict[str, 'ModelField']] = None # used for discriminated union
self.key_field: Optional[ModelField] = None
self.validators: 'ValidatorsList' = []
self.pre_validators: Optional['ValidatorsList'] = None
self.post_validators: Optional['ValidatorsList'] = None
self.parse_json: bool = False
self.shape: int = SHAPE_SINGLETON
self.model_config.prepare_field(self)
self.prepare()
def get_default(self) -> Any:
return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory()
@staticmethod
def _get_field_info(
field_name: str, annotation: Any, value: Any, config: Type['BaseConfig']
) -> Tuple[FieldInfo, Any]:
"""
Get a FieldInfo from a root typing.Annotated annotation, value, or config default.
The FieldInfo may be set in typing.Annotated or the value, but not both. If neither contain
a FieldInfo, a new one will be created using the config.
:param field_name: name of the field for use in error messages
:param annotation: a type hint such as `str` or `Annotated[str, Field(..., min_length=5)]`
:param value: the field's assigned value
:param config: the model's config object
:return: the FieldInfo contained in the `annotation`, the value, or a new one from the config.
"""
field_info_from_config = config.get_field_info(field_name)
field_info = None
if get_origin(annotation) is Annotated:
field_infos = [arg for arg in get_args(annotation)[1:] if isinstance(arg, FieldInfo)]
if len(field_infos) > 1:
raise ValueError(f'cannot specify multiple `Annotated` `Field`s for {field_name!r}')
field_info = next(iter(field_infos), None)
if field_info is not None:
field_info = copy.copy(field_info)
field_info.update_from_config(field_info_from_config)
if field_info.default is not Undefined:
raise ValueError(f'`Field` default cannot be set in `Annotated` for {field_name!r}')
if value is not Undefined and value is not Required:
# check also `Required` because of `validate_arguments` that sets `...` as default value
field_info.default = value
if isinstance(value, FieldInfo):
if field_info is not None:
raise ValueError(f'cannot specify `Annotated` and value `Field`s together for {field_name!r}')
field_info = value
field_info.update_from_config(field_info_from_config)
elif field_info is None:
field_info = FieldInfo(value, **field_info_from_config)
value = None if field_info.default_factory is not None else field_info.default
field_info._validate()
return field_info, value
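    # Illustrative sketch (not part of the upstream module): the FieldInfo may
    # come from an `Annotated` type hint instead of an assigned value; putting a
    # default inside the `Annotated` `Field` is rejected by the checks above.
    #
    #     >>> class Model(BaseModel):
    #     ...     count: Annotated[int, Field(gt=0)] = 1   # default set outside Annotated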
@classmethod
def infer(
cls,
*,
name: str,
value: Any,
annotation: Any,
class_validators: Optional[Dict[str, Validator]],
config: Type['BaseConfig'],
) -> 'ModelField':
from .schema import get_annotation_from_field_info
field_info, value = cls._get_field_info(name, annotation, value, config)
required: 'BoolUndefined' = Undefined
if value is Required:
required = True
value = None
elif value is not Undefined:
required = False
annotation = get_annotation_from_field_info(annotation, field_info, name, config.validate_assignment)
return cls(
name=name,
type_=annotation,
alias=field_info.alias,
class_validators=class_validators,
default=value,
default_factory=field_info.default_factory,
required=required,
model_config=config,
field_info=field_info,
)
def set_config(self, config: Type['BaseConfig']) -> None:
self.model_config = config
info_from_config = config.get_field_info(self.name)
config.prepare_field(self)
new_alias = info_from_config.get('alias')
new_alias_priority = info_from_config.get('alias_priority') or 0
if new_alias and new_alias_priority >= (self.field_info.alias_priority or 0):
self.field_info.alias = new_alias
self.field_info.alias_priority = new_alias_priority
self.alias = new_alias
new_exclude = info_from_config.get('exclude')
if new_exclude is not None:
self.field_info.exclude = ValueItems.merge(self.field_info.exclude, new_exclude)
new_include = info_from_config.get('include')
if new_include is not None:
self.field_info.include = ValueItems.merge(self.field_info.include, new_include, intersect=True)
@property
def alt_alias(self) -> bool:
return self.name != self.alias
def prepare(self) -> None:
"""
        Prepare the field by inspecting self.default, self.type_ etc.
        Note: this method is **not** idempotent (because _type_analysis is not idempotent),
        e.g. calling it multiple times may modify the field and configure it incorrectly.
"""
self._set_default_and_type()
if self.type_.__class__ is ForwardRef or self.type_.__class__ is DeferredType:
# self.type_ is currently a ForwardRef and there's nothing we can do now,
# user will need to call model.update_forward_refs()
return
self._type_analysis()
if self.required is Undefined:
self.required = True
if self.default is Undefined and self.default_factory is None:
self.default = None
self.populate_validators()
def _set_default_and_type(self) -> None:
"""
Set the default value, infer the type if needed and check if `None` value is valid.
"""
if self.default_factory is not None:
if self.type_ is Undefined:
raise errors_.ConfigError(
f'you need to set the type of field {self.name!r} when using `default_factory`'
)
return
default_value = self.get_default()
if default_value is not None and self.type_ is Undefined:
self.type_ = default_value.__class__
self.outer_type_ = self.type_
if self.type_ is Undefined:
raise errors_.ConfigError(f'unable to infer type for attribute "{self.name}"')
if self.required is False and default_value is None:
self.allow_none = True
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity)
# typing interface is horrible, we have to do some ugly checks
if lenient_issubclass(self.type_, JsonWrapper):
self.type_ = self.type_.inner_type
self.parse_json = True
elif lenient_issubclass(self.type_, Json):
self.type_ = Any
self.parse_json = True
elif isinstance(self.type_, TypeVar):
if self.type_.__bound__:
self.type_ = self.type_.__bound__
elif self.type_.__constraints__:
self.type_ = Union[self.type_.__constraints__]
else:
self.type_ = Any
elif is_new_type(self.type_):
self.type_ = new_type_supertype(self.type_)
if self.type_ is Any or self.type_ is object:
if self.required is Undefined:
self.required = False
self.allow_none = True
return
elif self.type_ is Pattern:
# python 3.7 only, Pattern is a typing object but without sub fields
return
elif is_literal_type(self.type_):
return
elif is_typeddict(self.type_):
return
origin = get_origin(self.type_)
if origin is Annotated:
self.type_ = get_args(self.type_)[0]
self._type_analysis()
return
if self.discriminator_key is not None and not is_union(origin):
raise TypeError('`discriminator` can only be used with `Union` type with more than one variant')
# add extra check for `collections.abc.Hashable` for python 3.10+ where origin is not `None`
if origin is None or origin is CollectionsHashable:
# field is not "typing" object eg. Union, Dict, List etc.
# allow None for virtual superclasses of NoneType, e.g. Hashable
if isinstance(self.type_, type) and isinstance(None, self.type_):
self.allow_none = True
return
elif origin is Callable:
return
elif is_union(origin):
types_ = []
for type_ in get_args(self.type_):
if is_none_type(type_) or type_ is Any or type_ is object:
if self.required is Undefined:
self.required = False
self.allow_none = True
if is_none_type(type_):
continue
types_.append(type_)
if len(types_) == 1:
# Optional[]
self.type_ = types_[0]
# this is the one case where the "outer type" isn't just the original type
self.outer_type_ = self.type_
# re-run to correctly interpret the new self.type_
self._type_analysis()
else:
self.sub_fields = [self._create_sub_type(t, f'{self.name}_{display_as_type(t)}') for t in types_]
if self.discriminator_key is not None:
self.prepare_discriminated_union_sub_fields()
return
elif issubclass(origin, Tuple): # type: ignore
# origin == Tuple without item type
args = get_args(self.type_)
if not args: # plain tuple
self.type_ = Any
self.shape = SHAPE_TUPLE_ELLIPSIS
elif len(args) == 2 and args[1] is Ellipsis: # e.g. Tuple[int, ...]
self.type_ = args[0]
self.shape = SHAPE_TUPLE_ELLIPSIS
self.sub_fields = [self._create_sub_type(args[0], f'{self.name}_0')]
elif args == ((),): # Tuple[()] means empty tuple
self.shape = SHAPE_TUPLE
self.type_ = Any
self.sub_fields = []
else:
self.shape = SHAPE_TUPLE
self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(args)]
return
elif issubclass(origin, List):
# Create self validators
get_validators = getattr(self.type_, '__get_validators__', None)
if get_validators:
self.class_validators.update(
{f'list_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_LIST
elif issubclass(origin, Set):
# Create self validators
get_validators = getattr(self.type_, '__get_validators__', None)
if get_validators:
self.class_validators.update(
{f'set_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SET
elif issubclass(origin, FrozenSet):
# Create self validators
get_validators = getattr(self.type_, '__get_validators__', None)
if get_validators:
self.class_validators.update(
{f'frozenset_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_FROZENSET
elif issubclass(origin, Deque):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_DEQUE
elif issubclass(origin, Sequence):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SEQUENCE
# priority to most common mapping: dict
elif origin is dict or origin is Dict:
self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DICT
elif issubclass(origin, DefaultDict):
self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DEFAULTDICT
elif issubclass(origin, Counter):
self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
self.type_ = int
self.shape = SHAPE_COUNTER
elif issubclass(origin, Mapping):
self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_MAPPING
        # Equality check as almost everything inherits from Iterable, including str
# check for Iterable and CollectionsIterable, as it could receive one even when declared with the other
elif origin in {Iterable, CollectionsIterable}:
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_ITERABLE
self.sub_fields = [self._create_sub_type(self.type_, f'{self.name}_type')]
elif issubclass(origin, Type): # type: ignore
return
elif hasattr(origin, '__get_validators__') or self.model_config.arbitrary_types_allowed:
# Is a Pydantic-compatible generic that handles itself
# or we have arbitrary_types_allowed = True
self.shape = SHAPE_GENERIC
self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(get_args(self.type_))]
self.type_ = origin
return
else:
raise TypeError(f'Fields of type "{origin}" are not supported.')
# type_ has been refined eg. as the type of a List and sub_fields needs to be populated
self.sub_fields = [self._create_sub_type(self.type_, '_' + self.name)]
def prepare_discriminated_union_sub_fields(self) -> None:
"""
Prepare the mapping <discriminator key> -> <ModelField> and update `sub_fields`
Note that this process can be aborted if a `ForwardRef` is encountered
"""
assert self.discriminator_key is not None
if self.type_.__class__ is DeferredType:
return
assert self.sub_fields is not None
sub_fields_mapping: Dict[str, 'ModelField'] = {}
all_aliases: Set[str] = set()
for sub_field in self.sub_fields:
t = sub_field.type_
if t.__class__ is ForwardRef:
# Stopping everything...will need to call `update_forward_refs`
return
alias, discriminator_values = get_discriminator_alias_and_values(t, self.discriminator_key)
all_aliases.add(alias)
for discriminator_value in discriminator_values:
sub_fields_mapping[discriminator_value] = sub_field
self.sub_fields_mapping = sub_fields_mapping
self.discriminator_alias = get_unique_discriminator_alias(all_aliases, self.discriminator_key)
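    # Illustrative sketch (not part of the upstream module): a discriminated
    # union maps each Literal value of the discriminator field to a single sub
    # model, so validation only ever tries one variant.
    #
    #     >>> class Cat(BaseModel):
    #     ...     pet_type: Literal['cat']
    #     >>> class Dog(BaseModel):
    #     ...     pet_type: Literal['dog']
    #     >>> class Owner(BaseModel):
    #     ...     pet: Union[Cat, Dog] = Field(..., discriminator='pet_type')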
def _create_sub_type(self, type_: Type[Any], name: str, *, for_keys: bool = False) -> 'ModelField':
if for_keys:
class_validators = None
else:
# validators for sub items should not have `each_item` as we want to check only the first sublevel
class_validators = {
k: Validator(
func=v.func,
pre=v.pre,
each_item=False,
always=v.always,
check_fields=v.check_fields,
skip_on_failure=v.skip_on_failure,
)
for k, v in self.class_validators.items()
if v.each_item
}
field_info, _ = self._get_field_info(name, type_, None, self.model_config)
return self.__class__(
type_=type_,
name=name,
class_validators=class_validators,
model_config=self.model_config,
field_info=field_info,
)
def populate_validators(self) -> None:
"""
Prepare self.pre_validators, self.validators, and self.post_validators based on self.type_'s __get_validators__
and class validators. This method should be idempotent, e.g. it should be safe to call multiple times
without mis-configuring the field.
"""
self.validate_always = getattr(self.type_, 'validate_always', False) or any(
v.always for v in self.class_validators.values()
)
class_validators_ = self.class_validators.values()
if not self.sub_fields or self.shape == SHAPE_GENERIC:
get_validators = getattr(self.type_, '__get_validators__', None)
v_funcs = (
*[v.func for v in class_validators_ if v.each_item and v.pre],
*(get_validators() if get_validators else list(find_validators(self.type_, self.model_config))),
*[v.func for v in class_validators_ if v.each_item and not v.pre],
)
self.validators = prep_validators(v_funcs)
self.pre_validators = []
self.post_validators = []
if self.field_info and self.field_info.const:
self.post_validators.append(make_generic_validator(constant_validator))
if class_validators_:
self.pre_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and v.pre)
self.post_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and not v.pre)
if self.parse_json:
self.pre_validators.append(make_generic_validator(validate_json))
self.pre_validators = self.pre_validators or None
self.post_validators = self.post_validators or None
def validate(
self, v: Any, values: Dict[str, Any], *, loc: 'LocStr', cls: Optional['ModelOrDc'] = None
) -> 'ValidateReturn':
assert self.type_.__class__ is not DeferredType
if self.type_.__class__ is ForwardRef:
assert cls is not None
raise ConfigError(
f'field "{self.name}" not yet prepared so type is still a ForwardRef, '
f'you might need to call {cls.__name__}.update_forward_refs().'
)
errors: Optional['ErrorList']
if self.pre_validators:
v, errors = self._apply_validators(v, values, loc, cls, self.pre_validators)
if errors:
return v, errors
if v is None:
if is_none_type(self.type_):
# keep validating
pass
elif self.allow_none:
if self.post_validators:
return self._apply_validators(v, values, loc, cls, self.post_validators)
else:
return None, None
else:
return v, ErrorWrapper(NoneIsNotAllowedError(), loc)
if self.shape == SHAPE_SINGLETON:
v, errors = self._validate_singleton(v, values, loc, cls)
elif self.shape in MAPPING_LIKE_SHAPES:
v, errors = self._validate_mapping_like(v, values, loc, cls)
elif self.shape == SHAPE_TUPLE:
v, errors = self._validate_tuple(v, values, loc, cls)
elif self.shape == SHAPE_ITERABLE:
v, errors = self._validate_iterable(v, values, loc, cls)
elif self.shape == SHAPE_GENERIC:
v, errors = self._apply_validators(v, values, loc, cls, self.validators)
else:
# sequence, list, set, generator, tuple with ellipsis, frozen set
v, errors = self._validate_sequence_like(v, values, loc, cls)
if not errors and self.post_validators:
v, errors = self._apply_validators(v, values, loc, cls, self.post_validators)
return v, errors
def _validate_sequence_like( # noqa: C901 (ignore complexity)
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
"""
Validate sequence-like containers: lists, tuples, sets and generators
Note that large if-else blocks are necessary to enable Cython
optimization, which is why we disable the complexity check above.
"""
if not sequence_like(v):
e: errors_.PydanticTypeError
if self.shape == SHAPE_LIST:
e = errors_.ListError()
elif self.shape in (SHAPE_TUPLE, SHAPE_TUPLE_ELLIPSIS):
e = errors_.TupleError()
elif self.shape == SHAPE_SET:
e = errors_.SetError()
elif self.shape == SHAPE_FROZENSET:
e = errors_.FrozenSetError()
else:
e = errors_.SequenceError()
return v, ErrorWrapper(e, loc)
loc = loc if isinstance(loc, tuple) else (loc,)
result = []
errors: List[ErrorList] = []
for i, v_ in enumerate(v):
v_loc = *loc, i
r, ee = self._validate_singleton(v_, values, v_loc, cls)
if ee:
errors.append(ee)
else:
result.append(r)
if errors:
return v, errors
converted: Union[List[Any], Set[Any], FrozenSet[Any], Tuple[Any, ...], Iterator[Any], Deque[Any]] = result
if self.shape == SHAPE_SET:
converted = set(result)
elif self.shape == SHAPE_FROZENSET:
converted = frozenset(result)
elif self.shape == SHAPE_TUPLE_ELLIPSIS:
converted = tuple(result)
elif self.shape == SHAPE_DEQUE:
converted = deque(result)
elif self.shape == SHAPE_SEQUENCE:
if isinstance(v, tuple):
converted = tuple(result)
elif isinstance(v, set):
converted = set(result)
elif isinstance(v, Generator):
converted = iter(result)
elif isinstance(v, deque):
converted = deque(result)
return converted, None
def _validate_iterable(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
"""
Validate Iterables.
This intentionally doesn't validate values to allow infinite generators.
"""
try:
iterable = iter(v)
except TypeError:
return v, ErrorWrapper(errors_.IterableError(), loc)
return iterable, None
def _validate_tuple(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
e: Optional[Exception] = None
if not sequence_like(v):
e = errors_.TupleError()
else:
actual_length, expected_length = len(v), len(self.sub_fields) # type: ignore
if actual_length != expected_length:
e = errors_.TupleLengthError(actual_length=actual_length, expected_length=expected_length)
if e:
return v, ErrorWrapper(e, loc)
loc = loc if isinstance(loc, tuple) else (loc,)
result = []
errors: List[ErrorList] = []
for i, (v_, field) in enumerate(zip(v, self.sub_fields)): # type: ignore
v_loc = *loc, i
r, ee = field.validate(v_, values, loc=v_loc, cls=cls)
if ee:
errors.append(ee)
else:
result.append(r)
if errors:
return v, errors
else:
return tuple(result), None
def _validate_mapping_like(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
try:
v_iter = dict_validator(v)
except TypeError as exc:
return v, ErrorWrapper(exc, loc)
loc = loc if isinstance(loc, tuple) else (loc,)
result, errors = {}, []
for k, v_ in v_iter.items():
v_loc = *loc, '__key__'
key_result, key_errors = self.key_field.validate(k, values, loc=v_loc, cls=cls) # type: ignore
if key_errors:
errors.append(key_errors)
continue
v_loc = *loc, k
value_result, value_errors = self._validate_singleton(v_, values, v_loc, cls)
if value_errors:
errors.append(value_errors)
continue
result[key_result] = value_result
if errors:
return v, errors
elif self.shape == SHAPE_DICT:
return result, None
elif self.shape == SHAPE_DEFAULTDICT:
return defaultdict(self.type_, result), None
elif self.shape == SHAPE_COUNTER:
return CollectionCounter(result), None
else:
return self._get_mapping_value(v, result), None
def _get_mapping_value(self, original: T, converted: Dict[Any, Any]) -> Union[T, Dict[Any, Any]]:
"""
When type is `Mapping[KT, KV]` (or another unsupported mapping), we try to avoid
coercing to `dict` unwillingly.
"""
original_cls = original.__class__
if original_cls == dict or original_cls == Dict:
return converted
elif original_cls in {defaultdict, DefaultDict}:
return defaultdict(self.type_, converted)
else:
try:
# Counter, OrderedDict, UserDict, ...
return original_cls(converted) # type: ignore
except TypeError:
raise RuntimeError(f'Could not convert dictionary to {original_cls.__name__!r}') from None
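    # Illustrative sketch (not part of the upstream module): for Mapping-typed
    # fields the original mapping class is preserved where possible instead of
    # being silently coerced to a plain dict.
    #
    #     >>> from collections import OrderedDict
    #     >>> class Model(BaseModel):
    #     ...     m: Mapping[str, int]
    #     >>> type(Model(m=OrderedDict(a=1)).m).__name__
    #     'OrderedDict'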
def _validate_singleton(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
if self.sub_fields:
if self.discriminator_key is not None:
return self._validate_discriminated_union(v, values, loc, cls)
errors = []
if self.model_config.smart_union and is_union(get_origin(self.type_)):
# 1st pass: check if the value is an exact instance of one of the Union types
# (e.g. to avoid coercing a bool into an int)
for field in self.sub_fields:
if v.__class__ is field.outer_type_:
return v, None
# 2nd pass: check if the value is an instance of any subclass of the Union types
for field in self.sub_fields:
# This whole logic will be improved later on to support more complex `isinstance` checks
# It will probably be done once a strict mode is added and be something like:
# ```
# value, error = field.validate(v, values, strict=True)
# if error is None:
# return value, None
# ```
try:
if isinstance(v, field.outer_type_):
return v, None
except TypeError:
# compound type
if lenient_isinstance(v, get_origin(field.outer_type_)):
value, error = field.validate(v, values, loc=loc, cls=cls)
if not error:
return value, None
# 1st pass by default or 3rd pass with `smart_union` enabled:
# check if the value can be coerced into one of the Union types
for field in self.sub_fields:
value, error = field.validate(v, values, loc=loc, cls=cls)
if error:
errors.append(error)
else:
return value, None
return v, errors
else:
return self._apply_validators(v, values, loc, cls, self.validators)
def _validate_discriminated_union(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
assert self.discriminator_key is not None
assert self.discriminator_alias is not None
try:
discriminator_value = v[self.discriminator_alias]
except KeyError:
return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc)
except TypeError:
try:
# BaseModel or dataclass
discriminator_value = getattr(v, self.discriminator_alias)
except (AttributeError, TypeError):
return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc)
try:
sub_field = self.sub_fields_mapping[discriminator_value] # type: ignore[index]
except TypeError:
assert cls is not None
raise ConfigError(
f'field "{self.name}" not yet prepared so type is still a ForwardRef, '
f'you might need to call {cls.__name__}.update_forward_refs().'
)
except KeyError:
assert self.sub_fields_mapping is not None
return v, ErrorWrapper(
InvalidDiscriminator(
discriminator_key=self.discriminator_key,
discriminator_value=discriminator_value,
allowed_values=list(self.sub_fields_mapping),
),
loc,
)
else:
if not isinstance(loc, tuple):
loc = (loc,)
return sub_field.validate(v, values, loc=(*loc, display_as_type(sub_field.type_)), cls=cls)
def _apply_validators(
self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'], validators: 'ValidatorsList'
) -> 'ValidateReturn':
for validator in validators:
try:
v = validator(cls, v, values, self, self.model_config)
except (ValueError, TypeError, AssertionError) as exc:
return v, ErrorWrapper(exc, loc)
return v, None
def is_complex(self) -> bool:
"""
        Whether the field is "complex" - e.g. whether env variables for it should be parsed as JSON.
"""
from .main import BaseModel
return (
self.shape != SHAPE_SINGLETON
or lenient_issubclass(self.type_, (BaseModel, list, set, frozenset, dict))
or hasattr(self.type_, '__pydantic_model__') # pydantic dataclass
)
def _type_display(self) -> PyObjectStr:
t = display_as_type(self.type_)
# have to do this since display_as_type(self.outer_type_) is different (and wrong) on python 3.6
if self.shape in MAPPING_LIKE_SHAPES:
t = f'Mapping[{display_as_type(self.key_field.type_)}, {t}]' # type: ignore
elif self.shape == SHAPE_TUPLE:
t = 'Tuple[{}]'.format(', '.join(display_as_type(f.type_) for f in self.sub_fields)) # type: ignore
elif self.shape == SHAPE_GENERIC:
assert self.sub_fields
t = '{}[{}]'.format(
display_as_type(self.type_), ', '.join(display_as_type(f.type_) for f in self.sub_fields)
)
elif self.shape != SHAPE_SINGLETON:
t = SHAPE_NAME_LOOKUP[self.shape].format(t)
if self.allow_none and (self.shape != SHAPE_SINGLETON or not self.sub_fields):
t = f'Optional[{t}]'
return PyObjectStr(t)
def __repr_args__(self) -> 'ReprArgs':
args = [('name', self.name), ('type', self._type_display()), ('required', self.required)]
if not self.required:
if self.default_factory is not None:
args.append(('default_factory', f'<function {self.default_factory.__name__}>'))
else:
args.append(('default', self.default))
if self.alt_alias:
args.append(('alias', self.alias))
return args
class ModelPrivateAttr(Representation):
__slots__ = ('default', 'default_factory')
def __init__(self, default: Any = Undefined, *, default_factory: Optional[NoArgAnyCallable] = None) -> None:
self.default = default
self.default_factory = default_factory
def get_default(self) -> Any:
return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory()
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and (self.default, self.default_factory) == (
other.default,
other.default_factory,
)
def PrivateAttr(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
) -> Any:
"""
    Indicates that an attribute is only used internally and never mixed with regular fields.
Types or values of private attrs are not checked by pydantic and it's up to you to keep them relevant.
Private attrs are stored in model __slots__.
:param default: the attribute’s default value
:param default_factory: callable that will be called when a default value is needed for this attribute
If both `default` and `default_factory` are set, an error is raised.
"""
if default is not Undefined and default_factory is not None:
raise ValueError('cannot specify both default and default_factory')
return ModelPrivateAttr(
default,
default_factory=default_factory,
)
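# Illustrative usage sketch (not part of the upstream module): private
# attributes are skipped by validation and excluded from .dict() and the schema
# (datetime import assumed).
#
#     >>> class TimeAware(BaseModel):
#     ...     _processed_at: datetime = PrivateAttr(default_factory=datetime.utcnow)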
class DeferredType:
"""
Used to postpone field preparation, while creating recursive generic models.
"""
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/decorator.py
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload
from . import validator
from .config import Extra
from .errors import ConfigError
from .main import BaseModel, create_model
from .typing import get_all_type_hints
from .utils import to_camel
__all__ = ('validate_arguments',)
if TYPE_CHECKING:
from .typing import AnyCallable
AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)
ConfigType = Union[None, Type[Any], Dict[str, Any]]
@overload
def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:
...
@overload
def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':
...
def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:
"""
Decorator to validate the arguments passed to a function.
"""
def validate(_func: 'AnyCallable') -> 'AnyCallable':
vd = ValidatedFunction(_func, config)
@wraps(_func)
def wrapper_function(*args: Any, **kwargs: Any) -> Any:
return vd.call(*args, **kwargs)
wrapper_function.vd = vd # type: ignore
wrapper_function.validate = vd.init_model_instance # type: ignore
wrapper_function.raw_function = vd.raw_function # type: ignore
wrapper_function.model = vd.model # type: ignore
return wrapper_function
if func:
return validate(func)
else:
return validate
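# Illustrative usage sketch (not part of the upstream module): arguments are
# coerced and validated against the annotations before the wrapped function
# runs; invalid input raises a ValidationError instead.
#
#     >>> @validate_arguments
#     ... def repeat(text: str, count: int) -> str:
#     ...     return text * count
#     >>> repeat('ab', '3')   # '3' is coerced to int
#     'ababab'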
ALT_V_ARGS = 'v__args'
ALT_V_KWARGS = 'v__kwargs'
V_POSITIONAL_ONLY_NAME = 'v__positional_only'
V_DUPLICATE_KWARGS = 'v__duplicate_kwargs'
class ValidatedFunction:
def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
from inspect import Parameter, signature
parameters: Mapping[str, Parameter] = signature(function).parameters
if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:
raise ConfigError(
f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" '
f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator'
)
self.raw_function = function
self.arg_mapping: Dict[int, str] = {}
self.positional_only_args = set()
self.v_args_name = 'args'
self.v_kwargs_name = 'kwargs'
type_hints = get_all_type_hints(function)
takes_args = False
takes_kwargs = False
fields: Dict[str, Tuple[Any, Any]] = {}
for i, (name, p) in enumerate(parameters.items()):
if p.annotation is p.empty:
annotation = Any
else:
annotation = type_hints[name]
default = ... if p.default is p.empty else p.default
if p.kind == Parameter.POSITIONAL_ONLY:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_POSITIONAL_ONLY_NAME] = List[str], None
self.positional_only_args.add(name)
elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_DUPLICATE_KWARGS] = List[str], None
elif p.kind == Parameter.KEYWORD_ONLY:
fields[name] = annotation, default
elif p.kind == Parameter.VAR_POSITIONAL:
self.v_args_name = name
fields[name] = Tuple[annotation, ...], None
takes_args = True
else:
assert p.kind == Parameter.VAR_KEYWORD, p.kind
self.v_kwargs_name = name
fields[name] = Dict[str, annotation], None # type: ignore
takes_kwargs = True
# these checks avoid a clash between "args" and a field with that name
if not takes_args and self.v_args_name in fields:
self.v_args_name = ALT_V_ARGS
# same with "kwargs"
if not takes_kwargs and self.v_kwargs_name in fields:
self.v_kwargs_name = ALT_V_KWARGS
if not takes_args:
# we add the field so validation below can raise the correct exception
fields[self.v_args_name] = List[Any], None
if not takes_kwargs:
# same with kwargs
fields[self.v_kwargs_name] = Dict[Any, Any], None
self.create_model(fields, takes_args, takes_kwargs, config)
def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:
values = self.build_values(args, kwargs)
return self.model(**values)
def call(self, *args: Any, **kwargs: Any) -> Any:
m = self.init_model_instance(*args, **kwargs)
return self.execute(m)
def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
values: Dict[str, Any] = {}
if args:
arg_iter = enumerate(args)
while True:
try:
i, a = next(arg_iter)
except StopIteration:
break
arg_name = self.arg_mapping.get(i)
if arg_name is not None:
values[arg_name] = a
else:
values[self.v_args_name] = [a] + [a for _, a in arg_iter]
break
var_kwargs = {}
wrong_positional_args = []
duplicate_kwargs = []
non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}
for k, v in kwargs.items():
if k in non_var_fields:
if k in self.positional_only_args:
wrong_positional_args.append(k)
if k in values:
duplicate_kwargs.append(k)
values[k] = v
else:
var_kwargs[k] = v
if var_kwargs:
values[self.v_kwargs_name] = var_kwargs
if wrong_positional_args:
values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
if duplicate_kwargs:
values[V_DUPLICATE_KWARGS] = duplicate_kwargs
return values
def execute(self, m: BaseModel) -> Any:
d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}
var_kwargs = d.pop(self.v_kwargs_name, {})
if self.v_args_name in d:
args_: List[Any] = []
in_kwargs = False
kwargs = {}
for name, value in d.items():
if in_kwargs:
kwargs[name] = value
elif name == self.v_args_name:
args_ += value
in_kwargs = True
else:
args_.append(value)
return self.raw_function(*args_, **kwargs, **var_kwargs)
elif self.positional_only_args:
args_ = []
kwargs = {}
for name, value in d.items():
if name in self.positional_only_args:
args_.append(value)
else:
kwargs[name] = value
return self.raw_function(*args_, **kwargs, **var_kwargs)
else:
return self.raw_function(**d, **var_kwargs)
def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
pos_args = len(self.arg_mapping)
class CustomConfig:
pass
if not TYPE_CHECKING: # pragma: no branch
if isinstance(config, dict):
CustomConfig = type('Config', (), config) # noqa: F811
elif config is not None:
CustomConfig = config # noqa: F811
if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
raise ConfigError(
'Setting the "fields" and "alias_generator" property on custom Config for '
'@validate_arguments is not yet supported, please remove.'
)
class DecoratorBaseModel(BaseModel):
@validator(self.v_args_name, check_fields=False, allow_reuse=True)
def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:
if takes_args or v is None:
return v
raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
@validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if takes_kwargs or v is None:
return v
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v.keys()))
raise TypeError(f'unexpected keyword argument{plural}: {keys}')
@validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
def check_positional_only(cls, v: Optional[List[str]]) -> None:
if v is None:
return
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
@validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:
if v is None:
return
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'multiple values for argument{plural}: {keys}')
class Config(CustomConfig):
extra = getattr(CustomConfig, 'extra', Extra.forbid)
self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/__init__.py
# flake8: noqa
from . import dataclasses
from .annotated_types import create_model_from_namedtuple, create_model_from_typeddict
from .class_validators import root_validator, validator
from .config import BaseConfig, Extra
from .decorator import validate_arguments
from .env_settings import BaseSettings
from .error_wrappers import ValidationError
from .errors import *
from .fields import Field, PrivateAttr, Required
from .main import *
from .networks import *
from .parse import Protocol
from .tools import *
from .types import *
from .version import VERSION
__version__ = VERSION
# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2
# please use "from pydantic.errors import ..." instead
__all__ = [
# annotated types utils
'create_model_from_namedtuple',
'create_model_from_typeddict',
# dataclasses
'dataclasses',
# class_validators
'root_validator',
'validator',
# config
'BaseConfig',
'Extra',
# decorator
'validate_arguments',
# env_settings
'BaseSettings',
# error_wrappers
'ValidationError',
# fields
'Field',
'Required',
# main
'BaseModel',
'compiled',
'create_model',
'validate_model',
# network
'AnyUrl',
'AnyHttpUrl',
'FileUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'AmqpDsn',
'RedisDsn',
'KafkaDsn',
'validate_email',
# parse
'Protocol',
# tools
'parse_file_as',
'parse_obj_as',
'parse_raw_as',
'schema_of',
'schema_json_of',
# types
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedFrozenSet',
'confrozenset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'PrivateAttr',
'ByteSize',
'PastDate',
'FutureDate',
# version
'VERSION',
]
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/networks.py
import re
from ipaddress import (
IPv4Address,
IPv4Interface,
IPv4Network,
IPv6Address,
IPv6Interface,
IPv6Network,
_BaseAddress,
_BaseNetwork,
)
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
Generator,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from . import errors
from .utils import Representation, update_not_none
from .validators import constr_length_validator, str_validator
if TYPE_CHECKING:
import email_validator
from typing_extensions import TypedDict
from .config import BaseConfig
from .fields import ModelField
from .typing import AnyCallable
CallableGenerator = Generator[AnyCallable, None, None]
class Parts(TypedDict, total=False):
scheme: str
user: Optional[str]
password: Optional[str]
ipv4: Optional[str]
ipv6: Optional[str]
domain: Optional[str]
port: Optional[str]
path: Optional[str]
query: Optional[str]
fragment: Optional[str]
else:
email_validator = None
class Parts(dict):
pass
NetworkType = Union[str, bytes, int, Tuple[Union[str, bytes, int], Union[str, int]]]
__all__ = [
'AnyUrl',
'AnyHttpUrl',
'FileUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'AmqpDsn',
'RedisDsn',
'KafkaDsn',
'validate_email',
]
_url_regex_cache = None
_ascii_domain_regex_cache = None
_int_domain_regex_cache = None
def url_regex() -> Pattern[str]:
global _url_regex_cache
if _url_regex_cache is None:
_url_regex_cache = re.compile(
r'(?:(?P<scheme>[a-z][a-z0-9+\-.]+)://)?' # scheme https://tools.ietf.org/html/rfc3986#appendix-A
r'(?:(?P<user>[^\s:/]*)(?::(?P<password>[^\s/]*))?@)?' # user info
r'(?:'
r'(?P<ipv4>(?:\d{1,3}\.){3}\d{1,3})(?=$|[/:#?])|' # ipv4
r'(?P<ipv6>\[[A-F0-9]*:[A-F0-9:]+\])(?=$|[/:#?])|' # ipv6
r'(?P<domain>[^\s/:?#]+)' # domain, validation occurs later
r')?'
r'(?::(?P<port>\d+))?' # port
r'(?P<path>/[^\s?#]*)?' # path
r'(?:\?(?P<query>[^\s#]*))?' # query
r'(?:#(?P<fragment>[^\s#]*))?', # fragment
re.IGNORECASE,
)
return _url_regex_cache
def ascii_domain_regex() -> Pattern[str]:
global _ascii_domain_regex_cache
if _ascii_domain_regex_cache is None:
ascii_chunk = r'[_0-9a-z](?:[-_0-9a-z]{0,61}[_0-9a-z])?'
ascii_domain_ending = r'(?P<tld>\.[a-z]{2,63})?\.?'
_ascii_domain_regex_cache = re.compile(
fr'(?:{ascii_chunk}\.)*?{ascii_chunk}{ascii_domain_ending}', re.IGNORECASE
)
return _ascii_domain_regex_cache
def int_domain_regex() -> Pattern[str]:
global _int_domain_regex_cache
if _int_domain_regex_cache is None:
int_chunk = r'[_0-9a-\U00040000](?:[-_0-9a-\U00040000]{0,61}[_0-9a-\U00040000])?'
int_domain_ending = r'(?P<tld>(\.[^\W\d_]{2,63})|(\.(?:xn--)[_0-9a-z-]{2,63}))?\.?'
_int_domain_regex_cache = re.compile(fr'(?:{int_chunk}\.)*?{int_chunk}{int_domain_ending}', re.IGNORECASE)
return _int_domain_regex_cache
class AnyUrl(str):
strip_whitespace = True
min_length = 1
max_length = 2**16
allowed_schemes: Optional[Collection[str]] = None
tld_required: bool = False
user_required: bool = False
host_required: bool = True
hidden_parts: Set[str] = set()
__slots__ = ('scheme', 'user', 'password', 'host', 'tld', 'host_type', 'port', 'path', 'query', 'fragment')
@no_type_check
def __new__(cls, url: Optional[str], **kwargs) -> object:
return str.__new__(cls, cls.build(**kwargs) if url is None else url)
def __init__(
self,
url: str,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: Optional[str] = None,
tld: Optional[str] = None,
host_type: str = 'domain',
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
) -> None:
str.__init__(url)
self.scheme = scheme
self.user = user
self.password = password
self.host = host
self.tld = tld
self.host_type = host_type
self.port = port
self.path = path
self.query = query
self.fragment = fragment
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
parts = Parts(
scheme=scheme,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs, # type: ignore[misc]
)
url = scheme + '://'
if user:
url += user
if password:
url += ':' + password
if user or password:
url += '@'
url += host
if port and ('port' not in cls.hidden_parts or cls.get_default_parts(parts).get('port') != port):
url += ':' + port
if path:
url += path
if query:
url += '?' + query
if fragment:
url += '#' + fragment
return url
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length, format='uri')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any, field: 'ModelField', config: 'BaseConfig') -> 'AnyUrl':
if value.__class__ == cls:
return value
value = str_validator(value)
if cls.strip_whitespace:
value = value.strip()
url: str = cast(str, constr_length_validator(value, field, config))
m = url_regex().match(url)
        # the regex should always match; if it doesn't, please report with details of the URL tried
assert m, 'URL regex failed unexpectedly'
original_parts = cast('Parts', m.groupdict())
parts = cls.apply_default_parts(original_parts)
parts = cls.validate_parts(parts)
host, tld, host_type, rebuild = cls.validate_host(parts)
if m.end() != len(url):
raise errors.UrlExtraError(extra=url[m.end() :])
return cls(
None if rebuild else url,
scheme=parts['scheme'],
user=parts['user'],
password=parts['password'],
host=host,
tld=tld,
host_type=host_type,
port=parts['port'],
path=parts['path'],
query=parts['query'],
fragment=parts['fragment'],
)
@classmethod
def validate_parts(cls, parts: 'Parts') -> 'Parts':
"""
        A method used to validate parts of a URL.
        Could be overridden to set default values for parts if missing.
"""
scheme = parts['scheme']
if scheme is None:
raise errors.UrlSchemeError()
if cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
port = parts['port']
if port is not None and int(port) > 65_535:
raise errors.UrlPortError()
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def validate_host(cls, parts: 'Parts') -> Tuple[str, Optional[str], str, bool]:
host, tld, host_type, rebuild = None, None, None, False
for f in ('domain', 'ipv4', 'ipv6'):
host = parts[f] # type: ignore[literal-required]
if host:
host_type = f
break
if host is None:
if cls.host_required:
raise errors.UrlHostError()
elif host_type == 'domain':
is_international = False
d = ascii_domain_regex().fullmatch(host)
if d is None:
d = int_domain_regex().fullmatch(host)
if d is None:
raise errors.UrlHostError()
is_international = True
tld = d.group('tld')
if tld is None and not is_international:
d = int_domain_regex().fullmatch(host)
assert d is not None
tld = d.group('tld')
is_international = True
if tld is not None:
tld = tld[1:]
elif cls.tld_required:
raise errors.UrlHostTldError()
if is_international:
host_type = 'int_domain'
rebuild = True
host = host.encode('idna').decode('ascii')
if tld is not None:
tld = tld.encode('idna').decode('ascii')
return host, tld, host_type, rebuild # type: ignore
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {}
@classmethod
def apply_default_parts(cls, parts: 'Parts') -> 'Parts':
for key, value in cls.get_default_parts(parts).items():
if not parts[key]: # type: ignore[literal-required]
parts[key] = value # type: ignore[literal-required]
return parts
def __repr__(self) -> str:
extra = ', '.join(f'{n}={getattr(self, n)!r}' for n in self.__slots__ if getattr(self, n) is not None)
return f'{self.__class__.__name__}({super().__repr__()}, {extra})'
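# Illustrative sketch (not part of the original source; the model and field names are
# assumptions): once validated inside a pydantic model, an AnyUrl instance is a str
# that also carries its parsed components.
#
#     from pydantic import BaseModel, AnyUrl
#
#     class Resource(BaseModel):
#         url: AnyUrl
#
#     r = Resource(url='https://user:pass@example.com:8000/path?q=1#frag')
#     r.url.host    # 'example.com'
#     r.url.port    # '8000'  (components are stored as strings)
#     r.url.scheme  # 'https'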
class AnyHttpUrl(AnyUrl):
allowed_schemes = {'http', 'https'}
class HttpUrl(AnyHttpUrl):
tld_required = True
# https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
max_length = 2083
hidden_parts = {'port'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {'port': '80' if parts['scheme'] == 'http' else '443'}
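# Illustrative sketch (assumption, not original source): because 'port' is in
# hidden_parts, a port equal to the scheme's default is omitted when the URL is
# rebuilt via build().
#
#     HttpUrl.build(scheme='https', host='example.com', port='443')
#     # -> 'https://example.com'      (443 is the https default, so it is hidden)
#     HttpUrl.build(scheme='https', host='example.com', port='8443')
#     # -> 'https://example.com:8443'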
class FileUrl(AnyUrl):
allowed_schemes = {'file'}
host_required = False
class PostgresDsn(AnyUrl):
allowed_schemes = {
'postgres',
'postgresql',
'postgresql+asyncpg',
'postgresql+pg8000',
'postgresql+psycopg2',
'postgresql+psycopg2cffi',
'postgresql+py-postgresql',
'postgresql+pygresql',
}
user_required = True
class AmqpDsn(AnyUrl):
allowed_schemes = {'amqp', 'amqps'}
host_required = False
class RedisDsn(AnyUrl):
allowed_schemes = {'redis', 'rediss'}
host_required = False
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {
'domain': 'localhost' if not (parts['ipv4'] or parts['ipv6']) else '',
'port': '6379',
'path': '/0',
}
class KafkaDsn(AnyUrl):
allowed_schemes = {'kafka'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {
'domain': 'localhost',
'port': '9092',
}
def stricturl(
*,
strip_whitespace: bool = True,
min_length: int = 1,
max_length: int = 2**16,
tld_required: bool = True,
host_required: bool = True,
allowed_schemes: Optional[Collection[str]] = None,
) -> Type[AnyUrl]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
min_length=min_length,
max_length=max_length,
tld_required=tld_required,
host_required=host_required,
allowed_schemes=allowed_schemes,
)
return type('UrlValue', (AnyUrl,), namespace)
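# Minimal usage sketch (assumption): stricturl() returns a new AnyUrl subclass, so it
# is typically used directly as a field annotation.
#
#     from pydantic import BaseModel, stricturl
#
#     class Repo(BaseModel):
#         origin: stricturl(allowed_schemes={'ssh', 'https'})
#
#     Repo(origin='https://example.com/repo.git')   # ok
#     Repo(origin='ftp://example.com/repo.git')     # ValidationError: scheme not permitted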
def import_email_validator() -> None:
global email_validator
try:
import email_validator
except ImportError as e:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e
class EmailStr(str):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
# included here and below so the error happens straight away
import_email_validator()
yield str_validator
yield cls.validate
@classmethod
    def validate(cls, value: str) -> str:
return validate_email(value)[1]
class NameEmail(Representation):
__slots__ = 'name', 'email'
def __init__(self, name: str, email: str):
self.name = name
self.email = email
def __eq__(self, other: Any) -> bool:
return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='name-email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
import_email_validator()
yield cls.validate
@classmethod
def validate(cls, value: Any) -> 'NameEmail':
if value.__class__ == cls:
return value
value = str_validator(value)
return cls(*validate_email(value))
def __str__(self) -> str:
return f'{self.name} <{self.email}>'
class IPvAnyAddress(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyaddress')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Union[str, bytes, int]) -> Union[IPv4Address, IPv6Address]:
try:
return IPv4Address(value)
except ValueError:
pass
try:
return IPv6Address(value)
except ValueError:
raise errors.IPvAnyAddressError()
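# Illustrative sketch (assumption): validate() returns a plain stdlib address object,
# trying IPv4 first and falling back to IPv6.
#
#     IPvAnyAddress.validate('127.0.0.1')   # -> IPv4Address('127.0.0.1')
#     IPvAnyAddress.validate('::1')         # -> IPv6Address('::1')
#     IPvAnyAddress.validate('not-an-ip')   # raises errors.IPvAnyAddressError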
class IPvAnyInterface(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyinterface')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Interface, IPv6Interface]:
try:
return IPv4Interface(value)
except ValueError:
pass
try:
return IPv6Interface(value)
except ValueError:
raise errors.IPvAnyInterfaceError()
class IPvAnyNetwork(_BaseNetwork): # type: ignore
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanynetwork')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Network, IPv6Network]:
        # Assume the IP network is defined with a default value for the ``strict`` argument.
# Define your own class if you want to specify network address check strictness.
try:
return IPv4Network(value)
except ValueError:
pass
try:
return IPv6Network(value)
except ValueError:
raise errors.IPvAnyNetworkError()
pretty_email_regex = re.compile(r'([\w ]*?) *<(.*)> *')
def validate_email(value: str) -> Tuple[str, str]:
"""
    Brutally simple email address validation. Note: unlike most email address validation,
    * raw ip address (literal) domain parts are not allowed.
    * "John Doe <local_part@domain.com>" style "pretty" email addresses are processed
    * the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
      solution is really possible.
    * spaces are stripped from the beginning and end of addresses but no error is raised
    See RFC 5322 but treat it with suspicion; there seems to exist no universally acknowledged test for a valid email!
"""
if email_validator is None:
import_email_validator()
m = pretty_email_regex.fullmatch(value)
name: Optional[str] = None
if m:
name, value = m.groups()
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise errors.EmailError() from e
at_index = email.index('@')
local_part = email[:at_index] # RFC 5321, local part must be case-sensitive.
global_part = email[at_index:].lower()
return name or local_part, local_part + global_part
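# Illustrative sketch (assumption; requires the optional email-validator dependency):
#
#     validate_email('John Doe <jd@example.com>')   # -> ('John Doe', 'jd@example.com')
#     validate_email('JD@EXAMPLE.com')              # -> ('JD', 'JD@example.com')
#
# The local part keeps its case (RFC 5321) while the domain is lowercased.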
| 17,207 | Python | 28.6179 | 118 | 0.571453 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/version.py | __all__ = 'VERSION', 'version_info'
VERSION = '1.9.2'
def version_info() -> str:
import platform
import sys
from importlib import import_module
from pathlib import Path
from .main import compiled
optional_deps = []
for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):
try:
import_module(p.replace('-', '_'))
except ImportError:
continue
optional_deps.append(p)
info = {
'pydantic version': VERSION,
'pydantic compiled': compiled,
'install path': Path(__file__).resolve().parent,
'python version': sys.version,
'platform': platform.platform(),
'optional deps. installed': optional_deps,
}
return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
| 848 | Python | 26.387096 | 101 | 0.570755 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/env_settings.py | import os
import warnings
from pathlib import Path
from typing import AbstractSet, Any, Callable, ClassVar, Dict, List, Mapping, Optional, Tuple, Type, Union
from .config import BaseConfig, Extra
from .fields import ModelField
from .main import BaseModel
from .typing import StrPath, display_as_type, get_origin, is_union
from .utils import deep_update, path_type, sequence_like
env_file_sentinel = str(object())
SettingsSourceCallable = Callable[['BaseSettings'], Dict[str, Any]]
class SettingsError(ValueError):
pass
class BaseSettings(BaseModel):
"""
Base class for settings, allowing values to be overridden by environment variables.
    This is useful in production for secrets you do not wish to save in code; it plays nicely with docker(-compose),
    Heroku, and any 12-factor app design.
"""
def __init__(
__pydantic_self__,
_env_file: Optional[StrPath] = env_file_sentinel,
_env_file_encoding: Optional[str] = None,
_env_nested_delimiter: Optional[str] = None,
_secrets_dir: Optional[StrPath] = None,
**values: Any,
) -> None:
        # Uses something other than `self` as the first arg to allow "self" as a settable attribute
super().__init__(
**__pydantic_self__._build_values(
values,
_env_file=_env_file,
_env_file_encoding=_env_file_encoding,
_env_nested_delimiter=_env_nested_delimiter,
_secrets_dir=_secrets_dir,
)
)
def _build_values(
self,
init_kwargs: Dict[str, Any],
_env_file: Optional[StrPath] = None,
_env_file_encoding: Optional[str] = None,
_env_nested_delimiter: Optional[str] = None,
_secrets_dir: Optional[StrPath] = None,
) -> Dict[str, Any]:
# Configure built-in sources
init_settings = InitSettingsSource(init_kwargs=init_kwargs)
env_settings = EnvSettingsSource(
env_file=(_env_file if _env_file != env_file_sentinel else self.__config__.env_file),
env_file_encoding=(
_env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding
),
env_nested_delimiter=(
_env_nested_delimiter if _env_nested_delimiter is not None else self.__config__.env_nested_delimiter
),
)
file_secret_settings = SecretsSettingsSource(secrets_dir=_secrets_dir or self.__config__.secrets_dir)
# Provide a hook to set built-in sources priority and add / remove sources
sources = self.__config__.customise_sources(
init_settings=init_settings, env_settings=env_settings, file_secret_settings=file_secret_settings
)
if sources:
return deep_update(*reversed([source(self) for source in sources]))
else:
# no one should mean to do this, but I think returning an empty dict is marginally preferable
# to an informative error and much better than a confusing error
return {}
class Config(BaseConfig):
env_prefix = ''
env_file = None
env_file_encoding = None
env_nested_delimiter = None
secrets_dir = None
validate_all = True
extra = Extra.forbid
arbitrary_types_allowed = True
case_sensitive = False
@classmethod
def prepare_field(cls, field: ModelField) -> None:
env_names: Union[List[str], AbstractSet[str]]
field_info_from_config = cls.get_field_info(field.name)
env = field_info_from_config.get('env') or field.field_info.extra.get('env')
if env is None:
if field.has_alias:
warnings.warn(
'aliases are no longer used by BaseSettings to define which environment variables to read. '
'Instead use the "env" field setting. '
'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',
FutureWarning,
)
env_names = {cls.env_prefix + field.name}
elif isinstance(env, str):
env_names = {env}
elif isinstance(env, (set, frozenset)):
env_names = env
elif sequence_like(env):
env_names = list(env)
else:
raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')
if not cls.case_sensitive:
env_names = env_names.__class__(n.lower() for n in env_names)
field.field_info.extra['env_names'] = env_names
@classmethod
def customise_sources(
cls,
init_settings: SettingsSourceCallable,
env_settings: SettingsSourceCallable,
file_secret_settings: SettingsSourceCallable,
) -> Tuple[SettingsSourceCallable, ...]:
return init_settings, env_settings, file_secret_settings
# populated by the metaclass using the Config class defined above, annotated here to help IDEs only
__config__: ClassVar[Type[Config]]
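# Minimal usage sketch (assumption, not part of the original source; the class and
# variable names are illustrative): values come from init kwargs first, then the
# environment, then secrets files.
#
#     import os
#
#     class AppSettings(BaseSettings):
#         debug: bool = False
#
#         class Config:
#             env_prefix = 'APP_'
#
#     os.environ['APP_DEBUG'] = 'true'
#     AppSettings().debug             # True   (read from APP_DEBUG)
#     AppSettings(debug=False).debug  # False  (init kwargs win over the environment)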
class InitSettingsSource:
__slots__ = ('init_kwargs',)
def __init__(self, init_kwargs: Dict[str, Any]):
self.init_kwargs = init_kwargs
def __call__(self, settings: BaseSettings) -> Dict[str, Any]:
return self.init_kwargs
def __repr__(self) -> str:
return f'InitSettingsSource(init_kwargs={self.init_kwargs!r})'
class EnvSettingsSource:
__slots__ = ('env_file', 'env_file_encoding', 'env_nested_delimiter')
def __init__(
self, env_file: Optional[StrPath], env_file_encoding: Optional[str], env_nested_delimiter: Optional[str] = None
):
self.env_file: Optional[StrPath] = env_file
self.env_file_encoding: Optional[str] = env_file_encoding
self.env_nested_delimiter: Optional[str] = env_nested_delimiter
def __call__(self, settings: BaseSettings) -> Dict[str, Any]: # noqa C901
"""
Build environment variables suitable for passing to the Model.
"""
d: Dict[str, Any] = {}
if settings.__config__.case_sensitive:
env_vars: Mapping[str, Optional[str]] = os.environ
else:
env_vars = {k.lower(): v for k, v in os.environ.items()}
if self.env_file is not None:
env_path = Path(self.env_file).expanduser()
if env_path.is_file():
env_vars = {
**read_env_file(
env_path, encoding=self.env_file_encoding, case_sensitive=settings.__config__.case_sensitive
),
**env_vars,
}
for field in settings.__fields__.values():
env_val: Optional[str] = None
for env_name in field.field_info.extra['env_names']:
env_val = env_vars.get(env_name)
if env_val is not None:
break
is_complex, allow_json_failure = self.field_is_complex(field)
if is_complex:
if env_val is None:
# field is complex but no value found so far, try explode_env_vars
env_val_built = self.explode_env_vars(field, env_vars)
if env_val_built:
d[field.alias] = env_val_built
else:
# field is complex and there's a value, decode that as JSON, then add explode_env_vars
try:
env_val = settings.__config__.json_loads(env_val)
except ValueError as e:
if not allow_json_failure:
raise SettingsError(f'error parsing JSON for "{env_name}"') from e
if isinstance(env_val, dict):
d[field.alias] = deep_update(env_val, self.explode_env_vars(field, env_vars))
else:
d[field.alias] = env_val
elif env_val is not None:
# simplest case, field is not complex, we only need to add the value if it was found
d[field.alias] = env_val
return d
def field_is_complex(self, field: ModelField) -> Tuple[bool, bool]:
"""
Find out if a field is complex, and if so whether JSON errors should be ignored
"""
if field.is_complex():
allow_json_failure = False
elif is_union(get_origin(field.type_)) and field.sub_fields and any(f.is_complex() for f in field.sub_fields):
allow_json_failure = True
else:
return False, False
return True, allow_json_failure
def explode_env_vars(self, field: ModelField, env_vars: Mapping[str, Optional[str]]) -> Dict[str, Any]:
"""
Process env_vars and extract the values of keys containing env_nested_delimiter into nested dictionaries.
This is applied to a single field, hence filtering by env_var prefix.
"""
prefixes = [f'{env_name}{self.env_nested_delimiter}' for env_name in field.field_info.extra['env_names']]
result: Dict[str, Any] = {}
for env_name, env_val in env_vars.items():
if not any(env_name.startswith(prefix) for prefix in prefixes):
continue
_, *keys, last_key = env_name.split(self.env_nested_delimiter)
env_var = result
for key in keys:
env_var = env_var.setdefault(key, {})
env_var[last_key] = env_val
return result
def __repr__(self) -> str:
return (
f'EnvSettingsSource(env_file={self.env_file!r}, env_file_encoding={self.env_file_encoding!r}, '
f'env_nested_delimiter={self.env_nested_delimiter!r})'
)
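# Illustrative sketch (assumption): with env_nested_delimiter='__' and a complex field
# whose env name is 'sub', explode_env_vars maps the flat variables
#
#     sub__name=x, sub__meta__id=1   ->   {'name': 'x', 'meta': {'id': '1'}}
#
# which is then assigned to the field (env var names are lowercased when
# case_sensitive is False).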
class SecretsSettingsSource:
__slots__ = ('secrets_dir',)
def __init__(self, secrets_dir: Optional[StrPath]):
self.secrets_dir: Optional[StrPath] = secrets_dir
def __call__(self, settings: BaseSettings) -> Dict[str, Any]:
"""
Build fields from "secrets" files.
"""
secrets: Dict[str, Optional[str]] = {}
if self.secrets_dir is None:
return secrets
secrets_path = Path(self.secrets_dir).expanduser()
if not secrets_path.exists():
warnings.warn(f'directory "{secrets_path}" does not exist')
return secrets
if not secrets_path.is_dir():
raise SettingsError(f'secrets_dir must reference a directory, not a {path_type(secrets_path)}')
for field in settings.__fields__.values():
for env_name in field.field_info.extra['env_names']:
path = secrets_path / env_name
if path.is_file():
secret_value = path.read_text().strip()
if field.is_complex():
try:
secret_value = settings.__config__.json_loads(secret_value)
except ValueError as e:
raise SettingsError(f'error parsing JSON for "{env_name}"') from e
secrets[field.alias] = secret_value
elif path.exists():
warnings.warn(
f'attempted to load secret file "{path}" but found a {path_type(path)} instead.',
stacklevel=4,
)
return secrets
def __repr__(self) -> str:
return f'SecretsSettingsSource(secrets_dir={self.secrets_dir!r})'
def read_env_file(
    file_path: StrPath, *, encoding: Optional[str] = None, case_sensitive: bool = False
) -> Dict[str, Optional[str]]:
try:
from dotenv import dotenv_values
except ImportError as e:
raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding or 'utf8')
if not case_sensitive:
return {k.lower(): v for k, v in file_vars.items()}
else:
return file_vars
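# Minimal sketch (assumption; requires the optional python-dotenv dependency): given a
# .env file containing the line APP_DEBUG=true,
#
#     read_env_file('.env')                       # -> {'app_debug': 'true'}
#     read_env_file('.env', case_sensitive=True)  # -> {'APP_DEBUG': 'true'}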
| 12,223 | Python | 38.817589 | 119 | 0.573427 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/errors.py | from decimal import Decimal
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Sequence, Set, Tuple, Type, Union
from .typing import display_as_type
if TYPE_CHECKING:
from .typing import DictStrAny
# explicitly state exports to avoid "from .errors import *" also importing Decimal, Path etc.
__all__ = (
'PydanticTypeError',
'PydanticValueError',
'ConfigError',
'MissingError',
'ExtraError',
'NoneIsNotAllowedError',
'NoneIsAllowedError',
'WrongConstantError',
'NotNoneError',
'BoolError',
'BytesError',
'DictError',
'EmailError',
'UrlError',
'UrlSchemeError',
'UrlSchemePermittedError',
'UrlUserInfoError',
'UrlHostError',
'UrlHostTldError',
'UrlPortError',
'UrlExtraError',
'EnumError',
'IntEnumError',
'EnumMemberError',
'IntegerError',
'FloatError',
'PathError',
'PathNotExistsError',
'PathNotAFileError',
'PathNotADirectoryError',
'PyObjectError',
'SequenceError',
'ListError',
'SetError',
'FrozenSetError',
'TupleError',
'TupleLengthError',
'ListMinLengthError',
'ListMaxLengthError',
'ListUniqueItemsError',
'SetMinLengthError',
'SetMaxLengthError',
'FrozenSetMinLengthError',
'FrozenSetMaxLengthError',
'AnyStrMinLengthError',
'AnyStrMaxLengthError',
'StrError',
'StrRegexError',
'NumberNotGtError',
'NumberNotGeError',
'NumberNotLtError',
'NumberNotLeError',
'NumberNotMultipleError',
'DecimalError',
'DecimalIsNotFiniteError',
'DecimalMaxDigitsError',
'DecimalMaxPlacesError',
'DecimalWholeDigitsError',
'DateTimeError',
'DateError',
'DateNotInThePastError',
'DateNotInTheFutureError',
'TimeError',
'DurationError',
'HashableError',
'UUIDError',
'UUIDVersionError',
'ArbitraryTypeError',
'ClassError',
'SubclassError',
'JsonError',
'JsonTypeError',
'PatternError',
'DataclassTypeError',
'CallableError',
'IPvAnyAddressError',
'IPvAnyInterfaceError',
'IPvAnyNetworkError',
'IPv4AddressError',
'IPv6AddressError',
'IPv4NetworkError',
'IPv6NetworkError',
'IPv4InterfaceError',
'IPv6InterfaceError',
'ColorError',
'StrictBoolError',
'NotDigitError',
'LuhnValidationError',
'InvalidLengthForBrand',
'InvalidByteSize',
'InvalidByteSizeUnit',
'MissingDiscriminator',
'InvalidDiscriminator',
)
def cls_kwargs(cls: Type['PydanticErrorMixin'], ctx: 'DictStrAny') -> 'PydanticErrorMixin':
"""
For built-in exceptions like ValueError or TypeError, we need to implement
    __reduce__ to override the default behaviour (instead of __getstate__/__setstate__).
    By default, pickle protocol 2 calls `cls.__new__(cls, *args)`.
    Since we only use kwargs, we need a little constructor to change that.
    Note: the callable can't be a lambda, as pickle looks in the namespace to find it.
"""
return cls(**ctx)
class PydanticErrorMixin:
code: str
msg_template: str
def __init__(self, **ctx: Any) -> None:
self.__dict__ = ctx
def __str__(self) -> str:
return self.msg_template.format(**self.__dict__)
def __reduce__(self) -> Tuple[Callable[..., 'PydanticErrorMixin'], Tuple[Type['PydanticErrorMixin'], 'DictStrAny']]:
return cls_kwargs, (self.__class__, self.__dict__)
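# Illustrative sketch (assumption): thanks to __reduce__ above, pydantic errors
# round-trip through pickle even though they are constructed from kwargs only.
#
#     import pickle
#
#     e = ListMinLengthError(limit_value=3)   # defined further down in this module
#     e2 = pickle.loads(pickle.dumps(e))
#     str(e2)   # 'ensure this value has at least 3 items'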
class PydanticTypeError(PydanticErrorMixin, TypeError):
pass
class PydanticValueError(PydanticErrorMixin, ValueError):
pass
class ConfigError(RuntimeError):
pass
class MissingError(PydanticValueError):
msg_template = 'field required'
class ExtraError(PydanticValueError):
msg_template = 'extra fields not permitted'
class NoneIsNotAllowedError(PydanticTypeError):
code = 'none.not_allowed'
msg_template = 'none is not an allowed value'
class NoneIsAllowedError(PydanticTypeError):
code = 'none.allowed'
msg_template = 'value is not none'
class WrongConstantError(PydanticValueError):
code = 'const'
def __str__(self) -> str:
permitted = ', '.join(repr(v) for v in self.permitted) # type: ignore
return f'unexpected value; permitted: {permitted}'
class NotNoneError(PydanticTypeError):
code = 'not_none'
msg_template = 'value is not None'
class BoolError(PydanticTypeError):
msg_template = 'value could not be parsed to a boolean'
class BytesError(PydanticTypeError):
msg_template = 'byte type expected'
class DictError(PydanticTypeError):
msg_template = 'value is not a valid dict'
class EmailError(PydanticValueError):
msg_template = 'value is not a valid email address'
class UrlError(PydanticValueError):
code = 'url'
class UrlSchemeError(UrlError):
code = 'url.scheme'
msg_template = 'invalid or missing URL scheme'
class UrlSchemePermittedError(UrlError):
code = 'url.scheme'
msg_template = 'URL scheme not permitted'
def __init__(self, allowed_schemes: Set[str]):
super().__init__(allowed_schemes=allowed_schemes)
class UrlUserInfoError(UrlError):
code = 'url.userinfo'
msg_template = 'userinfo required in URL but missing'
class UrlHostError(UrlError):
code = 'url.host'
msg_template = 'URL host invalid'
class UrlHostTldError(UrlError):
code = 'url.host'
msg_template = 'URL host invalid, top level domain required'
class UrlPortError(UrlError):
code = 'url.port'
msg_template = 'URL port invalid, port cannot exceed 65535'
class UrlExtraError(UrlError):
code = 'url.extra'
msg_template = 'URL invalid, extra characters found after valid URL: {extra!r}'
class EnumMemberError(PydanticTypeError):
code = 'enum'
def __str__(self) -> str:
permitted = ', '.join(repr(v.value) for v in self.enum_values) # type: ignore
return f'value is not a valid enumeration member; permitted: {permitted}'
class IntegerError(PydanticTypeError):
msg_template = 'value is not a valid integer'
class FloatError(PydanticTypeError):
msg_template = 'value is not a valid float'
class PathError(PydanticTypeError):
msg_template = 'value is not a valid path'
class _PathValueError(PydanticValueError):
def __init__(self, *, path: Path) -> None:
super().__init__(path=str(path))
class PathNotExistsError(_PathValueError):
code = 'path.not_exists'
msg_template = 'file or directory at path "{path}" does not exist'
class PathNotAFileError(_PathValueError):
code = 'path.not_a_file'
msg_template = 'path "{path}" does not point to a file'
class PathNotADirectoryError(_PathValueError):
code = 'path.not_a_directory'
msg_template = 'path "{path}" does not point to a directory'
class PyObjectError(PydanticTypeError):
msg_template = 'ensure this value contains valid import path or valid callable: {error_message}'
class SequenceError(PydanticTypeError):
msg_template = 'value is not a valid sequence'
class IterableError(PydanticTypeError):
msg_template = 'value is not a valid iterable'
class ListError(PydanticTypeError):
msg_template = 'value is not a valid list'
class SetError(PydanticTypeError):
msg_template = 'value is not a valid set'
class FrozenSetError(PydanticTypeError):
msg_template = 'value is not a valid frozenset'
class DequeError(PydanticTypeError):
msg_template = 'value is not a valid deque'
class TupleError(PydanticTypeError):
msg_template = 'value is not a valid tuple'
class TupleLengthError(PydanticValueError):
code = 'tuple.length'
msg_template = 'wrong tuple length {actual_length}, expected {expected_length}'
def __init__(self, *, actual_length: int, expected_length: int) -> None:
super().__init__(actual_length=actual_length, expected_length=expected_length)
class ListMinLengthError(PydanticValueError):
code = 'list.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class ListMaxLengthError(PydanticValueError):
code = 'list.max_items'
msg_template = 'ensure this value has at most {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class ListUniqueItemsError(PydanticValueError):
code = 'list.unique_items'
msg_template = 'the list has duplicated items'
class SetMinLengthError(PydanticValueError):
code = 'set.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class SetMaxLengthError(PydanticValueError):
code = 'set.max_items'
msg_template = 'ensure this value has at most {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class FrozenSetMinLengthError(PydanticValueError):
code = 'frozenset.min_items'
msg_template = 'ensure this value has at least {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class FrozenSetMaxLengthError(PydanticValueError):
code = 'frozenset.max_items'
msg_template = 'ensure this value has at most {limit_value} items'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class AnyStrMinLengthError(PydanticValueError):
code = 'any_str.min_length'
msg_template = 'ensure this value has at least {limit_value} characters'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class AnyStrMaxLengthError(PydanticValueError):
code = 'any_str.max_length'
msg_template = 'ensure this value has at most {limit_value} characters'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
class StrError(PydanticTypeError):
msg_template = 'str type expected'
class StrRegexError(PydanticValueError):
code = 'str.regex'
msg_template = 'string does not match regex "{pattern}"'
def __init__(self, *, pattern: str) -> None:
super().__init__(pattern=pattern)
class _NumberBoundError(PydanticValueError):
def __init__(self, *, limit_value: Union[int, float, Decimal]) -> None:
super().__init__(limit_value=limit_value)
class NumberNotGtError(_NumberBoundError):
code = 'number.not_gt'
msg_template = 'ensure this value is greater than {limit_value}'
class NumberNotGeError(_NumberBoundError):
code = 'number.not_ge'
msg_template = 'ensure this value is greater than or equal to {limit_value}'
class NumberNotLtError(_NumberBoundError):
code = 'number.not_lt'
msg_template = 'ensure this value is less than {limit_value}'
class NumberNotLeError(_NumberBoundError):
code = 'number.not_le'
msg_template = 'ensure this value is less than or equal to {limit_value}'
class NumberNotMultipleError(PydanticValueError):
code = 'number.not_multiple'
msg_template = 'ensure this value is a multiple of {multiple_of}'
def __init__(self, *, multiple_of: Union[int, float, Decimal]) -> None:
super().__init__(multiple_of=multiple_of)
class DecimalError(PydanticTypeError):
msg_template = 'value is not a valid decimal'
class DecimalIsNotFiniteError(PydanticValueError):
code = 'decimal.not_finite'
msg_template = 'value is not a valid decimal'
class DecimalMaxDigitsError(PydanticValueError):
code = 'decimal.max_digits'
msg_template = 'ensure that there are no more than {max_digits} digits in total'
def __init__(self, *, max_digits: int) -> None:
super().__init__(max_digits=max_digits)
class DecimalMaxPlacesError(PydanticValueError):
code = 'decimal.max_places'
msg_template = 'ensure that there are no more than {decimal_places} decimal places'
def __init__(self, *, decimal_places: int) -> None:
super().__init__(decimal_places=decimal_places)
class DecimalWholeDigitsError(PydanticValueError):
code = 'decimal.whole_digits'
msg_template = 'ensure that there are no more than {whole_digits} digits before the decimal point'
def __init__(self, *, whole_digits: int) -> None:
super().__init__(whole_digits=whole_digits)
class DateTimeError(PydanticValueError):
msg_template = 'invalid datetime format'
class DateError(PydanticValueError):
msg_template = 'invalid date format'
class DateNotInThePastError(PydanticValueError):
code = 'date.not_in_the_past'
msg_template = 'date is not in the past'
class DateNotInTheFutureError(PydanticValueError):
code = 'date.not_in_the_future'
msg_template = 'date is not in the future'
class TimeError(PydanticValueError):
msg_template = 'invalid time format'
class DurationError(PydanticValueError):
msg_template = 'invalid duration format'
class HashableError(PydanticTypeError):
msg_template = 'value is not a valid hashable'
class UUIDError(PydanticTypeError):
msg_template = 'value is not a valid uuid'
class UUIDVersionError(PydanticValueError):
code = 'uuid.version'
msg_template = 'uuid version {required_version} expected'
def __init__(self, *, required_version: int) -> None:
super().__init__(required_version=required_version)
class ArbitraryTypeError(PydanticTypeError):
code = 'arbitrary_type'
msg_template = 'instance of {expected_arbitrary_type} expected'
def __init__(self, *, expected_arbitrary_type: Type[Any]) -> None:
super().__init__(expected_arbitrary_type=display_as_type(expected_arbitrary_type))
class ClassError(PydanticTypeError):
code = 'class'
msg_template = 'a class is expected'
class SubclassError(PydanticTypeError):
code = 'subclass'
msg_template = 'subclass of {expected_class} expected'
def __init__(self, *, expected_class: Type[Any]) -> None:
super().__init__(expected_class=display_as_type(expected_class))
class JsonError(PydanticValueError):
msg_template = 'Invalid JSON'
class JsonTypeError(PydanticTypeError):
code = 'json'
msg_template = 'JSON object must be str, bytes or bytearray'
class PatternError(PydanticValueError):
code = 'regex_pattern'
msg_template = 'Invalid regular expression'
class DataclassTypeError(PydanticTypeError):
code = 'dataclass'
msg_template = 'instance of {class_name}, tuple or dict expected'
class CallableError(PydanticTypeError):
msg_template = '{value} is not callable'
class EnumError(PydanticTypeError):
code = 'enum_instance'
msg_template = '{value} is not a valid Enum instance'
class IntEnumError(PydanticTypeError):
code = 'int_enum_instance'
msg_template = '{value} is not a valid IntEnum instance'
class IPvAnyAddressError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 address'
class IPvAnyInterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 interface'
class IPvAnyNetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 network'
class IPv4AddressError(PydanticValueError):
msg_template = 'value is not a valid IPv4 address'
class IPv6AddressError(PydanticValueError):
msg_template = 'value is not a valid IPv6 address'
class IPv4NetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv4 network'
class IPv6NetworkError(PydanticValueError):
msg_template = 'value is not a valid IPv6 network'
class IPv4InterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv4 interface'
class IPv6InterfaceError(PydanticValueError):
msg_template = 'value is not a valid IPv6 interface'
class ColorError(PydanticValueError):
msg_template = 'value is not a valid color: {reason}'
class StrictBoolError(PydanticValueError):
msg_template = 'value is not a valid boolean'
class NotDigitError(PydanticValueError):
code = 'payment_card_number.digits'
msg_template = 'card number is not all digits'
class LuhnValidationError(PydanticValueError):
code = 'payment_card_number.luhn_check'
msg_template = 'card number is not luhn valid'
class InvalidLengthForBrand(PydanticValueError):
code = 'payment_card_number.invalid_length_for_brand'
msg_template = 'Length for a {brand} card must be {required_length}'
class InvalidByteSize(PydanticValueError):
msg_template = 'could not parse value and unit from byte string'
class InvalidByteSizeUnit(PydanticValueError):
msg_template = 'could not interpret byte unit: {unit}'
class MissingDiscriminator(PydanticValueError):
code = 'discriminated_union.missing_discriminator'
msg_template = 'Discriminator {discriminator_key!r} is missing in value'
class InvalidDiscriminator(PydanticValueError):
code = 'discriminated_union.invalid_discriminator'
msg_template = (
'No match for discriminator {discriminator_key!r} and value {discriminator_value!r} '
'(allowed values: {allowed_values})'
)
def __init__(self, *, discriminator_key: str, discriminator_value: Any, allowed_values: Sequence[Any]) -> None:
super().__init__(
discriminator_key=discriminator_key,
discriminator_value=discriminator_value,
allowed_values=', '.join(map(repr, allowed_values)),
)
| 17,547 | Python | 26.333333 | 120 | 0.695617 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/utils.py | import warnings
import weakref
from collections import OrderedDict, defaultdict, deque
from copy import deepcopy
from itertools import islice, zip_longest
from types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Collection,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from typing_extensions import Annotated
from .errors import ConfigError
from .typing import (
NoneType,
WithArgsTypes,
all_literal_values,
display_as_type,
get_args,
get_origin,
is_literal_type,
is_union,
)
from .version import version_info
if TYPE_CHECKING:
from inspect import Signature
from pathlib import Path
from .config import BaseConfig
from .dataclasses import Dataclass
from .fields import ModelField
from .main import BaseModel
from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs
__all__ = (
'import_string',
'sequence_like',
'validate_field_name',
'lenient_isinstance',
'lenient_issubclass',
'in_ipython',
'deep_update',
'update_not_none',
'almost_equal_floats',
'get_model',
'to_camel',
'is_valid_field',
'smart_deepcopy',
'PyObjectStr',
'Representation',
'GetterDict',
'ValueItems',
'version_info', # required here to match behaviour in v1.3
'ClassAttribute',
'path_type',
'ROOT_KEY',
'get_unique_discriminator_alias',
'get_discriminator_alias_and_values',
'LimitedDict',
)
ROOT_KEY = '__root__'
# these are types that are returned unchanged by deepcopy
IMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {
int,
float,
complex,
str,
bool,
bytes,
type,
NoneType,
FunctionType,
BuiltinFunctionType,
LambdaType,
weakref.ref,
CodeType,
    # note: including ModuleType will differ from the behaviour of deepcopy by not producing an error.
    # It might not be a good idea in general, but considering that this function is used only internally
    # against default values of fields, this allows a field to actually have a module as its default value
ModuleType,
NotImplemented.__class__,
Ellipsis.__class__,
}
# these are types that if empty, might be copied with simple copy() instead of deepcopy()
BUILTIN_COLLECTIONS: Set[Type[Any]] = {
list,
set,
tuple,
frozenset,
dict,
OrderedDict,
defaultdict,
deque,
}
def import_string(dotted_path: str) -> Any:
"""
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import fails.
"""
from importlib import import_module
try:
module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
except ValueError as e:
raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as e:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
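# Illustrative sketch (assumption):
#
#     import_string('os.path.join')   # -> <function join ...>
#     import_string('nodots')         # ImportError: "nodots" doesn't look like a module path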
def truncate(v: Union[str], *, max_len: int = 80) -> str:
"""
Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long
"""
warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)
if isinstance(v, str) and len(v) > (max_len - 2):
# -3 so quote + string + … + quote has correct length
return (v[: (max_len - 3)] + '…').__repr__()
try:
v = v.__repr__()
except TypeError:
v = v.__class__.__repr__(v) # in case v is a type
if len(v) > max_len:
v = v[: max_len - 1] + '…'
return v
def sequence_like(v: Any) -> bool:
return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))
def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:
"""
Ensure that the field's name does not shadow an existing attribute of the model.
"""
for base in bases:
if getattr(base, field_name, None):
raise NameError(
f'Field name "{field_name}" shadows a BaseModel attribute; '
f'use a different field name with "alias=\'{field_name}\'".'
)
def lenient_isinstance(o: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:
try:
return isinstance(o, class_or_tuple) # type: ignore[arg-type]
except TypeError:
return False
def lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:
try:
return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type]
except TypeError:
if isinstance(cls, WithArgsTypes):
return False
raise # pragma: no cover
def in_ipython() -> bool:
"""
Check whether we're in an ipython environment, including jupyter notebooks.
"""
try:
eval('__IPYTHON__')
except NameError:
return False
else: # pragma: no cover
return True
KeyType = TypeVar('KeyType')
def deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:
updated_mapping = mapping.copy()
for updating_mapping in updating_mappings:
for k, v in updating_mapping.items():
if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):
updated_mapping[k] = deep_update(updated_mapping[k], v)
else:
updated_mapping[k] = v
return updated_mapping
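# Illustrative sketch (assumption): nested dicts are merged recursively; any other
# values are overwritten by the later mapping.
#
#     deep_update({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
#     # -> {'a': {'x': 1, 'y': 2}, 'b': 2}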
def update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:
mapping.update({k: v for k, v in update.items() if v is not None})
def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:
"""
Return True if two floats are almost equal
"""
return abs(value_1 - value_2) <= delta
def generate_model_signature(
init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']
) -> 'Signature':
"""
Generate signature for model based on its fields
"""
from inspect import Parameter, Signature, signature
from .config import Extra
present_params = signature(init).parameters.values()
merged_params: Dict[str, Parameter] = {}
var_kw = None
use_var_kw = False
for param in islice(present_params, 1, None): # skip self arg
if param.kind is param.VAR_KEYWORD:
var_kw = param
continue
merged_params[param.name] = param
if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through
allow_names = config.allow_population_by_field_name
for field_name, field in fields.items():
param_name = field.alias
if field_name in merged_params or param_name in merged_params:
continue
elif not param_name.isidentifier():
if allow_names and field_name.isidentifier():
param_name = field_name
else:
use_var_kw = True
continue
# TODO: replace annotation with actual expected types once #1055 solved
kwargs = {'default': field.default} if not field.required else {}
merged_params[param_name] = Parameter(
param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs
)
if config.extra is Extra.allow:
use_var_kw = True
if var_kw and use_var_kw:
# Make sure the parameter for extra kwargs
# does not have the same name as a field
default_model_signature = [
('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),
('data', Parameter.VAR_KEYWORD),
]
if [(p.name, p.kind) for p in present_params] == default_model_signature:
# if this is the standard model signature, use extra_data as the extra args name
var_kw_name = 'extra_data'
else:
# else start from var_kw
var_kw_name = var_kw.name
# generate a name that's definitely unique
while var_kw_name in fields:
var_kw_name += '_'
merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)
return Signature(parameters=list(merged_params.values()), return_annotation=None)
def get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:
from .main import BaseModel
try:
model_cls = obj.__pydantic_model__ # type: ignore
except AttributeError:
model_cls = obj
if not issubclass(model_cls, BaseModel):
raise TypeError('Unsupported type, must be either BaseModel or dataclass')
return model_cls
def to_camel(string: str) -> str:
return ''.join(word.capitalize() for word in string.split('_'))
T = TypeVar('T')
def unique_list(
input_list: Union[List[T], Tuple[T, ...]],
*,
name_factory: Callable[[T], str] = str,
) -> List[T]:
"""
Make a list unique while maintaining order.
We update the list if another one with the same name is set
(e.g. root validator overridden in subclass)
"""
result: List[T] = []
result_names: List[str] = []
for v in input_list:
v_name = name_factory(v)
if v_name not in result_names:
result_names.append(v_name)
result.append(v)
else:
result[result_names.index(v_name)] = v
return result
class PyObjectStr(str):
"""
String class where repr doesn't include quotes. Useful with Representation when you want to return a string
    representation of something that is valid (or pseudo-valid) python.
"""
def __repr__(self) -> str:
return str(self)
class Representation:
"""
Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.
__pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations
of objects.
"""
__slots__: Tuple[str, ...] = tuple()
def __repr_args__(self) -> 'ReprArgs':
"""
        Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden.
Can either return:
* name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
* or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
"""
attrs = ((s, getattr(self, s)) for s in self.__slots__)
return [(a, v) for a, v in attrs if v is not None]
def __repr_name__(self) -> str:
"""
Name of the instance's class, used in __repr__.
"""
return self.__class__.__name__
def __repr_str__(self, join_str: str) -> str:
return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:
"""
        Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representation of objects
"""
yield self.__repr_name__() + '('
yield 1
for name, value in self.__repr_args__():
if name is not None:
yield name + '='
yield fmt(value)
yield ','
yield 0
yield -1
yield ')'
def __str__(self) -> str:
return self.__repr_str__(' ')
def __repr__(self) -> str:
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
class GetterDict(Representation):
"""
    Hack to make objects smell just enough like dicts for validate_model.
    We can't inherit from Mapping[str, Any] because it upsets cython, so we have to implement all methods ourselves.
"""
__slots__ = ('_obj',)
def __init__(self, obj: Any):
self._obj = obj
def __getitem__(self, key: str) -> Any:
try:
return getattr(self._obj, key)
except AttributeError as e:
raise KeyError(key) from e
def get(self, key: Any, default: Any = None) -> Any:
return getattr(self._obj, key, default)
def extra_keys(self) -> Set[Any]:
"""
We don't want to get any other attributes of obj if the model didn't explicitly ask for them
"""
return set()
def keys(self) -> List[Any]:
"""
        Keys of the pseudo dictionary; uses a list rather than a set so order information can be maintained, as in
        python dictionaries.
"""
return list(self)
def values(self) -> List[Any]:
return [self[k] for k in self]
def items(self) -> Iterator[Tuple[str, Any]]:
for k in self:
yield k, self.get(k)
def __iter__(self) -> Iterator[str]:
for name in dir(self._obj):
if not name.startswith('_'):
yield name
def __len__(self) -> int:
return sum(1 for _ in self)
def __contains__(self, item: Any) -> bool:
return item in self.keys()
def __eq__(self, other: Any) -> bool:
return dict(self) == dict(other.items())
def __repr_args__(self) -> 'ReprArgs':
return [(None, dict(self))]
def __repr_name__(self) -> str:
return f'GetterDict[{display_as_type(self._obj)}]'
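# Illustrative sketch (assumption; the User class is hypothetical): GetterDict exposes
# an arbitrary object's public attributes through a dict-like interface (this is what
# orm_mode builds on).
#
#     class User:
#         name = 'jane'
#         _secret = 'x'
#
#     gd = GetterDict(User())
#     gd['name']        # 'jane'
#     'name' in gd      # True
#     '_secret' in gd   # False (underscore-prefixed attributes are skipped)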
class ValueItems(Representation):
"""
Class for more convenient calculation of excluded or included fields on values.
"""
__slots__ = ('_items', '_type')
def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:
items = self._coerce_items(items)
if isinstance(value, (list, tuple)):
items = self._normalize_indexes(items, len(value))
self._items: 'MappingIntStrAny' = items
def is_excluded(self, item: Any) -> bool:
"""
Check if item is fully excluded.
:param item: key or index of a value
"""
return self.is_true(self._items.get(item))
def is_included(self, item: Any) -> bool:
"""
Check if value is contained in self._items
:param item: key or index of value
"""
return item in self._items
def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:
"""
        :param e: key or index of element in value
        :return: raw values for element if self._items is a dict and contains the needed element
"""
item = self._items.get(e)
return item if not self.is_true(item) else None
def _normalize_indexes(self, items: 'MappingIntStrAny', v_length: int) -> 'DictIntStrAny':
"""
:param items: dict or set of indexes which will be normalized
        :param v_length: length of the sequence whose indexes will be normalized
>>> self._normalize_indexes({0: True, -2: True, -1: True}, 4)
{0: True, 2: True, 3: True}
>>> self._normalize_indexes({'__all__': True}, 4)
{0: True, 1: True, 2: True, 3: True}
"""
normalized_items: 'DictIntStrAny' = {}
all_items = None
for i, v in items.items():
if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)):
raise TypeError(f'Unexpected type of exclude value for index "{i}" {v.__class__}')
if i == '__all__':
all_items = self._coerce_value(v)
continue
if not isinstance(i, int):
raise TypeError(
'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '
'expected integer keys or keyword "__all__"'
)
normalized_i = v_length + i if i < 0 else i
normalized_items[normalized_i] = self.merge(v, normalized_items.get(normalized_i))
if not all_items:
return normalized_items
if self.is_true(all_items):
for i in range(v_length):
normalized_items.setdefault(i, ...)
return normalized_items
for i in range(v_length):
normalized_item = normalized_items.setdefault(i, {})
if not self.is_true(normalized_item):
normalized_items[i] = self.merge(all_items, normalized_item)
return normalized_items
@classmethod
def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any:
"""
Merge a ``base`` item with an ``override`` item.
Both ``base`` and ``override`` are converted to dictionaries if possible.
Sets are converted to dictionaries with the sets entries as keys and
Ellipsis as values.
Each key-value pair existing in ``base`` is merged with ``override``,
while the rest of the key-value pairs are updated recursively with this function.
Merging takes place based on the "union" of keys if ``intersect`` is
set to ``False`` (default) and on the intersection of keys if
``intersect`` is set to ``True``.
"""
override = cls._coerce_value(override)
base = cls._coerce_value(base)
if override is None:
return base
if cls.is_true(base) or base is None:
return override
if cls.is_true(override):
return base if intersect else override
# intersection or union of keys while preserving ordering:
if intersect:
merge_keys = [k for k in base if k in override] + [k for k in override if k in base]
else:
merge_keys = list(base) + [k for k in override if k not in base]
merged: 'DictIntStrAny' = {}
for k in merge_keys:
merged_item = cls.merge(base.get(k), override.get(k), intersect=intersect)
if merged_item is not None:
merged[k] = merged_item
return merged
@staticmethod
def _coerce_items(items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> 'MappingIntStrAny':
if isinstance(items, Mapping):
pass
elif isinstance(items, AbstractSet):
items = dict.fromkeys(items, ...)
else:
class_name = getattr(items, '__class__', '???')
raise TypeError(f'Unexpected type of exclude value {class_name}')
return items
@classmethod
def _coerce_value(cls, value: Any) -> Any:
if value is None or cls.is_true(value):
return value
return cls._coerce_items(value)
@staticmethod
def is_true(v: Any) -> bool:
return v is True or v is ...
def __repr_args__(self) -> 'ReprArgs':
return [(None, self._items)]
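# Illustrative sketch (assumption): merge() combines include/exclude specs; sets are
# first coerced to {key: ...} dicts, then merged key by key.
#
#     ValueItems.merge({'a': {'x'}}, {'a': {'y'}, 'b': ...})
#     # -> {'a': {'x': ..., 'y': ...}, 'b': ...}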
class ClassAttribute:
"""
Hide class attribute from its instances
"""
__slots__ = (
'name',
'value',
)
def __init__(self, name: str, value: Any) -> None:
self.name = name
self.value = value
def __get__(self, instance: Any, owner: Type[Any]) -> None:
if instance is None:
return self.value
raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')
path_types = {
'is_dir': 'directory',
'is_file': 'file',
'is_mount': 'mount point',
'is_symlink': 'symlink',
'is_block_device': 'block device',
'is_char_device': 'char device',
'is_fifo': 'FIFO',
'is_socket': 'socket',
}
def path_type(p: 'Path') -> str:
"""
Find out what sort of thing a path is.
"""
assert p.exists(), 'path does not exist'
for method, name in path_types.items():
if getattr(p, method)():
return name
return 'unknown'
Obj = TypeVar('Obj')
def smart_deepcopy(obj: Obj) -> Obj:
"""
Return type as is for immutable built-in types
Use obj.copy() for built-in empty collections
Use copy.deepcopy() for non-empty collections and unknown objects
"""
obj_type = obj.__class__
if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:
return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway
elif not obj and obj_type in BUILTIN_COLLECTIONS:
# faster way for empty collections, no need to copy its members
return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method
return deepcopy(obj) # slowest way when we actually might need a deepcopy
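# Illustrative sketch (assumption):
#
#     smart_deepcopy(42) is 42      # True: immutable, returned as-is
#     d = {}
#     smart_deepcopy(d) is d        # False: empty builtin collection, shallow-copied
#     smart_deepcopy({'a': [1]})    # non-empty collection: falls through to deepcopy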
def is_valid_field(name: str) -> bool:
if not name.startswith('_'):
return True
return ROOT_KEY == name
def is_valid_private_name(name: str) -> bool:
return not is_valid_field(name) and name not in {
'__annotations__',
'__classcell__',
'__doc__',
'__module__',
'__orig_bases__',
'__qualname__',
}
_EMPTY = object()
def all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool:
"""
Check that the items of `left` are the same objects as those in `right`.
>>> a, b = object(), object()
>>> all_identical([a, b, a], [a, b, a])
True
>>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while "equal" is not "identical"
False
"""
for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY):
if left_item is not right_item:
return False
return True
def get_unique_discriminator_alias(all_aliases: Collection[str], discriminator_key: str) -> str:
"""Validate that all aliases are the same and if that's the case return the alias"""
unique_aliases = set(all_aliases)
if len(unique_aliases) > 1:
raise ConfigError(
f'Aliases for discriminator {discriminator_key!r} must be the same (got {", ".join(sorted(all_aliases))})'
)
return unique_aliases.pop()
def get_discriminator_alias_and_values(tp: Any, discriminator_key: str) -> Tuple[str, Tuple[str, ...]]:
"""
Get alias and all valid values in the `Literal` type of the discriminator field
`tp` can be a `BaseModel` class or directly an `Annotated` `Union` of many.
"""
is_root_model = getattr(tp, '__custom_root_type__', False)
if get_origin(tp) is Annotated:
tp = get_args(tp)[0]
if hasattr(tp, '__pydantic_model__'):
tp = tp.__pydantic_model__
if is_union(get_origin(tp)):
alias, all_values = _get_union_alias_and_all_values(tp, discriminator_key)
return alias, tuple(v for values in all_values for v in values)
elif is_root_model:
union_type = tp.__fields__[ROOT_KEY].type_
alias, all_values = _get_union_alias_and_all_values(union_type, discriminator_key)
if len(set(all_values)) > 1:
raise ConfigError(
f'Field {discriminator_key!r} is not the same for all submodels of {display_as_type(tp)!r}'
)
return alias, all_values[0]
else:
try:
t_discriminator_type = tp.__fields__[discriminator_key].type_
except AttributeError as e:
raise TypeError(f'Type {tp.__name__!r} is not a valid `BaseModel` or `dataclass`') from e
except KeyError as e:
raise ConfigError(f'Model {tp.__name__!r} needs a discriminator field for key {discriminator_key!r}') from e
if not is_literal_type(t_discriminator_type):
raise ConfigError(f'Field {discriminator_key!r} of model {tp.__name__!r} needs to be a `Literal`')
return tp.__fields__[discriminator_key].alias, all_literal_values(t_discriminator_type)
def _get_union_alias_and_all_values(
union_type: Type[Any], discriminator_key: str
) -> Tuple[str, Tuple[Tuple[str, ...], ...]]:
zipped_aliases_values = [get_discriminator_alias_and_values(t, discriminator_key) for t in get_args(union_type)]
    # unzip: [('alias_a', ('v1', 'v2')), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))]
all_aliases, all_values = zip(*zipped_aliases_values)
return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values
KT = TypeVar('KT')
VT = TypeVar('VT')
if TYPE_CHECKING:
    # Annoyingly, inheriting from both `MutableMapping` and `dict` breaks cython, hence this workaround
class LimitedDict(dict, MutableMapping[KT, VT]): # type: ignore[type-arg]
def __init__(self, size_limit: int = 1000):
...
else:
class LimitedDict(dict):
"""
Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage.
Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache.
        Annoyingly, inheriting from `MutableMapping` breaks cython.
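        A short doctest sketch of the eviction behaviour (roughly an extra 10% of entries is trimmed):
        >>> d = LimitedDict(size_limit=10)
        >>> for i in range(11):
        ...     d[i] = i
        >>> len(d)
        9
        >>> 0 in d
        False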
"""
def __init__(self, size_limit: int = 1000):
self.size_limit = size_limit
super().__init__()
def __setitem__(self, __key: Any, __value: Any) -> None:
super().__setitem__(__key, __value)
if len(self) > self.size_limit:
excess = len(self) - self.size_limit + self.size_limit // 10
to_remove = list(self.keys())[:excess]
for key in to_remove:
del self[key]
def __class_getitem__(cls, *args: Any) -> Any: # pragma: no cover
# just in case LimitedDict is used in type annotations
pass
| 25,662 | Python | 31.48481 | 120 | 0.597187 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/schema.py | import re
import warnings
from collections import defaultdict
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Generic,
Iterable,
List,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from typing_extensions import Annotated, Literal
from .fields import (
MAPPING_LIKE_SHAPES,
SHAPE_DEQUE,
SHAPE_FROZENSET,
SHAPE_GENERIC,
SHAPE_ITERABLE,
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
)
from .json import pydantic_encoder
from .networks import AnyUrl, EmailStr
from .types import (
ConstrainedDecimal,
ConstrainedFloat,
ConstrainedFrozenSet,
ConstrainedInt,
ConstrainedList,
ConstrainedSet,
ConstrainedStr,
SecretBytes,
SecretStr,
conbytes,
condecimal,
confloat,
confrozenset,
conint,
conlist,
conset,
constr,
)
from .typing import (
ForwardRef,
all_literal_values,
get_args,
get_origin,
get_sub_types,
is_callable_type,
is_literal_type,
is_namedtuple,
is_none_type,
is_union,
)
from .utils import ROOT_KEY, get_model, lenient_issubclass, sequence_like
if TYPE_CHECKING:
from .dataclasses import Dataclass
from .main import BaseModel
default_prefix = '#/definitions/'
default_ref_template = '#/definitions/{model}'
TypeModelOrEnum = Union[Type['BaseModel'], Type[Enum]]
TypeModelSet = Set[TypeModelOrEnum]
def _apply_modify_schema(
modify_schema: Callable[..., None], field: Optional[ModelField], field_schema: Dict[str, Any]
) -> None:
from inspect import signature
sig = signature(modify_schema)
args = set(sig.parameters.keys())
if 'field' in args or 'kwargs' in args:
modify_schema(field_schema, field=field)
else:
modify_schema(field_schema)
def schema(
models: Sequence[Union[Type['BaseModel'], Type['Dataclass']]],
*,
by_alias: bool = True,
title: Optional[str] = None,
description: Optional[str] = None,
ref_prefix: Optional[str] = None,
ref_template: str = default_ref_template,
) -> Dict[str, Any]:
"""
Process a list of models and generate a single JSON Schema with all of them defined in the ``definitions``
top-level JSON key, including their sub-models.
:param models: a list of models to include in the generated JSON Schema
:param by_alias: generate the schemas using the aliases defined, if any
:param title: title for the generated schema that includes the definitions
:param description: description for the generated schema
:param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the
default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere
else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the
top-level key ``definitions``, so you can extract them from there. But all the references will have the set
prefix.
:param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful
for references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For
    a sibling json file in a ``/schemas`` directory, use ``"/schemas/{model}.json#"``.
:return: dict with the JSON Schema with a ``definitions`` top-level key including the schema definitions for
the models and sub-models passed in ``models``.
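    A minimal usage sketch (the exact schema content depends on the models passed):
    >>> from pydantic import BaseModel
    >>> class Foo(BaseModel):
    ...     a: int
    >>> s = schema([Foo], title='My Schema')
    >>> s['title']
    'My Schema'
    >>> 'Foo' in s['definitions']
    True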
"""
clean_models = [get_model(model) for model in models]
flat_models = get_flat_models_from_models(clean_models)
model_name_map = get_model_name_map(flat_models)
definitions = {}
output_schema: Dict[str, Any] = {}
if title:
output_schema['title'] = title
if description:
output_schema['description'] = description
for model in clean_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
)
definitions.update(m_definitions)
model_name = model_name_map[model]
definitions[model_name] = m_schema
if definitions:
output_schema['definitions'] = definitions
return output_schema
def model_schema(
model: Union[Type['BaseModel'], Type['Dataclass']],
by_alias: bool = True,
ref_prefix: Optional[str] = None,
ref_template: str = default_ref_template,
) -> Dict[str, Any]:
"""
Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level
JSON key.
:param model: a Pydantic model (a class that inherits from BaseModel)
:param by_alias: generate the schemas using the aliases defined, if any
:param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the
default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere
else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the
top-level key ``definitions``, so you can extract them from there. But all the references will have the set
prefix.
:param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for
references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For a
    sibling json file in a ``/schemas`` directory, use ``"/schemas/{model}.json#"``.
:return: dict with the JSON Schema for the passed ``model``
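    For example (a hedged sketch; titles and defaults are derived from the model):
    >>> from pydantic import BaseModel
    >>> class Bar(BaseModel):
    ...     x: str = 'abc'
    >>> model_schema(Bar)['properties']['x']['type']
    'string'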
"""
model = get_model(model)
flat_models = get_flat_models_from_model(model)
model_name_map = get_model_name_map(flat_models)
model_name = model_name_map[model]
m_schema, m_definitions, nested_models = model_process_schema(
model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, ref_template=ref_template
)
if model_name in nested_models:
# model_name is in Nested models, it has circular references
m_definitions[model_name] = m_schema
m_schema = get_schema_ref(model_name, ref_prefix, ref_template, False)
if m_definitions:
m_schema.update({'definitions': m_definitions})
return m_schema
def get_field_info_schema(field: ModelField, schema_overrides: bool = False) -> Tuple[Dict[str, Any], bool]:
# If no title is explicitly set, we don't set title in the schema for enums.
# The behaviour is the same as `BaseModel` reference, where the default title
# is in the definitions part of the schema.
schema_: Dict[str, Any] = {}
if field.field_info.title or not lenient_issubclass(field.type_, Enum):
schema_['title'] = field.field_info.title or field.alias.title().replace('_', ' ')
if field.field_info.title:
schema_overrides = True
if field.field_info.description:
schema_['description'] = field.field_info.description
schema_overrides = True
if (
not field.required
and not field.field_info.const
and field.default is not None
and not is_callable_type(field.outer_type_)
):
schema_['default'] = encode_default(field.default)
schema_overrides = True
return schema_, schema_overrides
def field_schema(
field: ModelField,
*,
by_alias: bool = True,
model_name_map: Dict[TypeModelOrEnum, str],
ref_prefix: Optional[str] = None,
ref_template: str = default_ref_template,
    known_models: Optional[TypeModelSet] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
    is a model and has sub-models, and those sub-models don't have overrides (such as ``title``, ``default``, etc.), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``ModelField``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for
references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For a
    sibling json file in a ``/schemas`` directory, use ``"/schemas/{model}.json#"``.
:param known_models: used to solve circular references
:return: tuple of the schema for this field and additional definitions
"""
s, schema_overrides = get_field_info_schema(field)
validation_schema = get_field_schema_validations(field)
if validation_schema:
s.update(validation_schema)
schema_overrides = True
f_schema, f_definitions, f_nested_models = field_type_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models or set(),
)
# $ref will only be returned when there are no schema_overrides
if '$ref' in f_schema:
return f_schema, f_definitions, f_nested_models
else:
s.update(f_schema)
return s, f_definitions, f_nested_models
numeric_types = (int, float, Decimal)
_str_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (
('max_length', numeric_types, 'maxLength'),
('min_length', numeric_types, 'minLength'),
('regex', str, 'pattern'),
)
_numeric_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (
('gt', numeric_types, 'exclusiveMinimum'),
('lt', numeric_types, 'exclusiveMaximum'),
('ge', numeric_types, 'minimum'),
('le', numeric_types, 'maximum'),
('multiple_of', numeric_types, 'multipleOf'),
)
def get_field_schema_validations(field: ModelField) -> Dict[str, Any]:
"""
Get the JSON Schema validation keywords for a ``field`` with an annotation of
a Pydantic ``FieldInfo`` with validation arguments.
"""
f_schema: Dict[str, Any] = {}
if lenient_issubclass(field.type_, Enum):
# schema is already updated by `enum_process_schema`; just update with field extra
if field.field_info.extra:
f_schema.update(field.field_info.extra)
return f_schema
if lenient_issubclass(field.type_, (str, bytes)):
for attr_name, t, keyword in _str_types_attrs:
attr = getattr(field.field_info, attr_name, None)
if isinstance(attr, t):
f_schema[keyword] = attr
if lenient_issubclass(field.type_, numeric_types) and not issubclass(field.type_, bool):
for attr_name, t, keyword in _numeric_types_attrs:
attr = getattr(field.field_info, attr_name, None)
if isinstance(attr, t):
f_schema[keyword] = attr
if field.field_info is not None and field.field_info.const:
f_schema['const'] = field.default
if field.field_info.extra:
f_schema.update(field.field_info.extra)
modify_schema = getattr(field.outer_type_, '__modify_schema__', None)
if modify_schema:
_apply_modify_schema(modify_schema, field, f_schema)
return f_schema
def get_model_name_map(unique_models: TypeModelSet) -> Dict[TypeModelOrEnum, str]:
"""
Process a set of models and generate unique names for them to be used as keys in the JSON Schema
    definitions. By default, each name is the same as the model's class name. But if two models in different Python
modules have the same name (e.g. "users.Model" and "items.Model"), the generated names will be
based on the Python module path for those conflicting models to prevent name collisions.
:param unique_models: a Python set of models
:return: dict mapping models to names
"""
name_model_map = {}
conflicting_names: Set[str] = set()
for model in unique_models:
model_name = normalize_name(model.__name__)
if model_name in conflicting_names:
model_name = get_long_model_name(model)
name_model_map[model_name] = model
elif model_name in name_model_map:
conflicting_names.add(model_name)
conflicting_model = name_model_map.pop(model_name)
name_model_map[get_long_model_name(conflicting_model)] = conflicting_model
name_model_map[get_long_model_name(model)] = model
else:
name_model_map[model_name] = model
return {v: k for k, v in name_model_map.items()}
def get_flat_models_from_model(model: Type['BaseModel'], known_models: Optional[TypeModelSet] = None) -> TypeModelSet:
"""
Take a single ``model`` and generate a set with itself and all the sub-models in the tree. I.e. if you pass
model ``Foo`` (subclass of Pydantic ``BaseModel``) as ``model``, and it has a field of type ``Bar`` (also
subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also subclass of ``BaseModel``),
the return value will be ``set([Foo, Bar, Baz])``.
:param model: a Pydantic ``BaseModel`` subclass
:param known_models: used to solve circular references
:return: a set with the initial model and all its sub-models
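    A doctest sketch of the example above:
    >>> from pydantic import BaseModel
    >>> class Baz(BaseModel):
    ...     pass
    >>> class Bar(BaseModel):
    ...     baz: Baz
    >>> class Foo(BaseModel):
    ...     bar: Bar
    >>> get_flat_models_from_model(Foo) == {Foo, Bar, Baz}
    True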
"""
known_models = known_models or set()
flat_models: TypeModelSet = set()
flat_models.add(model)
known_models |= flat_models
fields = cast(Sequence[ModelField], model.__fields__.values())
flat_models |= get_flat_models_from_fields(fields, known_models=known_models)
return flat_models
def get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> TypeModelSet:
"""
    Take a single Pydantic ``ModelField`` (from a model) that could have been declared as a subclass of BaseModel
(so, it could be a submodel), and generate a set with its model and all the sub-models in the tree.
I.e. if you pass a field that was declared to be of type ``Foo`` (subclass of BaseModel) as ``field``, and that
model ``Foo`` has a field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of
type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
:param field: a Pydantic ``ModelField``
:param known_models: used to solve circular references
:return: a set with the model used in the declaration for this field, if any, and all its sub-models
"""
from .dataclasses import dataclass, is_builtin_dataclass
from .main import BaseModel
flat_models: TypeModelSet = set()
# Handle dataclass-based models
if is_builtin_dataclass(field.type_):
field.type_ = dataclass(field.type_)
was_dataclass = True
else:
was_dataclass = False
field_type = field.type_
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):
field_type = field_type.__pydantic_model__
if field.sub_fields and (not lenient_issubclass(field_type, BaseModel) or was_dataclass):
flat_models |= get_flat_models_from_fields(field.sub_fields, known_models=known_models)
elif lenient_issubclass(field_type, BaseModel) and field_type not in known_models:
flat_models |= get_flat_models_from_model(field_type, known_models=known_models)
elif lenient_issubclass(field_type, Enum):
flat_models.add(field_type)
return flat_models
def get_flat_models_from_fields(fields: Sequence[ModelField], known_models: TypeModelSet) -> TypeModelSet:
"""
Take a list of Pydantic ``ModelField``s (from a model) that could have been declared as subclasses of ``BaseModel``
(so, any of them could be a submodel), and generate a set with their models and all the sub-models in the tree.
    I.e. if you pass the fields of a model ``Foo`` (subclass of ``BaseModel``) as ``fields``, and one of them has a
field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also
subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
:param fields: a list of Pydantic ``ModelField``s
:param known_models: used to solve circular references
:return: a set with any model declared in the fields, and all their sub-models
"""
flat_models: TypeModelSet = set()
for field in fields:
flat_models |= get_flat_models_from_field(field, known_models=known_models)
return flat_models
def get_flat_models_from_models(models: Sequence[Type['BaseModel']]) -> TypeModelSet:
"""
Take a list of ``models`` and generate a set with them and all their sub-models in their trees. I.e. if you pass
a list of two models, ``Foo`` and ``Bar``, both subclasses of Pydantic ``BaseModel`` as models, and ``Bar`` has
a field of type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
"""
flat_models: TypeModelSet = set()
for model in models:
flat_models |= get_flat_models_from_model(model)
return flat_models
def get_long_model_name(model: TypeModelOrEnum) -> str:
return f'{model.__module__}__{model.__qualname__}'.replace('.', '__')
def field_type_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
    Used by ``field_schema()``; you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
    information such as title, etc. Also return additional schema definitions from sub-models.
"""
from .main import BaseModel # noqa: F811
definitions = {}
nested_models: Set[str] = set()
f_schema: Dict[str, Any]
if field.shape in {
SHAPE_LIST,
SHAPE_TUPLE_ELLIPSIS,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_FROZENSET,
SHAPE_ITERABLE,
SHAPE_DEQUE,
}:
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
f_schema = {'type': 'array', 'items': items_schema}
if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:
f_schema['uniqueItems'] = True
elif field.shape in MAPPING_LIKE_SHAPES:
f_schema = {'type': 'object'}
key_field = cast(ModelField, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if regex:
# Dict keys have a regex pattern
# items_schema might be a schema or empty dict, add it either way
f_schema['patternProperties'] = {regex.pattern: items_schema}
elif items_schema:
# The dict values are not simply Any, so they need a schema
f_schema['additionalProperties'] = items_schema
elif field.shape == SHAPE_TUPLE or (field.shape == SHAPE_GENERIC and not issubclass(field.type_, BaseModel)):
sub_schema = []
sub_fields = cast(List[ModelField], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions, sf_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sf_definitions)
nested_models.update(sf_nested_models)
sub_schema.append(sf_schema)
sub_fields_len = len(sub_fields)
if field.shape == SHAPE_GENERIC:
all_of_schemas = sub_schema[0] if sub_fields_len == 1 else {'type': 'array', 'items': sub_schema}
f_schema = {'allOf': [all_of_schemas]}
else:
f_schema = {
'type': 'array',
'minItems': sub_fields_len,
'maxItems': sub_fields_len,
}
if sub_fields_len >= 1:
f_schema['items'] = sub_schema
else:
assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape
f_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
# check field type to avoid repeated calls to the same __modify_schema__ method
if field.type_ != field.outer_type_:
if field.shape == SHAPE_GENERIC:
field_type = field.type_
else:
field_type = field.outer_type_
modify_schema = getattr(field_type, '__modify_schema__', None)
if modify_schema:
_apply_modify_schema(modify_schema, field, f_schema)
return f_schema, definitions, nested_models
def model_process_schema(
model: TypeModelOrEnum,
*,
by_alias: bool = True,
model_name_map: Dict[TypeModelOrEnum, str],
ref_prefix: Optional[str] = None,
ref_template: str = default_ref_template,
    known_models: Optional[TypeModelSet] = None,
field: Optional[ModelField] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
    Used by ``model_schema()``; you probably should be using that function.
Take a single ``model`` and generate its schema. Also return additional schema definitions, from sub-models. The
sub-models of the returned schema will be referenced, but their definitions will not be included in the schema. All
the definitions are returned as the second value.
"""
from inspect import getdoc, signature
known_models = known_models or set()
if lenient_issubclass(model, Enum):
model = cast(Type[Enum], model)
s = enum_process_schema(model, field=field)
return s, {}, set()
model = cast(Type['BaseModel'], model)
s = {'title': model.__config__.title or model.__name__}
doc = getdoc(model)
if doc:
s['description'] = doc
known_models.add(model)
m_schema, m_definitions, nested_models = model_type_schema(
model,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
s.update(m_schema)
schema_extra = model.__config__.schema_extra
if callable(schema_extra):
if len(signature(schema_extra).parameters) == 1:
schema_extra(s)
else:
schema_extra(s, model)
else:
s.update(schema_extra)
return s, m_definitions, nested_models
def model_type_schema(
model: Type['BaseModel'],
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
    You probably should be using ``model_schema()``; this function is indirectly used by that function.
    Take a single ``model`` and generate the schema for its type only, not including additional
    information such as title, etc. Also return additional schema definitions from sub-models.
"""
properties = {}
required = []
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
for k, f in model.__fields__.items():
try:
f_schema, f_definitions, f_nested_models = field_schema(
f,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
except SkipField as skip:
warnings.warn(skip.message, UserWarning)
continue
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if by_alias:
properties[f.alias] = f_schema
if f.required:
required.append(f.alias)
else:
properties[k] = f_schema
if f.required:
required.append(k)
if ROOT_KEY in properties:
out_schema = properties[ROOT_KEY]
out_schema['title'] = model.__config__.title or model.__name__
else:
out_schema = {'type': 'object', 'properties': properties}
if required:
out_schema['required'] = required
if model.__config__.extra == 'forbid':
out_schema['additionalProperties'] = False
return out_schema, definitions, nested_models
def enum_process_schema(enum: Type[Enum], *, field: Optional[ModelField] = None) -> Dict[str, Any]:
"""
Take a single `enum` and generate its schema.
This is similar to the `model_process_schema` function, but applies to ``Enum`` objects.
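    A small doctest sketch (the ``description`` falls back to Python's default enum docstring):
    >>> from enum import Enum
    >>> class Color(Enum):
    ...     RED = 'red'
    >>> enum_process_schema(Color)['enum']
    ['red']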
"""
from inspect import getdoc
schema_: Dict[str, Any] = {
'title': enum.__name__,
# Python assigns all enums a default docstring value of 'An enumeration', so
# all enums will have a description field even if not explicitly provided.
'description': getdoc(enum),
# Add enum values and the enum field type to the schema.
'enum': [item.value for item in cast(Iterable[Enum], enum)],
}
add_field_type_to_schema(enum, schema_)
modify_schema = getattr(enum, '__modify_schema__', None)
if modify_schema:
_apply_modify_schema(modify_schema, field, schema_)
return schema_
def field_singleton_sub_fields_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
    This function is indirectly used by ``field_schema()``; you probably should be using that function.
Take a list of Pydantic ``ModelField`` from the declaration of a type with parameters, and generate their
schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``.
"""
sub_fields = cast(List[ModelField], field.sub_fields)
definitions = {}
nested_models: Set[str] = set()
if len(sub_fields) == 1:
return field_type_schema(
sub_fields[0],
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
else:
s: Dict[str, Any] = {}
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#discriminator-object
if field.discriminator_key is not None:
assert field.sub_fields_mapping is not None
discriminator_models_refs: Dict[str, Union[str, Dict[str, Any]]] = {}
for discriminator_value, sub_field in field.sub_fields_mapping.items():
# sub_field is either a `BaseModel` or directly an `Annotated` `Union` of many
if is_union(get_origin(sub_field.type_)):
sub_models = get_sub_types(sub_field.type_)
discriminator_models_refs[discriminator_value] = {
model_name_map[sub_model]: get_schema_ref(
model_name_map[sub_model], ref_prefix, ref_template, False
)
for sub_model in sub_models
}
else:
sub_field_type = sub_field.type_
if hasattr(sub_field_type, '__pydantic_model__'):
sub_field_type = sub_field_type.__pydantic_model__
discriminator_model_name = model_name_map[sub_field_type]
discriminator_model_ref = get_schema_ref(discriminator_model_name, ref_prefix, ref_template, False)
discriminator_models_refs[discriminator_value] = discriminator_model_ref['$ref']
s['discriminator'] = {
'propertyName': field.discriminator_alias,
'mapping': discriminator_models_refs,
}
sub_field_schemas = []
for sf in sub_fields:
sub_schema, sub_definitions, sub_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sub_definitions)
if schema_overrides and 'allOf' in sub_schema:
# if the sub_field is a referenced schema we only need the referenced
# object. Otherwise we will end up with several allOf inside anyOf.
# See https://github.com/samuelcolvin/pydantic/issues/1209
sub_schema = sub_schema['allOf'][0]
if sub_schema.keys() == {'discriminator', 'anyOf'}:
# we don't want discriminator information inside anyOf choices, this is dealt with elsewhere
sub_schema.pop('discriminator')
sub_field_schemas.append(sub_schema)
nested_models.update(sub_nested_models)
s['anyOf'] = sub_field_schemas
return s, definitions, nested_models
# Order is important, e.g. subclasses of str must go before str.
# This is used only for standard library types; custom types should use __modify_schema__ instead.
field_class_to_schema: Tuple[Tuple[Any, Dict[str, Any]], ...] = (
(Path, {'type': 'string', 'format': 'path'}),
(datetime, {'type': 'string', 'format': 'date-time'}),
(date, {'type': 'string', 'format': 'date'}),
(time, {'type': 'string', 'format': 'time'}),
(timedelta, {'type': 'number', 'format': 'time-delta'}),
(IPv4Network, {'type': 'string', 'format': 'ipv4network'}),
(IPv6Network, {'type': 'string', 'format': 'ipv6network'}),
(IPv4Interface, {'type': 'string', 'format': 'ipv4interface'}),
(IPv6Interface, {'type': 'string', 'format': 'ipv6interface'}),
(IPv4Address, {'type': 'string', 'format': 'ipv4'}),
(IPv6Address, {'type': 'string', 'format': 'ipv6'}),
(Pattern, {'type': 'string', 'format': 'regex'}),
(str, {'type': 'string'}),
(bytes, {'type': 'string', 'format': 'binary'}),
(bool, {'type': 'boolean'}),
(int, {'type': 'integer'}),
(float, {'type': 'number'}),
(Decimal, {'type': 'number'}),
(UUID, {'type': 'string', 'format': 'uuid'}),
(dict, {'type': 'object'}),
(list, {'type': 'array', 'items': {}}),
(tuple, {'type': 'array', 'items': {}}),
(set, {'type': 'array', 'items': {}, 'uniqueItems': True}),
(frozenset, {'type': 'array', 'items': {}, 'uniqueItems': True}),
)
json_scheme = {'type': 'string', 'format': 'json-string'}
def add_field_type_to_schema(field_type: Any, schema_: Dict[str, Any]) -> None:
"""
Update the given `schema` with the type-specific metadata for the given `field_type`.
This function looks through `field_class_to_schema` for a class that matches the given `field_type`,
and then modifies the given `schema` with the information from that type.
"""
for type_, t_schema in field_class_to_schema:
# Fallback for `typing.Pattern` as it is not a valid class
if lenient_issubclass(field_type, type_) or field_type is type_ is Pattern:
schema_.update(t_schema)
break
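# For instance, add_field_type_to_schema(datetime, s) would update ``s`` with
# {'type': 'string', 'format': 'date-time'}, per the ``field_class_to_schema`` table above.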
def get_schema_ref(name: str, ref_prefix: Optional[str], ref_template: str, schema_overrides: bool) -> Dict[str, Any]:
if ref_prefix:
schema_ref = {'$ref': ref_prefix + name}
else:
schema_ref = {'$ref': ref_template.format(model=name)}
return {'allOf': [schema_ref]} if schema_overrides else schema_ref
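# e.g. get_schema_ref('Foo', None, default_ref_template, False) == {'$ref': '#/definitions/Foo'},
# while schema_overrides=True wraps the reference as {'allOf': [{'$ref': ...}]}.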
def field_singleton_schema( # noqa: C901 (ignore complexity)
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
    This function is indirectly used by ``field_schema()``; you should probably be using that function.
Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models.
"""
from .main import BaseModel
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
field_type = field.type_
# Recurse into this field if it contains sub_fields and is NOT a
# BaseModel OR that BaseModel is a const
if field.sub_fields and (
(field.field_info and field.field_info.const) or not lenient_issubclass(field_type, BaseModel)
):
return field_singleton_sub_fields_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
if field_type is Any or field_type is object or field_type.__class__ == TypeVar:
return {}, definitions, nested_models # no restrictions
if is_none_type(field_type):
return {'type': 'null'}, definitions, nested_models
if is_callable_type(field_type):
raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
f_schema: Dict[str, Any] = {}
if field.field_info is not None and field.field_info.const:
f_schema['const'] = field.default
if is_literal_type(field_type):
values = all_literal_values(field_type)
if len({v.__class__ for v in values}) > 1:
return field_schema(
multitypes_literal_field_for_schema(values, field),
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
# All values have the same type
field_type = values[0].__class__
f_schema['enum'] = list(values)
add_field_type_to_schema(field_type, f_schema)
elif lenient_issubclass(field_type, Enum):
enum_name = model_name_map[field_type]
f_schema, schema_overrides = get_field_info_schema(field, schema_overrides)
f_schema.update(get_schema_ref(enum_name, ref_prefix, ref_template, schema_overrides))
definitions[enum_name] = enum_process_schema(field_type, field=field)
elif is_namedtuple(field_type):
sub_schema, *_ = model_process_schema(
field_type.__pydantic_model__,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
field=field,
)
items_schemas = list(sub_schema['properties'].values())
f_schema.update(
{
'type': 'array',
'items': items_schemas,
'minItems': len(items_schemas),
'maxItems': len(items_schemas),
}
)
elif not hasattr(field_type, '__pydantic_model__'):
add_field_type_to_schema(field_type, f_schema)
modify_schema = getattr(field_type, '__modify_schema__', None)
if modify_schema:
_apply_modify_schema(modify_schema, field, f_schema)
if f_schema:
return f_schema, definitions, nested_models
# Handle dataclass-based models
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):
field_type = field_type.__pydantic_model__
if issubclass(field_type, BaseModel):
model_name = model_name_map[field_type]
if field_type not in known_models:
sub_schema, sub_definitions, sub_nested_models = model_process_schema(
field_type,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
field=field,
)
definitions.update(sub_definitions)
definitions[model_name] = sub_schema
nested_models.update(sub_nested_models)
else:
nested_models.add(model_name)
schema_ref = get_schema_ref(model_name, ref_prefix, ref_template, schema_overrides)
return schema_ref, definitions, nested_models
# For generics with no args
args = get_args(field_type)
if args is not None and not args and Generic in field_type.__bases__:
return f_schema, definitions, nested_models
raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
def multitypes_literal_field_for_schema(values: Tuple[Any, ...], field: ModelField) -> ModelField:
"""
    To support `Literal` with values of different types, we split it into multiple `Literal` types with same-typed values,
e.g. `Literal['qwe', 'asd', 1, 2]` becomes `Union[Literal['qwe', 'asd'], Literal[1, 2]]`
"""
literal_distinct_types = defaultdict(list)
for v in values:
literal_distinct_types[v.__class__].append(v)
distinct_literals = (Literal[tuple(same_type_values)] for same_type_values in literal_distinct_types.values())
return ModelField(
name=field.name,
type_=Union[tuple(distinct_literals)], # type: ignore
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
field_info=field.field_info,
)
def encode_default(dft: Any) -> Any:
if isinstance(dft, Enum):
return dft.value
elif isinstance(dft, (int, float, str)):
return dft
elif sequence_like(dft):
t = dft.__class__
seq_args = (encode_default(v) for v in dft)
return t(*seq_args) if is_namedtuple(t) else t(seq_args)
elif isinstance(dft, dict):
return {encode_default(k): encode_default(v) for k, v in dft.items()}
elif dft is None:
return None
else:
return pydantic_encoder(dft)
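# Illustrative: encode_default({'a': [1, 'x']}) returns {'a': [1, 'x']} unchanged, while Enum
# members are replaced by their ``.value`` and named tuples are rebuilt field by field.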
_map_types_constraint: Dict[Any, Callable[..., type]] = {int: conint, float: confloat, Decimal: condecimal}
def get_annotation_from_field_info(
annotation: Any, field_info: FieldInfo, field_name: str, validate_assignment: bool = False
) -> Type[Any]:
"""
Get an annotation with validation implemented for numbers and strings based on the field_info.
:param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``
:param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema
:param field_name: name of the field for use in error messages
:param validate_assignment: default False, flag for BaseModel Config value of validate_assignment
:return: the same ``annotation`` if unmodified or a new annotation with validation in place
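    A hedged sketch: an ``int`` annotation with a ``gt`` constraint becomes a constrained integer type.
    >>> from pydantic import Field
    >>> constrained = get_annotation_from_field_info(int, Field(gt=0), 'x')
    >>> constrained.gt
    0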
"""
constraints = field_info.get_constraints()
used_constraints: Set[str] = set()
if constraints:
annotation, used_constraints = get_annotation_with_constraints(annotation, field_info)
if validate_assignment:
used_constraints.add('allow_mutation')
unused_constraints = constraints - used_constraints
if unused_constraints:
raise ValueError(
f'On field "{field_name}" the following field constraints are set but not enforced: '
f'{", ".join(unused_constraints)}. '
f'\nFor more details see https://pydantic-docs.helpmanual.io/usage/schema/#unenforced-field-constraints'
)
return annotation
def get_annotation_with_constraints(annotation: Any, field_info: FieldInfo) -> Tuple[Type[Any], Set[str]]: # noqa: C901
"""
    Get an annotation with the used constraints implemented for numbers and strings, based on the field_info.
:param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``
:param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema
:return: the same ``annotation`` if unmodified or a new annotation along with the used constraints.
"""
used_constraints: Set[str] = set()
def go(type_: Any) -> Type[Any]:
if (
is_literal_type(type_)
or isinstance(type_, ForwardRef)
or lenient_issubclass(type_, (ConstrainedList, ConstrainedSet, ConstrainedFrozenSet))
):
return type_
origin = get_origin(type_)
if origin is not None:
args: Tuple[Any, ...] = get_args(type_)
if any(isinstance(a, ForwardRef) for a in args):
# forward refs cause infinite recursion below
return type_
if origin is Annotated:
return go(args[0])
if is_union(origin):
return Union[tuple(go(a) for a in args)] # type: ignore
if issubclass(origin, List) and (
field_info.min_items is not None
or field_info.max_items is not None
or field_info.unique_items is not None
):
used_constraints.update({'min_items', 'max_items', 'unique_items'})
return conlist(
go(args[0]),
min_items=field_info.min_items,
max_items=field_info.max_items,
unique_items=field_info.unique_items,
)
if issubclass(origin, Set) and (field_info.min_items is not None or field_info.max_items is not None):
used_constraints.update({'min_items', 'max_items'})
return conset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)
if issubclass(origin, FrozenSet) and (field_info.min_items is not None or field_info.max_items is not None):
used_constraints.update({'min_items', 'max_items'})
return confrozenset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)
for t in (Tuple, List, Set, FrozenSet, Sequence):
if issubclass(origin, t): # type: ignore
return t[tuple(go(a) for a in args)] # type: ignore
if issubclass(origin, Dict):
return Dict[args[0], go(args[1])] # type: ignore
attrs: Optional[Tuple[str, ...]] = None
constraint_func: Optional[Callable[..., type]] = None
if isinstance(type_, type):
if issubclass(type_, (SecretStr, SecretBytes)):
attrs = ('max_length', 'min_length')
def constraint_func(**kw: Any) -> Type[Any]:
return type(type_.__name__, (type_,), kw)
elif issubclass(type_, str) and not issubclass(type_, (EmailStr, AnyUrl, ConstrainedStr)):
attrs = ('max_length', 'min_length', 'regex')
constraint_func = constr
elif issubclass(type_, bytes):
attrs = ('max_length', 'min_length', 'regex')
constraint_func = conbytes
elif issubclass(type_, numeric_types) and not issubclass(
type_,
(
ConstrainedInt,
ConstrainedFloat,
ConstrainedDecimal,
ConstrainedList,
ConstrainedSet,
ConstrainedFrozenSet,
bool,
),
):
# Is numeric type
attrs = ('gt', 'lt', 'ge', 'le', 'multiple_of')
if issubclass(type_, Decimal):
attrs += ('max_digits', 'decimal_places')
numeric_type = next(t for t in numeric_types if issubclass(type_, t)) # pragma: no branch
constraint_func = _map_types_constraint[numeric_type]
if attrs:
used_constraints.update(set(attrs))
kwargs = {
attr_name: attr
for attr_name, attr in ((attr_name, getattr(field_info, attr_name)) for attr_name in attrs)
if attr is not None
}
if kwargs:
constraint_func = cast(Callable[..., type], constraint_func)
return constraint_func(**kwargs)
return type_
return go(annotation), used_constraints
def normalize_name(name: str) -> str:
"""
Normalizes the given name. This can be applied to either a model *or* enum.
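    For example:
    >>> normalize_name('Foo[int]')
    'Foo_int_'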
"""
return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)
class SkipField(Exception):
"""
Utility exception used to exclude fields from schema.
"""
def __init__(self, message: str) -> None:
self.message = message
| 46,835 | Python | 39.691573 | 120 | 0.623166 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/generics.py | import sys
import typing
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Dict,
Generic,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import Annotated
from .class_validators import gather_all_validators
from .fields import DeferredType
from .main import BaseModel, create_model
from .types import JsonWrapper
from .typing import display_as_type, get_all_type_hints, get_args, get_origin, typing_base
from .utils import LimitedDict, all_identical, lenient_issubclass
GenericModelT = TypeVar('GenericModelT', bound='GenericModel')
TypeVarType = Any # since mypy doesn't allow the use of TypeVar as a type
Parametrization = Mapping[TypeVarType, Type[Any]]
_generic_types_cache: 'LimitedDict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]]' = LimitedDict()
# _assigned_parameters is a Mapping from parametrized version of generic models to assigned types of parametrizations
# as captured during construction of the class (not instances).
# E.g., for generic model `Model[A, B]`, when parametrized model `Model[int, str]` is created,
# `Model[int, str]`: {A: int, B: str}` will be stored in `_assigned_parameters`.
# (This information is only otherwise available after creation from the class name string).
_assigned_parameters: 'LimitedDict[Type[Any], Parametrization]' = LimitedDict()
class GenericModel(BaseModel):
__slots__ = ()
__concrete__: ClassVar[bool] = False
if TYPE_CHECKING:
# Putting this in a TYPE_CHECKING block allows us to replace `if Generic not in cls.__bases__` with
# `not hasattr(cls, "__parameters__")`. This means we don't need to force non-concrete subclasses of
# `GenericModel` to also inherit from `Generic`, which would require changes to the use of `create_model` below.
__parameters__: ClassVar[Tuple[TypeVarType, ...]]
# Setting the return type as Type[Any] instead of Type[BaseModel] prevents PyCharm warnings
def __class_getitem__(cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]]) -> Type[Any]:
"""Instantiates a new class from a generic class `cls` and type variables `params`.
        :param params: Tuple of types the class is parameterized with. Given a generic class
`Model` with 2 type variables and a concrete model `Model[str, int]`,
the value `(str, int)` would be passed to `params`.
:return: New model class inheriting from `cls` with instantiated
types described by `params`. If no parameters are given, `cls` is
returned as is.
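        A minimal sketch of concrete-model creation:
        >>> from typing import Generic, TypeVar
        >>> T = TypeVar('T')
        >>> class Data(GenericModel, Generic[T]):
        ...     value: T
        >>> Data[int].__name__
        'Data[int]'
        >>> Data[int](value='3').value
        3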
"""
cached = _generic_types_cache.get((cls, params))
if cached is not None:
return cached
if cls.__concrete__ and Generic not in cls.__bases__:
raise TypeError('Cannot parameterize a concrete instantiation of a generic model')
if not isinstance(params, tuple):
params = (params,)
if cls is GenericModel and any(isinstance(param, TypeVar) for param in params):
raise TypeError('Type parameters should be placed on typing.Generic, not GenericModel')
if not hasattr(cls, '__parameters__'):
raise TypeError(f'Type {cls.__name__} must inherit from typing.Generic before being parameterized')
check_parameters_count(cls, params)
# Build map from generic typevars to passed params
typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params))
if all_identical(typevars_map.keys(), typevars_map.values()) and typevars_map:
return cls # if arguments are equal to parameters it's the same object
# Create new model with original model as parent inserting fields with DeferredType.
model_name = cls.__concrete_name__(params)
validators = gather_all_validators(cls)
type_hints = get_all_type_hints(cls).items()
instance_type_hints = {k: v for k, v in type_hints if get_origin(v) is not ClassVar}
fields = {k: (DeferredType(), cls.__fields__[k].field_info) for k in instance_type_hints if k in cls.__fields__}
model_module, called_globally = get_caller_frame_info()
created_model = cast(
Type[GenericModel], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes
create_model(
model_name,
__module__=model_module or cls.__module__,
__base__=(cls,) + tuple(cls.__parameterized_bases__(typevars_map)),
__config__=None,
__validators__=validators,
__cls_kwargs__=None,
**fields,
),
)
_assigned_parameters[created_model] = typevars_map
if called_globally: # create global reference and therefore allow pickling
object_by_reference = None
reference_name = model_name
reference_module_globals = sys.modules[created_model.__module__].__dict__
while object_by_reference is not created_model:
object_by_reference = reference_module_globals.setdefault(reference_name, created_model)
reference_name += '_'
created_model.Config = cls.Config
# Find any typevars that are still present in the model.
# If none are left, the model is fully "concrete", otherwise the new
# class is a generic class as well taking the found typevars as
# parameters.
new_params = tuple(
{param: None for param in iter_contained_typevars(typevars_map.values())}
) # use dict as ordered set
created_model.__concrete__ = not new_params
if new_params:
created_model.__parameters__ = new_params
# Save created model in cache so we don't end up creating duplicate
# models that should be identical.
_generic_types_cache[(cls, params)] = created_model
if len(params) == 1:
_generic_types_cache[(cls, params[0])] = created_model
# Recursively walk class type hints and replace generic typevars
# with concrete types that were passed.
_prepare_model_fields(created_model, fields, instance_type_hints, typevars_map)
return created_model
@classmethod
def __concrete_name__(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str:
"""Compute class name for child classes.
        :param params: Tuple of types the class is parameterized with. Given a generic class
`Model` with 2 type variables and a concrete model `Model[str, int]`,
the value `(str, int)` would be passed to `params`.
        :return: String representing the new class where `params` are
passed to `cls` as type variables.
This method can be overridden to achieve a custom naming scheme for GenericModels.
"""
param_names = [display_as_type(param) for param in params]
params_component = ', '.join(param_names)
return f'{cls.__name__}[{params_component}]'
@classmethod
def __parameterized_bases__(cls, typevars_map: Parametrization) -> Iterator[Type[Any]]:
"""
        Returns unbound bases of ``cls`` parameterised with the given type variables.
:param typevars_map: Dictionary of type applications for binding subclasses.
Given a generic class `Model` with 2 type variables [S, T]
and a concrete model `Model[str, int]`,
the value `{S: str, T: int}` would be passed to `typevars_map`.
        :return: an iterator of generic subclasses, parameterised by `typevars_map`
and other assigned parameters of `cls`
e.g.:
```
class A(GenericModel, Generic[T]):
...
class B(A[V], Generic[V]):
...
assert A[int] in B.__parameterized_bases__({V: int})
```
"""
def build_base_model(
base_model: Type[GenericModel], mapped_types: Parametrization
) -> Iterator[Type[GenericModel]]:
base_parameters = tuple([mapped_types[param] for param in base_model.__parameters__])
parameterized_base = base_model.__class_getitem__(base_parameters)
if parameterized_base is base_model or parameterized_base is cls:
# Avoid duplication in MRO
return
yield parameterized_base
for base_model in cls.__bases__:
if not issubclass(base_model, GenericModel):
# not a class that can be meaningfully parameterized
continue
elif not getattr(base_model, '__parameters__', None):
# base_model is "GenericModel" (and has no __parameters__)
# or
# base_model is already concrete, and will be included transitively via cls.
continue
elif cls in _assigned_parameters:
if base_model in _assigned_parameters:
# cls is partially parameterised but not from base_model
# e.g. cls = B[S], base_model = A[S]
# B[S][int] should subclass A[int], (and will be transitively via B[int])
# but it's not viable to consistently subclass types with arbitrary construction
# So don't attempt to include A[S][int]
continue
else: # base_model not in _assigned_parameters:
# cls is partially parameterized, base_model is original generic
# e.g. cls = B[str, T], base_model = B[S, T]
# Need to determine the mapping for the base_model parameters
mapped_types: Parametrization = {
key: typevars_map.get(value, value) for key, value in _assigned_parameters[cls].items()
}
yield from build_base_model(base_model, mapped_types)
else:
# cls is base generic, so base_class has a distinct base
# can construct the Parameterised base model using typevars_map directly
yield from build_base_model(base_model, typevars_map)
def replace_types(type_: Any, type_map: Mapping[Any, Any]) -> Any:
"""Return type with all occurrences of `type_map` keys recursively replaced with their values.
:param type_: Any type, class or generic alias
:param type_map: Mapping from `TypeVar` instance to concrete types.
:return: New type representing the basic structure of `type_` with all
`typevar_map` keys recursively replaced.
>>> replace_types(Tuple[str, Union[List[str], float]], {str: int})
Tuple[int, Union[List[int], float]]
"""
if not type_map:
return type_
type_args = get_args(type_)
origin_type = get_origin(type_)
if origin_type is Annotated:
annotated_type, *annotations = type_args
return Annotated[replace_types(annotated_type, type_map), tuple(annotations)]
# Having type args is a good indicator that this is a typing module
# class instantiation or a generic alias of some sort.
if type_args:
resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args)
if all_identical(type_args, resolved_type_args):
# If all arguments are the same, there is no need to modify the
# type or create a new object at all
return type_
if (
origin_type is not None
and isinstance(type_, typing_base)
and not isinstance(origin_type, typing_base)
and getattr(type_, '_name', None) is not None
):
# In python < 3.9 generic aliases don't exist so any of these like `list`,
# `type` or `collections.abc.Callable` need to be translated.
# See: https://www.python.org/dev/peps/pep-0585
origin_type = getattr(typing, type_._name)
assert origin_type is not None
return origin_type[resolved_type_args]
# We handle pydantic generic models separately as they don't have the same
# semantics as "typing" classes or generic aliases
if not origin_type and lenient_issubclass(type_, GenericModel) and not type_.__concrete__:
type_args = type_.__parameters__
resolved_type_args = tuple(replace_types(t, type_map) for t in type_args)
if all_identical(type_args, resolved_type_args):
return type_
return type_[resolved_type_args]
# Handle special case for typehints that can have lists as arguments.
# `typing.Callable[[int, str], int]` is an example for this.
if isinstance(type_, (List, list)):
resolved_list = list(replace_types(element, type_map) for element in type_)
if all_identical(type_, resolved_list):
return type_
return resolved_list
# For JsonWrapperValue, need to handle its inner type to allow correct parsing
# of generic Json arguments like Json[T]
if not origin_type and lenient_issubclass(type_, JsonWrapper):
type_.inner_type = replace_types(type_.inner_type, type_map)
return type_
# If all else fails, we try to resolve the type directly and otherwise just
# return the input with no modifications.
return type_map.get(type_, type_)
def check_parameters_count(cls: Type[GenericModel], parameters: Tuple[Any, ...]) -> None:
actual = len(parameters)
expected = len(cls.__parameters__)
if actual != expected:
description = 'many' if actual > expected else 'few'
raise TypeError(f'Too {description} parameters for {cls.__name__}; actual {actual}, expected {expected}')
DictValues: Type[Any] = {}.values().__class__
def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]:
"""Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found."""
if isinstance(v, TypeVar):
yield v
elif hasattr(v, '__parameters__') and not get_origin(v) and lenient_issubclass(v, GenericModel):
yield from v.__parameters__
elif isinstance(v, (DictValues, list)):
for var in v:
yield from iter_contained_typevars(var)
else:
args = get_args(v)
for arg in args:
yield from iter_contained_typevars(arg)
def get_caller_frame_info() -> Tuple[Optional[str], bool]:
"""
    Used inside a function to check whether it was called globally.
    Will only work against non-compiled code, and is therefore used only in pydantic.generics.
    :return: Tuple[module_name, called_globally]
"""
try:
previous_caller_frame = sys._getframe(2)
except ValueError as e:
raise RuntimeError('This function must be used inside another function') from e
except AttributeError: # sys module does not have _getframe function, so there's nothing we can do about it
return None, False
frame_globals = previous_caller_frame.f_globals
return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals
def _prepare_model_fields(
created_model: Type[GenericModel],
fields: Mapping[str, Any],
instance_type_hints: Mapping[str, type],
typevars_map: Mapping[Any, type],
) -> None:
"""
Replace DeferredType fields with concrete type hints and prepare them.
"""
for key, field in created_model.__fields__.items():
if key not in fields:
assert field.type_.__class__ is not DeferredType
# https://github.com/nedbat/coveragepy/issues/198
continue # pragma: no cover
assert field.type_.__class__ is DeferredType, field.type_.__class__
field_type_hint = instance_type_hints[key]
concrete_type = replace_types(field_type_hint, typevars_map)
field.type_ = concrete_type
field.outer_type_ = concrete_type
field.prepare()
created_model.__annotations__[key] = concrete_type
| 16,001 | Python | 43.32687 | 120 | 0.63246 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/_hypothesis_plugin.py | """
Register Hypothesis strategies for Pydantic custom types.
This enables fully-automatic generation of test data for most Pydantic classes.
Note that this module has *no* runtime impact on Pydantic itself; instead it
is registered as a setuptools entry point and Hypothesis will import it if
Pydantic is installed. See also:
https://hypothesis.readthedocs.io/en/latest/strategies.html#registering-strategies-via-setuptools-entry-points
https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.register_type_strategy
https://hypothesis.readthedocs.io/en/latest/strategies.html#interaction-with-pytest-cov
https://pydantic-docs.helpmanual.io/usage/types/#pydantic-types
Note that because our motivation is to *improve user experience*, the strategies
are always sound (never generate invalid data) but sacrifice completeness for
maintainability (i.e. may be unable to generate some tricky but valid data).
Finally, this module makes liberal use of `# type: ignore[<code>]` pragmas.
This is because Hypothesis annotates `register_type_strategy()` with
`(T, SearchStrategy[T])`, but in most cases we register e.g. `ConstrainedInt`
to generate instances of the builtin `int` type which match the constraints.
"""
import contextlib
import ipaddress
import json
import math
from fractions import Fraction
from typing import Callable, Dict, Type, Union, cast, overload
import hypothesis.strategies as st
import pydantic
import pydantic.color
import pydantic.types
# FilePath and DirectoryPath are explicitly unsupported, as we'd have to create
# them on-disk, and that's unsafe in general without being told *where* to do so.
#
# URLs are unsupported because it's easy for users to define their own strategy for
# "normal" URLs, and hard for us to define a general strategy which includes "weird"
# URLs but doesn't also have unpredictable performance problems.
#
# conlist() and conset() are unsupported for now, because the workarounds for
# Cython and Hypothesis to handle parametrized generic types are incompatible.
# Once Cython can support 'normal' generics we'll revisit this.
# Emails
try:
import email_validator
except ImportError: # pragma: no cover
pass
else:
def is_valid_email(s: str) -> bool:
# Hypothesis' st.emails() occasionally generates emails like [email protected]
# that are invalid according to email-validator, so we filter those out.
try:
email_validator.validate_email(s, check_deliverability=False)
return True
except email_validator.EmailNotValidError: # pragma: no cover
return False
# Note that these strategies deliberately stay away from any tricky Unicode
# or other encoding issues; we're just trying to generate *something* valid.
st.register_type_strategy(pydantic.EmailStr, st.emails().filter(is_valid_email)) # type: ignore[arg-type]
st.register_type_strategy(
pydantic.NameEmail,
st.builds(
'{} <{}>'.format, # type: ignore[arg-type]
st.from_regex('[A-Za-z0-9_]+( [A-Za-z0-9_]+){0,5}', fullmatch=True),
st.emails().filter(is_valid_email),
),
)
# PyObject - dotted names, in this case taken from the math module.
st.register_type_strategy(
pydantic.PyObject, # type: ignore[arg-type]
st.sampled_from(
[cast(pydantic.PyObject, f'math.{name}') for name in sorted(vars(math)) if not name.startswith('_')]
),
)
# CSS3 Colors; as name, hex, rgb(a) tuples or strings, or hsl strings
_color_regexes = (
'|'.join(
(
pydantic.color.r_hex_short,
pydantic.color.r_hex_long,
pydantic.color.r_rgb,
pydantic.color.r_rgba,
pydantic.color.r_hsl,
pydantic.color.r_hsla,
)
)
# Use more precise regex patterns to avoid value-out-of-range errors
.replace(pydantic.color._r_sl, r'(?:(\d\d?(?:\.\d+)?|100(?:\.0+)?)%)')
.replace(pydantic.color._r_alpha, r'(?:(0(?:\.\d+)?|1(?:\.0+)?|\.\d+|\d{1,2}%))')
.replace(pydantic.color._r_255, r'(?:((?:\d|\d\d|[01]\d\d|2[0-4]\d|25[0-4])(?:\.\d+)?|255(?:\.0+)?))')
)
st.register_type_strategy(
pydantic.color.Color,
st.one_of(
st.sampled_from(sorted(pydantic.color.COLORS_BY_NAME)),
st.tuples(
st.integers(0, 255),
st.integers(0, 255),
st.integers(0, 255),
st.none() | st.floats(0, 1) | st.floats(0, 100).map('{}%'.format),
),
st.from_regex(_color_regexes, fullmatch=True),
),
)
# Card numbers, valid according to the Luhn algorithm
def add_luhn_digit(card_number: str) -> str:
# See https://en.wikipedia.org/wiki/Luhn_algorithm
for digit in '0123456789':
with contextlib.suppress(Exception):
pydantic.PaymentCardNumber.validate_luhn_check_digit(card_number + digit)
return card_number + digit
raise AssertionError('Unreachable') # pragma: no cover
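# Worked example (illustrative, not part of the original module): for the
# partial number '7992739871' the candidate check digits are tried in order
# and the Luhn check first succeeds at '3', so
#
#     add_luhn_digit('7992739871')  # -> '79927398713'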
card_patterns = (
# Note that these patterns omit the Luhn check digit; that's added by the function above
'4[0-9]{14}', # Visa
'5[12345][0-9]{13}', # Mastercard
'3[47][0-9]{12}', # American Express
'[0-26-9][0-9]{10,17}', # other (incomplete to avoid overlap)
)
st.register_type_strategy(
pydantic.PaymentCardNumber,
st.from_regex('|'.join(card_patterns), fullmatch=True).map(add_luhn_digit), # type: ignore[arg-type]
)
# UUIDs
st.register_type_strategy(pydantic.UUID1, st.uuids(version=1))
st.register_type_strategy(pydantic.UUID3, st.uuids(version=3))
st.register_type_strategy(pydantic.UUID4, st.uuids(version=4))
st.register_type_strategy(pydantic.UUID5, st.uuids(version=5))
# Secrets
st.register_type_strategy(pydantic.SecretBytes, st.binary().map(pydantic.SecretBytes))
st.register_type_strategy(pydantic.SecretStr, st.text().map(pydantic.SecretStr))
# IP addresses, networks, and interfaces
st.register_type_strategy(pydantic.IPvAnyAddress, st.ip_addresses()) # type: ignore[arg-type]
st.register_type_strategy(
pydantic.IPvAnyInterface,
st.from_type(ipaddress.IPv4Interface) | st.from_type(ipaddress.IPv6Interface), # type: ignore[arg-type]
)
st.register_type_strategy(
pydantic.IPvAnyNetwork,
st.from_type(ipaddress.IPv4Network) | st.from_type(ipaddress.IPv6Network), # type: ignore[arg-type]
)
# We hook into the con***() functions and the ConstrainedNumberMeta metaclass,
# so here we only have to register subclasses for other constrained types which
# don't go via those mechanisms. Then there are the registration hooks below.
st.register_type_strategy(pydantic.StrictBool, st.booleans())
st.register_type_strategy(pydantic.StrictStr, st.text())
# Constrained-type resolver functions
#
# For these ones, we actually want to inspect the type in order to work out a
# satisfying strategy. First up, the machinery for tracking resolver functions:
RESOLVERS: Dict[type, Callable[[type], st.SearchStrategy]] = {} # type: ignore[type-arg]
@overload
def _registered(typ: Type[pydantic.types.T]) -> Type[pydantic.types.T]:
pass
@overload
def _registered(typ: pydantic.types.ConstrainedNumberMeta) -> pydantic.types.ConstrainedNumberMeta:
pass
def _registered(
typ: Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta]
) -> Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta]:
# This function replaces the version in `pydantic.types`, in order to
# effect the registration of new constrained types so that Hypothesis
# can generate valid examples.
pydantic.types._DEFINED_TYPES.add(typ)
for supertype, resolver in RESOLVERS.items():
if issubclass(typ, supertype):
st.register_type_strategy(typ, resolver(typ)) # type: ignore
return typ
raise NotImplementedError(f'Unknown type {typ!r} has no resolver to register') # pragma: no cover
def resolves(
typ: Union[type, pydantic.types.ConstrainedNumberMeta]
) -> Callable[[Callable[..., st.SearchStrategy]], Callable[..., st.SearchStrategy]]: # type: ignore[type-arg]
def inner(f): # type: ignore
assert f not in RESOLVERS
RESOLVERS[typ] = f
return f
return inner
# Type-to-strategy resolver functions
@resolves(pydantic.JsonWrapper)
def resolve_json(cls): # type: ignore[no-untyped-def]
try:
inner = st.none() if cls.inner_type is None else st.from_type(cls.inner_type)
except Exception: # pragma: no cover
finite = st.floats(allow_infinity=False, allow_nan=False)
inner = st.recursive(
base=st.one_of(st.none(), st.booleans(), st.integers(), finite, st.text()),
extend=lambda x: st.lists(x) | st.dictionaries(st.text(), x), # type: ignore
)
return st.builds(
json.dumps,
inner,
ensure_ascii=st.booleans(),
indent=st.none() | st.integers(0, 16),
sort_keys=st.booleans(),
)
@resolves(pydantic.ConstrainedBytes)
def resolve_conbytes(cls): # type: ignore[no-untyped-def] # pragma: no cover
min_size = cls.min_length or 0
max_size = cls.max_length
if not cls.strip_whitespace:
return st.binary(min_size=min_size, max_size=max_size)
# Fun with regex to ensure we neither start nor end with whitespace
repeats = '{{{},{}}}'.format(
min_size - 2 if min_size > 2 else 0,
max_size - 2 if (max_size or 0) > 2 else '',
)
if min_size >= 2:
pattern = rf'\W.{repeats}\W'
elif min_size == 1:
pattern = rf'\W(.{repeats}\W)?'
else:
assert min_size == 0
pattern = rf'(\W(.{repeats}\W)?)?'
return st.from_regex(pattern.encode(), fullmatch=True)
@resolves(pydantic.ConstrainedDecimal)
def resolve_condecimal(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt
s = st.decimals(min_value, max_value, allow_nan=False, places=cls.decimal_places)
if cls.lt is not None:
s = s.filter(lambda d: d < cls.lt)
if cls.gt is not None:
s = s.filter(lambda d: cls.gt < d)
return s
@resolves(pydantic.ConstrainedFloat)
def resolve_confloat(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
exclude_min = False
exclude_max = False
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt
exclude_min = True
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt
exclude_max = True
if cls.multiple_of is None:
return st.floats(min_value, max_value, exclude_min=exclude_min, exclude_max=exclude_max, allow_nan=False)
if min_value is not None:
min_value = math.ceil(min_value / cls.multiple_of)
if exclude_min:
min_value = min_value + 1
if max_value is not None:
assert max_value >= cls.multiple_of, 'Cannot build model with max value smaller than multiple of'
max_value = math.floor(max_value / cls.multiple_of)
if exclude_max:
max_value = max_value - 1
return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of)
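# Illustrative trace (not part of the original module): for a hypothetical
# confloat(gt=0, le=10, multiple_of=2.5), min_value becomes ceil(0 / 2.5) = 0
# and is bumped to 1 because the bound is exclusive; max_value becomes
# floor(10 / 2.5) = 4, so the strategy draws from integers(1, 4) and maps to
# {2.5, 5.0, 7.5, 10.0} -- exactly the valid values.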
@resolves(pydantic.ConstrainedInt)
def resolve_conint(cls): # type: ignore[no-untyped-def]
min_value = cls.ge
max_value = cls.le
if cls.gt is not None:
assert min_value is None, 'Set `gt` or `ge`, but not both'
min_value = cls.gt + 1
if cls.lt is not None:
assert max_value is None, 'Set `lt` or `le`, but not both'
max_value = cls.lt - 1
if cls.multiple_of is None or cls.multiple_of == 1:
return st.integers(min_value, max_value)
# These adjustments and the .map handle integer-valued multiples, while the
# .filter handles trickier cases as for confloat.
if min_value is not None:
min_value = math.ceil(Fraction(min_value) / Fraction(cls.multiple_of))
if max_value is not None:
max_value = math.floor(Fraction(max_value) / Fraction(cls.multiple_of))
return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of)
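# Illustrative trace (not part of the original module): for a hypothetical
# conint(ge=1, le=10, multiple_of=3), the Fraction arithmetic gives
# min_value = ceil(1/3) = 1 and max_value = floor(10/3) = 3, so the strategy
# draws from integers(1, 3) and maps to {3, 6, 9}.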
@resolves(pydantic.ConstrainedStr)
def resolve_constr(cls): # type: ignore[no-untyped-def] # pragma: no cover
min_size = cls.min_length or 0
max_size = cls.max_length
if cls.regex is None and not cls.strip_whitespace:
return st.text(min_size=min_size, max_size=max_size)
if cls.regex is not None:
strategy = st.from_regex(cls.regex)
if cls.strip_whitespace:
strategy = strategy.filter(lambda s: s == s.strip())
elif cls.strip_whitespace:
repeats = '{{{},{}}}'.format(
min_size - 2 if min_size > 2 else 0,
max_size - 2 if (max_size or 0) > 2 else '',
)
if min_size >= 2:
strategy = st.from_regex(rf'\W.{repeats}\W')
elif min_size == 1:
strategy = st.from_regex(rf'\W(.{repeats}\W)?')
else:
assert min_size == 0
strategy = st.from_regex(rf'(\W(.{repeats}\W)?)?')
if min_size == 0 and max_size is None:
return strategy
elif max_size is None:
return strategy.filter(lambda s: min_size <= len(s))
return strategy.filter(lambda s: min_size <= len(s) <= max_size)
# Finally, register all previously-defined types, and patch in our new function
for typ in list(pydantic.types._DEFINED_TYPES):
_registered(typ)
pydantic.types._registered = _registered
st.register_type_strategy(pydantic.Json, resolve_json)
| 13,765 | Python | 36.715068 | 113 | 0.665165 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/types.py | import math
import re
import warnings
from datetime import date
from decimal import Decimal
from enum import Enum
from pathlib import Path
from types import new_class
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
FrozenSet,
List,
Optional,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from uuid import UUID
from weakref import WeakSet
from . import errors
from .datetime_parse import parse_date
from .utils import import_string, update_not_none
from .validators import (
bytes_validator,
constr_length_validator,
constr_lower,
constr_strip_whitespace,
decimal_validator,
float_validator,
frozenset_validator,
int_validator,
list_validator,
number_multiple_validator,
number_size_validator,
path_exists_validator,
path_validator,
set_validator,
str_validator,
strict_bytes_validator,
strict_float_validator,
strict_int_validator,
strict_str_validator,
)
__all__ = [
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedFrozenSet',
'confrozenset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'ByteSize',
'PastDate',
'FutureDate',
]
NoneStr = Optional[str]
NoneBytes = Optional[bytes]
StrBytes = Union[str, bytes]
NoneStrBytes = Optional[StrBytes]
OptionalInt = Optional[int]
OptionalIntFloat = Union[OptionalInt, float]
OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal]
StrIntFloat = Union[str, int, float]
if TYPE_CHECKING:
from .dataclasses import Dataclass
from .main import BaseModel
from .typing import CallableGenerator
ModelOrDc = Type[Union['BaseModel', 'Dataclass']]
T = TypeVar('T')
_DEFINED_TYPES: 'WeakSet[type]' = WeakSet()
@overload
def _registered(typ: Type[T]) -> Type[T]:
pass
@overload
def _registered(typ: 'ConstrainedNumberMeta') -> 'ConstrainedNumberMeta':
pass
def _registered(typ: Union[Type[T], 'ConstrainedNumberMeta']) -> Union[Type[T], 'ConstrainedNumberMeta']:
# In order to generate valid examples of constrained types, Hypothesis needs
# to inspect the type object - so we keep a weakref to each contype object
# until it can be registered. When (or if) our Hypothesis plugin is loaded,
# it monkeypatches this function.
# If Hypothesis is never used, the total effect is to keep a weak reference
# which has minimal memory usage and doesn't even affect garbage collection.
_DEFINED_TYPES.add(typ)
return typ
class ConstrainedNumberMeta(type):
def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore
new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct))
if new_cls.gt is not None and new_cls.ge is not None:
raise errors.ConfigError('bounds gt and ge cannot be specified at the same time')
if new_cls.lt is not None and new_cls.le is not None:
raise errors.ConfigError('bounds lt and le cannot be specified at the same time')
return _registered(new_cls) # type: ignore
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BOOLEAN TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
StrictBool = bool
else:
class StrictBool(int):
"""
StrictBool to allow for bools which are not type-coerced.
"""
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='boolean')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> bool:
"""
Ensure that we only allow bools.
"""
if isinstance(value, bool):
return value
raise errors.StrictBoolError()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTEGER TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ConstrainedInt(int, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalInt = None
ge: OptionalInt = None
lt: OptionalInt = None
le: OptionalInt = None
multiple_of: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_int_validator if cls.strict else int_validator
yield number_size_validator
yield number_multiple_validator
def conint(
*, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None
) -> Type[int]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedIntValue', (ConstrainedInt,), namespace)
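# Usage sketch (illustrative, not part of pydantic itself; `Box` is a
# hypothetical model):
#
#     from pydantic import BaseModel, conint
#
#     class Box(BaseModel):
#         count: conint(ge=0, multiple_of=5)
#
#     Box(count=10)  # ok
#     Box(count=7)   # raises ValidationError (not a multiple of 5)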
if TYPE_CHECKING:
PositiveInt = int
NegativeInt = int
NonPositiveInt = int
NonNegativeInt = int
StrictInt = int
else:
class PositiveInt(ConstrainedInt):
gt = 0
class NegativeInt(ConstrainedInt):
lt = 0
class NonPositiveInt(ConstrainedInt):
le = 0
class NonNegativeInt(ConstrainedInt):
ge = 0
class StrictInt(ConstrainedInt):
strict = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLOAT TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalIntFloat = None
ge: OptionalIntFloat = None
lt: OptionalIntFloat = None
le: OptionalIntFloat = None
multiple_of: OptionalIntFloat = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
# Modify constraints to account for differences between IEEE floats and JSON
if field_schema.get('exclusiveMinimum') == -math.inf:
del field_schema['exclusiveMinimum']
if field_schema.get('minimum') == -math.inf:
del field_schema['minimum']
if field_schema.get('exclusiveMaximum') == math.inf:
del field_schema['exclusiveMaximum']
if field_schema.get('maximum') == math.inf:
del field_schema['maximum']
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_float_validator if cls.strict else float_validator
yield number_size_validator
yield number_multiple_validator
def confloat(
*,
strict: bool = False,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
) -> Type[float]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace)
if TYPE_CHECKING:
PositiveFloat = float
NegativeFloat = float
NonPositiveFloat = float
NonNegativeFloat = float
StrictFloat = float
else:
class PositiveFloat(ConstrainedFloat):
gt = 0
class NegativeFloat(ConstrainedFloat):
lt = 0
class NonPositiveFloat(ConstrainedFloat):
le = 0
class NonNegativeFloat(ConstrainedFloat):
ge = 0
class StrictFloat(ConstrainedFloat):
strict = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTES TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ConstrainedBytes(bytes):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
strict: bool = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_bytes_validator if cls.strict else bytes_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
def conbytes(
*,
strip_whitespace: bool = False,
to_lower: bool = False,
min_length: int = None,
max_length: int = None,
strict: bool = False,
) -> Type[bytes]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
to_lower=to_lower,
min_length=min_length,
max_length=max_length,
strict=strict,
)
return _registered(type('ConstrainedBytesValue', (ConstrainedBytes,), namespace))
if TYPE_CHECKING:
StrictBytes = bytes
else:
class StrictBytes(ConstrainedBytes):
strict = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STRING TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ConstrainedStr(str):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
curtail_length: OptionalInt = None
regex: Optional[Pattern[str]] = None
strict = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
minLength=cls.min_length,
maxLength=cls.max_length,
pattern=cls.regex and cls.regex.pattern,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_str_validator if cls.strict else str_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> Union[str]:
if cls.curtail_length and len(value) > cls.curtail_length:
value = value[: cls.curtail_length]
if cls.regex:
if not cls.regex.match(value):
raise errors.StrRegexError(pattern=cls.regex.pattern)
return value
def constr(
*,
strip_whitespace: bool = False,
to_lower: bool = False,
strict: bool = False,
min_length: int = None,
max_length: int = None,
curtail_length: int = None,
regex: str = None,
) -> Type[str]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
to_lower=to_lower,
strict=strict,
min_length=min_length,
max_length=max_length,
curtail_length=curtail_length,
regex=regex and re.compile(regex),
)
return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
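# Usage sketch (illustrative, not part of pydantic itself; `User` is a
# hypothetical model). Note that whitespace is stripped before the length
# and regex validators run:
#
#     from pydantic import BaseModel, constr
#
#     class User(BaseModel):
#         code: constr(strip_whitespace=True, min_length=3, regex=r'^[a-z]+$')
#
#     User(code='  abc ')  # ok -> code == 'abc'
#     User(code='ab')      # raises ValidationError (min_length)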
if TYPE_CHECKING:
StrictStr = str
else:
class StrictStr(ConstrainedStr):
strict = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This type's superclass should be Set[T], but cython chokes on that...
class ConstrainedSet(set): # type: ignore
# Needed for pydantic to detect that this is a set
__origin__ = set
__args__: Set[Type[T]] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.set_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
if v is None:
return None
v = set_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.SetMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.SetMaxLengthError(limit_value=cls.max_items)
return v
def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
# This type's superclass should be FrozenSet[T], but cython chokes on that...
class ConstrainedFrozenSet(frozenset): # type: ignore
# Needed for pydantic to detect that this is a set
__origin__ = frozenset
__args__: FrozenSet[Type[T]] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.frozenset_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def frozenset_length_validator(cls, v: 'Optional[FrozenSet[T]]') -> 'Optional[FrozenSet[T]]':
if v is None:
return None
v = frozenset_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.FrozenSetMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.FrozenSetMaxLengthError(limit_value=cls.max_items)
return v
def confrozenset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[FrozenSet[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedFrozenSetValue', (ConstrainedFrozenSet,), {}, lambda ns: ns.update(namespace))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LIST TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This type's superclass should be List[T], but cython chokes on that...
class ConstrainedList(list): # type: ignore
# Needed for pydantic to detect that this is a list
__origin__ = list
__args__: Tuple[Type[T], ...] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
unique_items: Optional[bool] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.list_length_validator
if cls.unique_items:
yield cls.unique_items_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items, uniqueItems=cls.unique_items)
@classmethod
def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
if v is None:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
@classmethod
def unique_items_validator(cls, v: 'List[T]') -> 'List[T]':
for i, value in enumerate(v, start=1):
if value in v[i:]:
raise errors.ListUniqueItemsError()
return v
def conlist(
item_type: Type[T], *, min_items: int = None, max_items: int = None, unique_items: bool = None
) -> Type[List[T]]:
# __args__ is needed to conform to typing generics api
namespace = dict(
min_items=min_items, max_items=max_items, unique_items=unique_items, item_type=item_type, __args__=(item_type,)
)
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
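# Usage sketch (illustrative, not part of pydantic itself; `Poll` is a
# hypothetical model):
#
#     from pydantic import BaseModel, conlist
#
#     class Poll(BaseModel):
#         choices: conlist(str, min_items=2, unique_items=True)
#
#     Poll(choices=['a', 'b'])  # ok
#     Poll(choices=['a', 'a'])  # raises ValidationError (duplicate items)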
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PYOBJECT TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
PyObject = Callable[..., Any]
else:
class PyObject:
validate_always = True
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> Any:
if isinstance(value, Callable):
return value
try:
value = str_validator(value)
except errors.StrError:
                raise errors.PyObjectError(error_message='value is neither a valid import path nor a valid callable')
try:
return import_string(value)
except ImportError as e:
raise errors.PyObjectError(error_message=str(e))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECIMAL TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta):
gt: OptionalIntFloatDecimal = None
ge: OptionalIntFloatDecimal = None
lt: OptionalIntFloatDecimal = None
le: OptionalIntFloatDecimal = None
max_digits: OptionalInt = None
decimal_places: OptionalInt = None
multiple_of: OptionalIntFloatDecimal = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield decimal_validator
yield number_size_validator
yield number_multiple_validator
yield cls.validate
@classmethod
def validate(cls, value: Decimal) -> Decimal:
digit_tuple, exponent = value.as_tuple()[1:]
if exponent in {'F', 'n', 'N'}:
raise errors.DecimalIsNotFiniteError()
if exponent >= 0:
# A positive exponent adds that many trailing zeros.
digits = len(digit_tuple) + exponent
decimals = 0
else:
# If the absolute value of the negative exponent is larger than the
# number of digits, then it's the same as the number of digits,
# because it'll consume all of the digits in digit_tuple and then
# add abs(exponent) - len(digit_tuple) leading zeros after the
# decimal point.
if abs(exponent) > len(digit_tuple):
digits = decimals = abs(exponent)
else:
digits = len(digit_tuple)
decimals = abs(exponent)
whole_digits = digits - decimals
if cls.max_digits is not None and digits > cls.max_digits:
raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits)
if cls.decimal_places is not None and decimals > cls.decimal_places:
raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places)
if cls.max_digits is not None and cls.decimal_places is not None:
expected = cls.max_digits - cls.decimal_places
if whole_digits > expected:
raise errors.DecimalWholeDigitsError(whole_digits=expected)
return value
def condecimal(
*,
gt: Decimal = None,
ge: Decimal = None,
lt: Decimal = None,
le: Decimal = None,
max_digits: int = None,
decimal_places: int = None,
multiple_of: Decimal = None,
) -> Type[Decimal]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of
)
return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace)
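# Worked example (illustrative, not part of pydantic itself): with a
# hypothetical condecimal(max_digits=4, decimal_places=2), Decimal('12.34')
# has digit_tuple (1, 2, 3, 4) and exponent -2, giving digits=4, decimals=2
# and whole_digits=2 -- all within bounds -- while Decimal('123.45') raises
# DecimalMaxDigitsError (5 digits > 4).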
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ UUID TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
UUID1 = UUID
UUID3 = UUID
UUID4 = UUID
UUID5 = UUID
else:
class UUID1(UUID):
_required_version = 1
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format=f'uuid{cls._required_version}')
class UUID3(UUID1):
_required_version = 3
class UUID4(UUID1):
_required_version = 4
class UUID5(UUID1):
_required_version = 5
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATH TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
FilePath = Path
DirectoryPath = Path
else:
class FilePath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='file-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_file():
raise errors.PathNotAFileError(path=value)
return value
class DirectoryPath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='directory-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_dir():
raise errors.PathNotADirectoryError(path=value)
return value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class JsonWrapper:
pass
class JsonMeta(type):
def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]:
return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}))
if TYPE_CHECKING:
Json = str
else:
class Json(metaclass=JsonMeta):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='json-string')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class SecretStr:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretStr':
if isinstance(value, cls):
return value
value = str_validator(value)
return cls(value)
def __init__(self, value: str):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretStr('{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretStr) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> str:
return self._secret_value
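# Usage sketch (illustrative, not part of pydantic itself):
#
#     s = SecretStr('hunter2')
#     str(s)                # '**********' (masked)
#     repr(s)               # "SecretStr('**********')"
#     s.get_secret_value()  # 'hunter2' (explicit opt-in to the raw value)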
class SecretBytes:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretBytes':
if isinstance(value, cls):
return value
value = bytes_validator(value)
return cls(value)
def __init__(self, value: bytes):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretBytes(b'{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretBytes) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> bytes:
return self._secret_value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PAYMENT CARD TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PaymentCardBrand(str, Enum):
# If you add another card type, please also add it to the
# Hypothesis strategy in `pydantic._hypothesis_plugin`.
amex = 'American Express'
mastercard = 'Mastercard'
visa = 'Visa'
other = 'other'
def __str__(self) -> str:
return self.value
class PaymentCardNumber(str):
"""
Based on: https://en.wikipedia.org/wiki/Payment_card_number
"""
strip_whitespace: ClassVar[bool] = True
min_length: ClassVar[int] = 12
max_length: ClassVar[int] = 19
bin: str
last4: str
brand: PaymentCardBrand
def __init__(self, card_number: str):
self.bin = card_number[:6]
self.last4 = card_number[-4:]
self.brand = self._get_brand(card_number)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield str_validator
yield constr_strip_whitespace
yield constr_length_validator
yield cls.validate_digits
yield cls.validate_luhn_check_digit
yield cls
yield cls.validate_length_for_brand
@property
def masked(self) -> str:
num_masked = len(self) - 10 # len(bin) + len(last4) == 10
return f'{self.bin}{"*" * num_masked}{self.last4}'
@classmethod
def validate_digits(cls, card_number: str) -> str:
if not card_number.isdigit():
raise errors.NotDigitError
return card_number
@classmethod
def validate_luhn_check_digit(cls, card_number: str) -> str:
"""
Based on: https://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum_ = int(card_number[-1])
length = len(card_number)
parity = length % 2
for i in range(length - 1):
digit = int(card_number[i])
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9
sum_ += digit
valid = sum_ % 10 == 0
if not valid:
raise errors.LuhnValidationError
return card_number
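    # Worked example (illustrative, not part of pydantic itself): for
    # '79927398713' the loop doubles every second digit counting from the
    # right, subtracting 9 whenever a doubled digit exceeds 9; the total is
    # 70, and 70 % 10 == 0, so the check digit passes.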
@classmethod
def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber':
"""
Validate length based on BIN for major brands:
https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN)
"""
required_length: Union[None, int, str] = None
        if card_number.brand == PaymentCardBrand.mastercard:
required_length = 16
valid = len(card_number) == required_length
elif card_number.brand == PaymentCardBrand.visa:
required_length = '13, 16 or 19'
valid = len(card_number) in {13, 16, 19}
elif card_number.brand == PaymentCardBrand.amex:
required_length = 15
valid = len(card_number) == required_length
else:
valid = True
if not valid:
raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length)
return card_number
@staticmethod
def _get_brand(card_number: str) -> PaymentCardBrand:
if card_number[0] == '4':
brand = PaymentCardBrand.visa
elif 51 <= int(card_number[:2]) <= 55:
brand = PaymentCardBrand.mastercard
elif card_number[:2] in {'34', '37'}:
brand = PaymentCardBrand.amex
else:
brand = PaymentCardBrand.other
return brand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTE SIZE TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BYTE_SIZES = {
'b': 1,
'kb': 10**3,
'mb': 10**6,
'gb': 10**9,
'tb': 10**12,
'pb': 10**15,
'eb': 10**18,
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
'eib': 2**60,
}
BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k})
byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE)
class ByteSize(int):
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, v: StrIntFloat) -> 'ByteSize':
try:
return cls(int(v))
except ValueError:
pass
str_match = byte_string_re.match(str(v))
if str_match is None:
raise errors.InvalidByteSize()
scalar, unit = str_match.groups()
if unit is None:
unit = 'b'
try:
unit_mult = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return cls(int(float(scalar) * unit_mult))
def human_readable(self, decimal: bool = False) -> str:
if decimal:
divisor = 1000
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
final_unit = 'EB'
else:
divisor = 1024
units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
final_unit = 'EiB'
num = float(self)
for unit in units:
if abs(num) < divisor:
return f'{num:0.1f}{unit}'
num /= divisor
return f'{num:0.1f}{final_unit}'
def to(self, unit: str) -> float:
try:
unit_div = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return self / unit_div
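# Usage sketch (illustrative, not part of pydantic itself):
#
#     ByteSize.validate('1.5 MiB')        # ByteSize(1572864)
#     ByteSize(1572864).human_readable()  # '1.5MiB'
#     ByteSize(1572864).to('kib')         # 1536.0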
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DATE TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
PastDate = date
FutureDate = date
else:
class PastDate(date):
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield parse_date
yield cls.validate
@classmethod
def validate(cls, value: date) -> date:
if value >= date.today():
raise errors.DateNotInThePastError()
return value
class FutureDate(date):
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield parse_date
yield cls.validate
@classmethod
def validate(cls, value: date) -> date:
if value <= date.today():
raise errors.DateNotInTheFutureError()
return value
| 32,645 | Python | 28.174263 | 119 | 0.590381 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/typing.py | import sys
from os import PathLike
from typing import ( # type: ignore
TYPE_CHECKING,
AbstractSet,
Any,
ClassVar,
Dict,
Generator,
Iterable,
List,
Mapping,
NewType,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
_eval_type,
cast,
get_type_hints,
)
from typing_extensions import Annotated, Literal
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import GenericAlias as TypingGenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
TypingGenericAlias = ()
try:
from types import UnionType as TypesUnionType # type: ignore
except ImportError:
# python < 3.10 does not have UnionType (str | int, byte | bool and so on)
TypesUnionType = ()
if sys.version_info < (3, 7):
if TYPE_CHECKING:
class ForwardRef:
def __init__(self, arg: Any):
pass
def _eval_type(self, globalns: Any, localns: Any) -> Any:
pass
else:
from typing import _ForwardRef as ForwardRef
else:
from typing import ForwardRef
if sys.version_info < (3, 7):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._eval_type(globalns, localns)
elif sys.version_info < (3, 9):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._evaluate(globalns, localns)
else:
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
# Even though it is the right signature for python 3.9, mypy complains with
# `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast...
return cast(Any, type_)._evaluate(globalns, localns, set())
if sys.version_info < (3, 9):
# Ensure we always get all the whole `Annotated` hint, not just the annotated type.
# For 3.6 to 3.8, `get_type_hints` doesn't recognize `typing_extensions.Annotated`,
# so it already returns the full annotation
get_all_type_hints = get_type_hints
else:
def get_all_type_hints(obj: Any, globalns: Any = None, localns: Any = None) -> Any:
return get_type_hints(obj, globalns, localns, include_extras=True)
if sys.version_info < (3, 7):
from typing import Callable as Callable
AnyCallable = Callable[..., Any]
NoArgAnyCallable = Callable[[], Any]
else:
from collections.abc import Callable as Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
NoArgAnyCallable = TypingCallable[[], Any]
# Annotated[...] is implemented by returning an instance of one of these classes, depending on
# python/typing_extensions version.
AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'}
if sys.version_info < (3, 8):
def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
if type(t).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm in py3.6
return getattr(t, '__origin__', None)
else:
from typing import get_origin as _typing_get_origin
def get_origin(tp: Type[Any]) -> Optional[Type[Any]]:
"""
        We can't directly use `typing.get_origin` since we need a fallback to support
        custom generic classes like `ConstrainedList`.
        The fallback should become unnecessary once https://github.com/cython/cython/issues/3537 is
        solved and https://github.com/samuelcolvin/pydantic/pull/1753 is merged.
"""
if type(tp).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm
return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
if sys.version_info < (3, 7): # noqa: C901 (ignore complexity)
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Simplest get_args compatibility layer possible.
The Python 3.6 typing module does not have `_GenericAlias` so
this won't work for everything. In particular this will not
support the `generics` module (we don't support generic models in
python 3.6).
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
return getattr(t, '__args__', ())
elif sys.version_info < (3, 8): # noqa: C901
from typing import _GenericAlias
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Compatibility version of get_args for python 3.7.
Mostly compatible with the python 3.8 `typing` module version
and able to handle almost all use cases.
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
if isinstance(t, _GenericAlias):
res = t.__args__
if t.__origin__ is Callable and res and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return getattr(t, '__args__', ())
else:
from typing import get_args as _typing_get_args
def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""
In python 3.9, `typing.Dict`, `typing.List`, ...
do have an empty `__args__` by default (instead of the generic ~T for example).
In order to still support `Dict` for example and consider it as `Dict[Any, Any]`,
we retrieve the `_nparams` value that tells us how many parameters it needs.
"""
if hasattr(tp, '_nparams'):
return (Any,) * tp._nparams
return ()
def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if type(tp).__name__ in AnnotatedTypeNames:
return tp.__args__ + tp.__metadata__
# the fallback is needed for the same reasons as `get_origin` (see above)
return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)
if sys.version_info < (3, 9):
def convert_generics(tp: Type[Any]) -> Type[Any]:
"""Python 3.9 and older only supports generics from `typing` module.
They convert strings to ForwardRef automatically.
Examples::
typing.List['Hero'] == typing.List[ForwardRef('Hero')]
"""
return tp
else:
from typing import _UnionGenericAlias # type: ignore
from typing_extensions import _AnnotatedAlias
def convert_generics(tp: Type[Any]) -> Type[Any]:
"""
Recursively searches for `str` type hints and replaces them with ForwardRef.
Examples::
convert_generics(list['Hero']) == list[ForwardRef('Hero')]
convert_generics(dict['Hero', 'Team']) == dict[ForwardRef('Hero'), ForwardRef('Team')]
convert_generics(typing.Dict['Hero', 'Team']) == typing.Dict[ForwardRef('Hero'), ForwardRef('Team')]
convert_generics(list[str | 'Hero'] | int) == list[str | ForwardRef('Hero')] | int
"""
origin = get_origin(tp)
if not origin or not hasattr(tp, '__args__'):
return tp
args = get_args(tp)
# typing.Annotated needs special treatment
if origin is Annotated:
return _AnnotatedAlias(convert_generics(args[0]), args[1:])
# recursively replace `str` instances inside of `GenericAlias` with `ForwardRef(arg)`
converted = tuple(
ForwardRef(arg) if isinstance(arg, str) and isinstance(tp, TypingGenericAlias) else convert_generics(arg)
for arg in args
)
if converted == args:
return tp
elif isinstance(tp, TypingGenericAlias):
return TypingGenericAlias(origin, converted)
elif isinstance(tp, TypesUnionType):
# recreate types.UnionType (PEP604, Python >= 3.10)
return _UnionGenericAlias(origin, converted)
else:
try:
setattr(tp, '__args__', converted)
except AttributeError:
pass
return tp
if sys.version_info < (3, 10):
def is_union(tp: Optional[Type[Any]]) -> bool:
return tp is Union
WithArgsTypes = (TypingGenericAlias,)
else:
import types
import typing
def is_union(tp: Optional[Type[Any]]) -> bool:
return tp is Union or tp is types.UnionType # noqa: E721
WithArgsTypes = (typing._GenericAlias, types.GenericAlias, types.UnionType)
if sys.version_info < (3, 9):
StrPath = Union[str, PathLike]
else:
StrPath = Union[str, PathLike]
# TODO: Once we switch to Cython 3 to handle generics properly
# (https://github.com/cython/cython/issues/2753), use following lines instead
# of the one above
# # os.PathLike only becomes subscriptable from Python 3.9 onwards
# StrPath = Union[str, PathLike[str]]
if TYPE_CHECKING:
from .fields import ModelField
TupleGenerator = Generator[Tuple[str, Any], None, None]
DictStrAny = Dict[str, Any]
DictAny = Dict[Any, Any]
SetStr = Set[str]
ListStr = List[str]
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
DictIntStrAny = Dict[IntStr, Any]
MappingIntStrAny = Mapping[IntStr, Any]
CallableGenerator = Generator[AnyCallable, None, None]
ReprArgs = Sequence[Tuple[Optional[str], Any]]
AnyClassMethod = classmethod[Any]
__all__ = (
'ForwardRef',
'Callable',
'AnyCallable',
'NoArgAnyCallable',
'NoneType',
'is_none_type',
'display_as_type',
'resolve_annotations',
'is_callable_type',
'is_literal_type',
'all_literal_values',
'is_namedtuple',
'is_typeddict',
'is_new_type',
'new_type_supertype',
'is_classvar',
'update_field_forward_refs',
'update_model_forward_refs',
'TupleGenerator',
'DictStrAny',
'DictAny',
'SetStr',
'ListStr',
'IntStr',
'AbstractSetIntStr',
'DictIntStrAny',
'CallableGenerator',
'ReprArgs',
'AnyClassMethod',
'WithArgsTypes',
'get_args',
'get_origin',
'get_sub_types',
'typing_base',
'get_all_type_hints',
'is_union',
'StrPath',
)
NoneType = None.__class__
NONE_TYPES: Tuple[Any, Any, Any] = (None, NoneType, Literal[None])
if sys.version_info < (3, 8):
# Even though this implementation is slower, we need it for python 3.6/3.7:
# In python 3.6/3.7 "Literal" is not a builtin type and uses a different
# mechanism.
# for this reason `Literal[None] is Literal[None]` evaluates to `False`,
# breaking the faster implementation used for the other python versions.
def is_none_type(type_: Any) -> bool:
return type_ in NONE_TYPES
elif sys.version_info[:2] == (3, 8):
# We can use the fast implementation for 3.8 but there is a very weird bug
# where it can fail for `Literal[None]`.
# We just need to redefine a useless `Literal[None]` inside the function body to fix this
def is_none_type(type_: Any) -> bool:
Literal[None] # fix edge case
for none_type in NONE_TYPES:
if type_ is none_type:
return True
return False
else:
def is_none_type(type_: Any) -> bool:
for none_type in NONE_TYPES:
if type_ is none_type:
return True
return False
def display_as_type(v: Type[Any]) -> str:
if not isinstance(v, typing_base) and not isinstance(v, WithArgsTypes) and not isinstance(v, type):
v = v.__class__
if is_union(get_origin(v)):
return f'Union[{", ".join(map(display_as_type, get_args(v)))}]'
if isinstance(v, WithArgsTypes):
# Generic alias are constructs like `list[int]`
return str(v).replace('typing.', '')
try:
return v.__name__
except AttributeError:
# happens with typing objects
return str(v).replace('typing.', '')
def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
base_globals: Optional[Dict[str, Any]] = None
if module_name:
try:
module = sys.modules[module_name]
except KeyError:
# happens occasionally, see https://github.com/samuelcolvin/pydantic/issues/2363
pass
else:
base_globals = module.__dict__
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
if (3, 10) > sys.version_info >= (3, 9, 8) or sys.version_info >= (3, 10, 1):
value = ForwardRef(value, is_argument=False, is_class=True)
elif sys.version_info >= (3, 7):
value = ForwardRef(value, is_argument=False)
else:
value = ForwardRef(value)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
def is_callable_type(type_: Type[Any]) -> bool:
return type_ is Callable or get_origin(type_) is Callable
if sys.version_info >= (3, 7):
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and get_origin(type_) is Literal
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return get_args(type_)
else:
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and hasattr(type_, '__values__') and type_ == Literal[type_.__values__]
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return type_.__values__
def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
"""
This method is used to retrieve all Literal values as
Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586)
e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]`
"""
if not is_literal_type(type_):
return (type_,)
values = literal_values(type_)
return tuple(x for value in values for x in all_literal_values(value))
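# Illustrative sketch (not part of the original module), using the docstring's
# own example:
#
#     all_literal_values(Literal[Literal[Literal[1, 2, 3], 'foo'], 5, None])
#     # -> (1, 2, 3, 'foo', 5, None)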
def is_namedtuple(type_: Type[Any]) -> bool:
"""
Check if a given class is a named tuple.
It can be either a `typing.NamedTuple` or `collections.namedtuple`
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, tuple) and hasattr(type_, '_fields')
def is_typeddict(type_: Type[Any]) -> bool:
"""
Check if a given class is a typed dict (from `typing` or `typing_extensions`)
In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict)
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, dict) and hasattr(type_, '__total__')
test_type = NewType('test_type', str)
def is_new_type(type_: Type[Any]) -> bool:
"""
Check whether type_ was created using typing.NewType
"""
return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore
def new_type_supertype(type_: Type[Any]) -> Type[Any]:
while hasattr(type_, '__supertype__'):
type_ = type_.__supertype__
return type_
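# Illustrative sketch (not part of the original module), with hypothetical
# NewTypes:
#
#     UserId = NewType('UserId', int)
#     AdminId = NewType('AdminId', UserId)
#     new_type_supertype(AdminId)  # -> int (walks the __supertype__ chain)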
def _check_classvar(v: Optional[Type[Any]]) -> bool:
if v is None:
return False
return v.__class__ == ClassVar.__class__ and (sys.version_info < (3, 7) or getattr(v, '_name', None) == 'ClassVar')
def is_classvar(ann_type: Type[Any]) -> bool:
if _check_classvar(ann_type) or _check_classvar(get_origin(ann_type)):
return True
# this is an ugly workaround for class vars that contain forward references and are therefore themselves
# forward references, see #3679
if ann_type.__class__ == ForwardRef and ann_type.__forward_arg__.startswith('ClassVar['):
return True
return False
def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this ModelField, globalns and localns.
"""
if field.type_.__class__ == ForwardRef:
field.type_ = evaluate_forwardref(field.type_, globalns, localns or None)
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns)
if field.discriminator_key is not None:
field.prepare_discriminated_union_sub_fields()
def update_model_forward_refs(
model: Type[Any],
fields: Iterable['ModelField'],
json_encoders: Dict[Union[Type[Any], str], AnyCallable],
localns: 'DictStrAny',
exc_to_suppress: Tuple[Type[BaseException], ...] = (),
) -> None:
"""
Try to update model fields ForwardRefs based on model and localns.
"""
if model.__module__ in sys.modules:
globalns = sys.modules[model.__module__].__dict__.copy()
else:
globalns = {}
globalns.setdefault(model.__name__, model)
for f in fields:
try:
update_field_forward_refs(f, globalns=globalns, localns=localns)
except exc_to_suppress:
pass
for key in set(json_encoders.keys()):
if isinstance(key, str):
fr: ForwardRef = ForwardRef(key)
elif isinstance(key, ForwardRef):
fr = key
else:
continue
try:
new_key = evaluate_forwardref(fr, globalns, localns or None)
except exc_to_suppress: # pragma: no cover
continue
json_encoders[new_key] = json_encoders.pop(key)
def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]:
"""
Tries to get the class of a Type[T] annotation. Returns True if Type is used
without brackets. Otherwise returns None.
"""
try:
origin = get_origin(type_)
if origin is None: # Python 3.6
origin = type_
if issubclass(origin, Type): # type: ignore
if not get_args(type_) or not isinstance(get_args(type_)[0], type):
return True
return get_args(type_)[0]
except (AttributeError, TypeError):
pass
return None
def get_sub_types(tp: Any) -> List[Any]:
"""
Return all the types that are allowed by type `tp`
`tp` can be a `Union` of allowed types or an `Annotated` type
"""
origin = get_origin(tp)
if origin is Annotated:
return get_sub_types(get_args(tp)[0])
elif is_union(origin):
return [x for t in get_args(tp) for x in get_sub_types(t)]
else:
return [tp]
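# Illustrative sketch (not part of the original module):
#
#     get_sub_types(Union[int, Union[str, bytes]])       # -> [int, str, bytes]
#     get_sub_types(Annotated[Union[int, str], 'meta'])  # -> [int, str]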
| 19,171 | Python | 30.900166 | 119 | 0.615513 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/annotated_types.py | from typing import TYPE_CHECKING, Any, Dict, FrozenSet, NamedTuple, Type
from .fields import Required
from .main import BaseModel, create_model
if TYPE_CHECKING:
from typing_extensions import TypedDict
def create_model_from_typeddict(
# Mypy bug: `Type[TypedDict]` is resolved as `Any` https://github.com/python/mypy/issues/11030
typeddict_cls: Type['TypedDict'], # type: ignore[valid-type]
**kwargs: Any,
) -> Type['BaseModel']:
"""
Create a `BaseModel` based on the fields of a `TypedDict`.
Since `typing.TypedDict` in Python 3.8 does not store runtime information about optional keys,
    we raise an error when that runtime information is missing (see https://bugs.python.org/issue38834).
"""
field_definitions: Dict[str, Any]
# Best case scenario: with python 3.9+ or when `TypedDict` is imported from `typing_extensions`
if not hasattr(typeddict_cls, '__required_keys__'):
raise TypeError(
'You should use `typing_extensions.TypedDict` instead of `typing.TypedDict` with Python < 3.9.2. '
'Without it, there is no way to differentiate required and optional fields when subclassed.'
)
required_keys: FrozenSet[str] = typeddict_cls.__required_keys__ # type: ignore[attr-defined]
field_definitions = {
field_name: (field_type, Required if field_name in required_keys else None)
for field_name, field_type in typeddict_cls.__annotations__.items()
}
return create_model(typeddict_cls.__name__, **kwargs, **field_definitions)
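# Usage sketch (illustrative, not part of pydantic itself; `Point` is a
# hypothetical TypedDict):
#
#     from typing_extensions import TypedDict
#
#     class Point(TypedDict, total=False):
#         x: int
#         y: int
#
#     Model = create_model_from_typeddict(Point)
#     Model()  # ok: with total=False every key is optional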
def create_model_from_namedtuple(namedtuple_cls: Type['NamedTuple'], **kwargs: Any) -> Type['BaseModel']:
"""
Create a `BaseModel` based on the fields of a named tuple.
A named tuple can be created with `typing.NamedTuple` and declared annotations
but also with `collections.namedtuple`, in this case we consider all fields
to have type `Any`.
"""
    # With python 3.10+, `__annotations__` always exists but can be empty, hence the `getattr... or...` logic
namedtuple_annotations: Dict[str, Type[Any]] = getattr(namedtuple_cls, '__annotations__', None) or {
k: Any for k in namedtuple_cls._fields
}
field_definitions: Dict[str, Any] = {
field_name: (field_type, Required) for field_name, field_type in namedtuple_annotations.items()
}
return create_model(namedtuple_cls.__name__, **kwargs, **field_definitions)
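# Usage sketch (illustrative, not from the original source): a
# `collections.namedtuple` carries no annotations, so every field is typed
# as `Any` and required:
#
#     >>> from collections import namedtuple
#     >>> Point = namedtuple('Point', ['x', 'y'])
#     >>> PointModel = create_model_from_namedtuple(Point)
#     >>> PointModel(x=1, y=2).dict()
#     {'x': 1, 'y': 2}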
| 2,399 | Python | 44.283018 | 110 | 0.685702 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/color.py | """
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the same colors, eg. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes preference,
eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
        Hex string representing the color; it can be 3, 4, 6 or 8 characters depending on whether
        a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
          None - (default) include alpha only if it's set (i.e. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
        Color as an HSL or HSLA tuple, i.e. hue, saturation, lightness and optionally alpha; all elements are in
the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
          None - (default) include alpha only if it's set (i.e. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
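# Illustrative usage of `Color` (not part of the original source):
#
#     >>> c = Color('#f00')
#     >>> c.as_rgb_tuple()
#     (255, 0, 0)
#     >>> c.as_hex()
#     '#f00'
#     >>> str(Color((0, 255, 255)))
#     'cyan'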
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x` or nothing)
* hex long eg. `<prefix>ffffff` (prefix can be `#`, `0x` or nothing)
* `rgb(<r>, <g>, <b>) `
* `rgba(<r>, <g>, <b>, <a>)`
"""
value_lower = value.lower()
try:
r, g, b = COLORS_BY_NAME[value_lower]
except KeyError:
pass
else:
return ints_to_rgba(r, g, b, None)
m = re.fullmatch(r_hex_short, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v * 2, 16) for v in rgb]
if a:
alpha: Optional[float] = int(a * 2, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_hex_long, value_lower)
if m:
*rgb, a = m.groups()
r, g, b = [int(v, 16) for v in rgb]
if a:
alpha = int(a, 16) / 255
else:
alpha = None
return ints_to_rgba(r, g, b, alpha)
m = re.fullmatch(r_rgb, value_lower)
if m:
return ints_to_rgba(*m.groups(), None) # type: ignore
m = re.fullmatch(r_rgba, value_lower)
if m:
return ints_to_rgba(*m.groups()) # type: ignore
m = re.fullmatch(r_hsl, value_lower)
if m:
h, h_units, s, l_ = m.groups()
return parse_hsl(h, h_units, s, l_)
m = re.fullmatch(r_hsla, value_lower)
if m:
h, h_units, s, l_, a = m.groups()
return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
raise ColorError(reason='string not recognised as a valid color')
def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
"""
Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
in the range 0 to 1
"""
try:
color = float(value)
except ValueError:
raise ColorError(reason='color values must be a valid number')
if 0 <= color <= max_val:
return color / max_val
else:
raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
"""
Parse a value checking it's a valid float in the range 0 to 1
"""
if value is None:
return None
try:
if isinstance(value, str) and value.endswith('%'):
alpha = float(value[:-1]) / 100
else:
alpha = float(value)
except ValueError:
raise ColorError(reason='alpha values must be a valid float')
if almost_equal_floats(alpha, 1):
return None
elif 0 <= alpha <= 1:
return alpha
else:
raise ColorError(reason='alpha values must be in the range 0 to 1')
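# Note (illustrative, not part of the original source): a fully opaque alpha
# is normalised away, e.g.
#
#     >>> parse_float_alpha('50%')
#     0.5
#     >>> parse_float_alpha(1.0) is None
#     True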
def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: Optional[float] = None) -> RGBA:
"""
Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
"""
s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100)
h_value = float(h)
if h_units in {None, 'deg'}:
h_value = h_value % 360 / 360
elif h_units == 'rad':
h_value = h_value % rads / rads
else:
# turns
h_value = h_value % 1
r, g, b = hls_to_rgb(h_value, l_value, s_value)
return RGBA(r, g, b, alpha)
def float_to_255(c: float) -> int:
return int(round(c * 255))
COLORS_BY_NAME = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()}
| 16,607 | Python | 32.96319 | 120 | 0.543867 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/json.py | import datetime
import re
import sys
from collections import deque
from decimal import Decimal
from enum import Enum
from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
from pathlib import Path
from types import GeneratorType
from typing import Any, Callable, Dict, Type, Union
from uuid import UUID
if sys.version_info >= (3, 7):
Pattern = re.Pattern
else:
# python 3.6
Pattern = re.compile('a').__class__
from .color import Color
from .networks import NameEmail
from .types import SecretBytes, SecretStr
__all__ = 'pydantic_encoder', 'custom_pydantic_encoder', 'timedelta_isoformat'
def isoformat(o: Union[datetime.date, datetime.time]) -> str:
return o.isoformat()
def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
"""
    Encodes a Decimal as int if there's no exponent, otherwise float
    This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
    where an integer (but not int typed) is used. Encoding this as a float
results in failed round-tripping between encode and parse.
Our Id type is a prime example of this.
>>> decimal_encoder(Decimal("1.0"))
1.0
>>> decimal_encoder(Decimal("1"))
1
"""
if dec_value.as_tuple().exponent >= 0:
return int(dec_value)
else:
return float(dec_value)
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
bytes: lambda o: o.decode(),
Color: str,
datetime.date: isoformat,
datetime.datetime: isoformat,
datetime.time: isoformat,
datetime.timedelta: lambda td: td.total_seconds(),
Decimal: decimal_encoder,
Enum: lambda o: o.value,
frozenset: list,
deque: list,
GeneratorType: list,
IPv4Address: str,
IPv4Interface: str,
IPv4Network: str,
IPv6Address: str,
IPv6Interface: str,
IPv6Network: str,
NameEmail: str,
Path: str,
Pattern: lambda o: o.pattern,
SecretBytes: str,
SecretStr: str,
set: list,
UUID: str,
}
def pydantic_encoder(obj: Any) -> Any:
from dataclasses import asdict, is_dataclass
from .main import BaseModel
if isinstance(obj, BaseModel):
return obj.dict()
elif is_dataclass(obj):
return asdict(obj)
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = ENCODERS_BY_TYPE[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
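# Usage sketch (illustrative, not from the original source): `pydantic_encoder`
# is typically passed to `json.dumps` as its `default` hook:
#
#     >>> import json
#     >>> from uuid import UUID
#     >>> json.dumps({'id': UUID('12345678-1234-5678-1234-567812345678')}, default=pydantic_encoder)
#     '{"id": "12345678-1234-5678-1234-567812345678"}'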
def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any:
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = type_encoders[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
return pydantic_encoder(obj)
def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for timedeltas.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'P{td.days}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
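# Illustrative example (not part of the original source):
#
#     >>> timedelta_isoformat(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4))
#     'P1DT2H3M4.000000S'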
| 3,418 | Python | 27.491666 | 102 | 0.665301 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/datetime_parse.py | """
Functions to parse datetime objects.
We're using regular expressions rather than time.strptime because:
- They provide both validation and parsing.
- They're more flexible for datetimes.
- The date/datetime/time constructors produce friendlier error messages.
Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
9718fa2e8abe430c3526a9278dd976443d4ae3c6
Changed to:
* use standard python datetime types not django.utils.timezone
* raise ValueError when regex doesn't match rather than returning None
* support parsing unix timestamps for dates and datetimes
"""
import re
from datetime import date, datetime, time, timedelta, timezone
from typing import Dict, Optional, Type, Union
from . import errors
date_expr = r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
time_expr = (
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
date_re = re.compile(f'{date_expr}$')
time_re = re.compile(time_expr)
datetime_re = re.compile(f'{date_expr}[T ]{time_expr}')
standard_duration_re = re.compile(
r'^'
r'(?:(?P<days>-?\d+) (days?, )?)?'
r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
r'(?:(?P<minutes>-?\d+):)?'
r'(?P<seconds>-?\d+)'
r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by timedelta
iso8601_duration_re = re.compile(
r'^(?P<sign>[-+]?)'
r'P'
r'(?:(?P<days>\d+(.\d+)?)D)?'
r'(?:T'
r'(?:(?P<hours>\d+(.\d+)?)H)?'
r'(?:(?P<minutes>\d+(.\d+)?)M)?'
r'(?:(?P<seconds>\d+(.\d+)?)S)?'
r')?'
r'$'
)
EPOCH = datetime(1970, 1, 1)
# if greater than this, the number is in ms, if less than or equal it's in seconds
# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
MS_WATERSHED = int(2e10)
# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
MAX_NUMBER = int(3e20)
StrBytesIntFloat = Union[str, bytes, int, float]
def get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
if isinstance(value, (int, float)):
return value
try:
return float(value)
except ValueError:
return None
except TypeError:
raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')
def from_unix_seconds(seconds: Union[int, float]) -> datetime:
if seconds > MAX_NUMBER:
return datetime.max
elif seconds < -MAX_NUMBER:
return datetime.min
while abs(seconds) > MS_WATERSHED:
seconds /= 1000
dt = EPOCH + timedelta(seconds=seconds)
return dt.replace(tzinfo=timezone.utc)
def _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:
if value == 'Z':
return timezone.utc
elif value is not None:
offset_mins = int(value[-2:]) if len(value) > 3 else 0
offset = 60 * int(value[1:3]) + offset_mins
if value[0] == '-':
offset = -offset
try:
return timezone(timedelta(minutes=offset))
except ValueError:
raise error()
else:
return None
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
"""
Parse a date/int/float/string and return a datetime.date.
Raise ValueError if the input is well formatted but not a valid date.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, date):
if isinstance(value, datetime):
return value.date()
else:
return value
number = get_numeric(value, 'date')
if number is not None:
return from_unix_seconds(number).date()
if isinstance(value, bytes):
value = value.decode()
match = date_re.match(value) # type: ignore
if match is None:
raise errors.DateError()
kw = {k: int(v) for k, v in match.groupdict().items()}
try:
return date(**kw)
except ValueError:
raise errors.DateError()
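# Illustrative examples (not part of the original source):
#
#     >>> parse_date('2012-04-23')
#     datetime.date(2012, 4, 23)
#     >>> parse_date(1493942400)  # unix timestamp, in seconds
#     datetime.date(2017, 5, 5)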
def parse_time(value: Union[time, StrBytesIntFloat]) -> time:
"""
Parse a time/string and return a datetime.time.
Raise ValueError if the input is well formatted but not a valid time.
Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
"""
if isinstance(value, time):
return value
number = get_numeric(value, 'time')
if number is not None:
if number >= 86400:
            # doesn't make sense since the time would loop back around to 0
raise errors.TimeError()
return (datetime.min + timedelta(seconds=number)).time()
if isinstance(value, bytes):
value = value.decode()
match = time_re.match(value) # type: ignore
if match is None:
raise errors.TimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)
kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
try:
return time(**kw_) # type: ignore
except ValueError:
raise errors.TimeError()
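# Illustrative example (not part of the original source):
#
#     >>> parse_time('09:15:00.5')
#     datetime.time(9, 15, 0, 500000)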
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
"""
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, datetime):
return value
number = get_numeric(value, 'datetime')
if number is not None:
return from_unix_seconds(number)
if isinstance(value, bytes):
value = value.decode()
match = datetime_re.match(value) # type: ignore
if match is None:
raise errors.DateTimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)
kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
try:
return datetime(**kw_) # type: ignore
except ValueError:
raise errors.DateTimeError()
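# Illustrative example (not part of the original source):
#
#     >>> parse_datetime('2012-04-23T09:15:00Z')
#     datetime.datetime(2012, 4, 23, 9, 15, tzinfo=datetime.timezone.utc)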
def parse_duration(value: StrBytesIntFloat) -> timedelta:
"""
Parse a duration int/float/string and return a datetime.timedelta.
The preferred format for durations in Django is '%d %H:%M:%S.%f'.
Also supports ISO 8601 representation.
"""
if isinstance(value, timedelta):
return value
if isinstance(value, (int, float)):
# below code requires a string
value = str(value)
elif isinstance(value, bytes):
value = value.decode()
try:
match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
except TypeError:
raise TypeError('invalid type; expected timedelta, string, bytes, int or float')
if not match:
raise errors.DurationError()
kw = match.groupdict()
sign = -1 if kw.pop('sign', '+') == '-' else 1
if kw.get('microseconds'):
kw['microseconds'] = kw['microseconds'].ljust(6, '0')
if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
kw['microseconds'] = '-' + kw['microseconds']
kw_ = {k: float(v) for k, v in kw.items() if v is not None}
return sign * timedelta(**kw_)
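# Illustrative examples (not part of the original source; reprs shown for
# Python 3.7+):
#
#     >>> parse_duration('1 02:03:04')
#     datetime.timedelta(days=1, seconds=7384)
#     >>> parse_duration('P1DT2H3M4S')  # ISO 8601
#     datetime.timedelta(days=1, seconds=7384)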
| 7,714 | Python | 29.983936 | 102 | 0.627042 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pydantic/class_validators.py | import warnings
from collections import ChainMap
from functools import wraps
from itertools import chain
from types import FunctionType
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union, overload
from .errors import ConfigError
from .typing import AnyCallable
from .utils import ROOT_KEY, in_ipython
if TYPE_CHECKING:
from .typing import AnyClassMethod
class Validator:
__slots__ = 'func', 'pre', 'each_item', 'always', 'check_fields', 'skip_on_failure'
def __init__(
self,
func: AnyCallable,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = False,
skip_on_failure: bool = False,
):
self.func = func
self.pre = pre
self.each_item = each_item
self.always = always
self.check_fields = check_fields
self.skip_on_failure = skip_on_failure
if TYPE_CHECKING:
from inspect import Signature
from .config import BaseConfig
from .fields import ModelField
from .types import ModelOrDc
ValidatorCallable = Callable[[Optional[ModelOrDc], Any, Dict[str, Any], ModelField, Type[BaseConfig]], Any]
ValidatorsList = List[ValidatorCallable]
ValidatorListDict = Dict[str, List[Validator]]
_FUNCS: Set[str] = set()
VALIDATOR_CONFIG_KEY = '__validator_config__'
ROOT_VALIDATOR_CONFIG_KEY = '__root_validator_config__'
def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = True,
    whole: Optional[bool] = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], 'AnyClassMethod']:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the
whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
:param allow_reuse: whether to track and raise an error if another validator refers to the decorated function
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. " # noqa: Q000
"E.g. usage should be `@validator('<field_name>', ...)`"
)
if whole is not None:
warnings.warn(
'The "whole" keyword argument is deprecated, use "each_item" (inverse meaning, default False) instead',
DeprecationWarning,
)
assert each_item is False, '"each_item" and "whole" conflict, remove "whole"'
each_item = not whole
def dec(f: AnyCallable) -> 'AnyClassMethod':
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls,
VALIDATOR_CONFIG_KEY,
(
fields,
Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields),
),
)
return f_cls
return dec
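# Usage sketch (illustrative, not from the original source): a typical field
# validator on a model:
#
#     >>> from pydantic import BaseModel, validator
#     >>> class UserModel(BaseModel):
#     ...     name: str
#     ...
#     ...     @validator('name')
#     ...     def name_must_be_title_case(cls, v):
#     ...         return v.title()
#     >>> UserModel(name='samuel colvin').name
#     'Samuel Colvin'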
@overload
def root_validator(_func: AnyCallable) -> 'AnyClassMethod':
...
@overload
def root_validator(
*, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
) -> Callable[[AnyCallable], 'AnyClassMethod']:
...
def root_validator(
_func: Optional[AnyCallable] = None, *, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
) -> Union['AnyClassMethod', Callable[[AnyCallable], 'AnyClassMethod']]:
"""
Decorate methods on a model indicating that they should be used to validate (and perhaps modify) data either
before or after standard model parsing/validation is performed.
"""
if _func:
f_cls = _prepare_validator(_func, allow_reuse)
setattr(
f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
)
return f_cls
def dec(f: AnyCallable) -> 'AnyClassMethod':
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
)
return f_cls
return dec
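# Usage sketch (illustrative, not from the original source): a root validator
# that checks two fields against each other after standard validation:
#
#     >>> from pydantic import BaseModel, root_validator
#     >>> class PasswordsModel(BaseModel):
#     ...     password1: str
#     ...     password2: str
#     ...
#     ...     @root_validator
#     ...     def check_passwords_match(cls, values):
#     ...         if values.get('password1') != values.get('password2'):
#     ...             raise ValueError('passwords do not match')
#     ...         return values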
def _prepare_validator(function: AnyCallable, allow_reuse: bool) -> 'AnyClassMethod':
"""
    Avoid validators with duplicated names since without this, validators can be overwritten silently,
    which generally isn't the intended behaviour. This check is skipped when running in ipython (see #312)
    or when `allow_reuse` is True.
"""
f_cls = function if isinstance(function, classmethod) else classmethod(function)
if not in_ipython() and not allow_reuse:
ref = f_cls.__func__.__module__ + '.' + f_cls.__func__.__qualname__
if ref in _FUNCS:
raise ConfigError(f'duplicate validator function "{ref}"; if this is intended, set `allow_reuse=True`')
_FUNCS.add(ref)
return f_cls
class ValidatorGroup:
def __init__(self, validators: 'ValidatorListDict') -> None:
self.validators = validators
self.used_validators = {'*'}
def get_validators(self, name: str) -> Optional[Dict[str, Validator]]:
self.used_validators.add(name)
validators = self.validators.get(name, [])
if name != ROOT_KEY:
validators += self.validators.get('*', [])
if validators:
return {v.func.__name__: v for v in validators}
else:
return None
def check_for_unused(self) -> None:
unused_validators = set(
chain.from_iterable(
(v.func.__name__ for v in self.validators[f] if v.check_fields)
for f in (self.validators.keys() - self.used_validators)
)
)
if unused_validators:
fn = ', '.join(unused_validators)
raise ConfigError(
f"Validators defined with incorrect fields: {fn} " # noqa: Q000
f"(use check_fields=False if you're inheriting from the model and intended this)"
)
def extract_validators(namespace: Dict[str, Any]) -> Dict[str, List[Validator]]:
validators: Dict[str, List[Validator]] = {}
for var_name, value in namespace.items():
validator_config = getattr(value, VALIDATOR_CONFIG_KEY, None)
if validator_config:
fields, v = validator_config
for field in fields:
if field in validators:
validators[field].append(v)
else:
validators[field] = [v]
return validators
def extract_root_validators(namespace: Dict[str, Any]) -> Tuple[List[AnyCallable], List[Tuple[bool, AnyCallable]]]:
from inspect import signature
pre_validators: List[AnyCallable] = []
post_validators: List[Tuple[bool, AnyCallable]] = []
for name, value in namespace.items():
validator_config: Optional[Validator] = getattr(value, ROOT_VALIDATOR_CONFIG_KEY, None)
if validator_config:
sig = signature(validator_config.func)
args = list(sig.parameters.keys())
if args[0] == 'self':
raise ConfigError(
f'Invalid signature for root validator {name}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, values).'
)
if len(args) != 2:
raise ConfigError(f'Invalid signature for root validator {name}: {sig}, should be: (cls, values).')
# check function signature
if validator_config.pre:
pre_validators.append(validator_config.func)
else:
post_validators.append((validator_config.skip_on_failure, validator_config.func))
return pre_validators, post_validators
def inherit_validators(base_validators: 'ValidatorListDict', validators: 'ValidatorListDict') -> 'ValidatorListDict':
for field, field_validators in base_validators.items():
if field not in validators:
validators[field] = []
validators[field] += field_validators
return validators
def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable':
"""
Make a generic function which calls a validator with the right arguments.
Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow,
hence this laborious way of doing things.
It's done like this so validators don't all need **kwargs in their signature, eg. any combination of
the arguments "values", "fields" and/or "config" are permitted.
"""
from inspect import signature
sig = signature(validator)
args = list(sig.parameters.keys())
first_arg = args.pop(0)
if first_arg == 'self':
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, '
f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
elif first_arg == 'cls':
# assume the second argument is value
return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:])))
else:
# assume the first argument was value which has already been removed
return wraps(validator)(_generic_validator_basic(validator, sig, set(args)))
def prep_validators(v_funcs: Iterable[AnyCallable]) -> 'ValidatorsList':
return [make_generic_validator(f) for f in v_funcs if f]
all_kwargs = {'values', 'field', 'config'}
def _generic_validator_cls(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable':
# assume the first argument is value
has_kwargs = False
if 'kwargs' in args:
has_kwargs = True
args -= {'kwargs'}
if not args.issubset(all_kwargs):
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, should be: '
f'(cls, value, values, config, field), "values", "config" and "field" are all optional.'
)
if has_kwargs:
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config)
elif args == set():
return lambda cls, v, values, field, config: validator(cls, v)
elif args == {'values'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values)
elif args == {'field'}:
return lambda cls, v, values, field, config: validator(cls, v, field=field)
elif args == {'config'}:
return lambda cls, v, values, field, config: validator(cls, v, config=config)
elif args == {'values', 'field'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field)
elif args == {'values', 'config'}:
return lambda cls, v, values, field, config: validator(cls, v, values=values, config=config)
elif args == {'field', 'config'}:
return lambda cls, v, values, field, config: validator(cls, v, field=field, config=config)
else:
# args == {'values', 'field', 'config'}
return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config)
def _generic_validator_basic(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable':
has_kwargs = False
if 'kwargs' in args:
has_kwargs = True
args -= {'kwargs'}
if not args.issubset(all_kwargs):
raise ConfigError(
f'Invalid signature for validator {validator}: {sig}, should be: '
f'(value, values, config, field), "values", "config" and "field" are all optional.'
)
if has_kwargs:
return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
elif args == set():
return lambda cls, v, values, field, config: validator(v)
elif args == {'values'}:
return lambda cls, v, values, field, config: validator(v, values=values)
elif args == {'field'}:
return lambda cls, v, values, field, config: validator(v, field=field)
elif args == {'config'}:
return lambda cls, v, values, field, config: validator(v, config=config)
elif args == {'values', 'field'}:
return lambda cls, v, values, field, config: validator(v, values=values, field=field)
elif args == {'values', 'config'}:
return lambda cls, v, values, field, config: validator(v, values=values, config=config)
elif args == {'field', 'config'}:
return lambda cls, v, values, field, config: validator(v, field=field, config=config)
else:
# args == {'values', 'field', 'config'}
return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
def gather_all_validators(type_: 'ModelOrDc') -> Dict[str, 'AnyClassMethod']:
all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__]) # type: ignore[arg-type,var-annotated]
return {
k: v
for k, v in all_attributes.items()
if hasattr(v, VALIDATOR_CONFIG_KEY) or hasattr(v, ROOT_VALIDATOR_CONFIG_KEY)
}
| 13,555 | Python | 39.106509 | 119 | 0.63401 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_receivebuffer.py | import re
import sys
from typing import List, Optional, Union
__all__ = ["ReceiveBuffer"]
# Operations we want to support:
# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable),
# or wait until there is one
# - read at-most-N bytes
# Goals:
# - on average, do this fast
# - worst case, do this in O(n) where n is the number of bytes processed
# Plan:
# - store bytearray, offset, how far we've searched for a separator token
# - use the how-far-we've-searched data to avoid rescanning
# - while doing a stream of uninterrupted processing, advance offset instead
# of constantly copying
# WARNING:
# - I haven't benchmarked or profiled any of this yet.
#
# Note that starting in Python 3.4, deleting the initial n bytes from a
# bytearray is amortized O(n), thanks to some excellent work by Antoine
# Martin:
#
# https://bugs.python.org/issue19087
#
# This means that if we only supported 3.4+, we could get rid of the code here
# involving self._start and self.compress, because it's doing exactly the same
# thing that bytearray now does internally.
#
# BUT unfortunately, we still support 2.7, and reading short segments out of a
# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually
# delete this code. Yet:
#
# https://pythonclock.org/
#
# (Two things to double-check first though: make sure PyPy also has the
# optimization, and benchmark to make sure it's a win, since we do have a
# slightly clever thing where we delay calling compress() until we've
# processed a whole event, which could in theory be slightly more efficient
# than the internal bytearray support.)
blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE)
class ReceiveBuffer:
def __init__(self) -> None:
self._data = bytearray()
self._next_line_search = 0
self._multiple_lines_search = 0
def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer":
self._data += byteslike
return self
def __bool__(self) -> bool:
return bool(len(self))
def __len__(self) -> int:
return len(self._data)
# for @property unprocessed_data
def __bytes__(self) -> bytes:
return bytes(self._data)
def _extract(self, count: int) -> bytearray:
        # extract an initial slice of the data buffer and return it
out = self._data[:count]
del self._data[:count]
self._next_line_search = 0
self._multiple_lines_search = 0
return out
def maybe_extract_at_most(self, count: int) -> Optional[bytearray]:
"""
Extract a fixed number of bytes from the buffer.
"""
out = self._data[:count]
if not out:
return None
return self._extract(count)
def maybe_extract_next_line(self) -> Optional[bytearray]:
"""
Extract the first line, if it is completed in the buffer.
"""
# Only search in buffer space that we've not already looked at.
search_start_index = max(0, self._next_line_search - 1)
partial_idx = self._data.find(b"\r\n", search_start_index)
if partial_idx == -1:
self._next_line_search = len(self._data)
return None
# + 2 is to compensate len(b"\r\n")
idx = partial_idx + 2
return self._extract(idx)
def maybe_extract_lines(self) -> Optional[List[bytearray]]:
"""
Extract everything up to the first blank line, and return a list of lines.
"""
# Handle the case where we have an immediate empty line.
if self._data[:1] == b"\n":
self._extract(1)
return []
if self._data[:2] == b"\r\n":
self._extract(2)
return []
# Only search in buffer space that we've not already looked at.
match = blank_line_regex.search(self._data, self._multiple_lines_search)
if match is None:
self._multiple_lines_search = max(0, len(self._data) - 2)
return None
# Truncate the buffer and return it.
idx = match.span(0)[-1]
out = self._extract(idx)
lines = out.split(b"\n")
for line in lines:
if line.endswith(b"\r"):
del line[-1]
assert lines[-2] == lines[-1] == b""
del lines[-2:]
return lines
    # In theory we should wait until `\r\n` before starting to validate
    # incoming data. However it's useful to detect (very) invalid data
    # early, given it might not even contain `\r\n` at all (in which case
    # only a timeout will get rid of it).
    # This is not a 100% effective detection, but more of a cheap sanity check
    # allowing for early abort in some useful cases.
    # This is especially helpful when a peer is messing up with HTTPS and
    # sends us a TLS stream where we were expecting plain HTTP, given that all
    # versions of TLS so far start the handshake with a 0x16 message type code.
def is_next_line_obviously_invalid_request_line(self) -> bool:
try:
# HTTP header line must not contain non-printable characters
# and should not start with a space
return self._data[0] < 0x21
except IndexError:
return False
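# Illustrative usage (not part of the original module):
#
#     >>> buf = ReceiveBuffer()
#     >>> buf += b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\ntail"
#     >>> buf.maybe_extract_lines()
#     [bytearray(b'HTTP/1.1 200 OK'), bytearray(b'Content-Length: 0')]
#     >>> bytes(buf)
#     b'tail'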
| 5,252 | Python | 33.110389 | 82 | 0.628332 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_version.py | # This file must be kept very simple, because it is consumed from several
# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
# We use a simple scheme:
# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
# where the +dev versions are never released into the wild, they're just what
# we stick into the VCS in between releases.
#
# This is compatible with PEP 440:
# http://legacy.python.org/dev/peps/pep-0440/
# via the use of the "local suffix" "+dev", which is disallowed on index
# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
# 1.0.0.)
__version__ = "0.14.0"
| 686 | Python | 39.411762 | 77 | 0.686589 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_headers.py | import re
from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union
from ._abnf import field_name, field_value
from ._util import bytesify, LocalProtocolError, validate
if TYPE_CHECKING:
from ._events import Request
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
# Facts
# -----
#
# Headers are:
# keys: case-insensitive ascii
# values: mixture of ascii and raw bytes
#
# "Historically, HTTP has allowed field content with text in the ISO-8859-1
# charset [ISO-8859-1], supporting other charsets only through use of
# [RFC2047] encoding. In practice, most HTTP header field values use only a
# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
# limit their field values to US-ASCII octets. A recipient SHOULD treat other
# octets in field content (obs-text) as opaque data."
# And it deprecates all non-ascii values
#
# Leading/trailing whitespace in header names is forbidden
#
# Values get leading/trailing whitespace stripped
#
# Content-Disposition actually needs to contain unicode semantically; to
# accomplish this it has a terrifically weird way of encoding the filename
# itself as ascii (and even this still has lots of cross-browser
# incompatibilities)
#
# Order is important:
# "a proxy MUST NOT change the order of these field values when forwarding a
# message"
# (and there are several headers where the order indicates a preference)
#
# Multiple occurrences of the same header:
# "A sender MUST NOT generate multiple header fields with the same field name
# in a message unless either the entire field value for that header field is
# defined as a comma-separated list [or the header is Set-Cookie which gets a
# special exception]" - RFC 7230. (cookies are in RFC 6265)
#
# So every header aside from Set-Cookie can be merged by b", ".join if it
# occurs repeatedly. But, of course, they can't necessarily be split by
# .split(b","), because quoting.
#
# Given all this mess (case insensitive, duplicates allowed, order is
# important, ...), there doesn't appear to be any standard way to handle
# headers in Python -- they're almost like dicts, but... actually just
# aren't. For now we punt and just use a super simple representation: headers
# are a list of pairs
#
# [(name1, value1), (name2, value2), ...]
#
# where all entries are bytestrings, names are lowercase and have no
# leading/trailing whitespace, and values are bytestrings with no
# leading/trailing whitespace. Searching and updating are done via naive O(n)
# methods.
#
# Maybe a dict-of-lists would be better?
_content_length_re = re.compile(rb"[0-9]+")
_field_name_re = re.compile(field_name.encode("ascii"))
_field_value_re = re.compile(field_value.encode("ascii"))
class Headers(Sequence[Tuple[bytes, bytes]]):
"""
A list-like interface that allows iterating over headers as byte-pairs
of (lowercased-name, value).
Internally we actually store the representation as three-tuples,
including both the raw original casing, in order to preserve casing
    over-the-wire, and the lowercased name, for case-insensitive comparisons.
r = Request(
method="GET",
target="/",
headers=[("Host", "example.org"), ("Connection", "keep-alive")],
http_version="1.1",
)
assert r.headers == [
(b"host", b"example.org"),
(b"connection", b"keep-alive")
]
assert r.headers.raw_items() == [
(b"Host", b"example.org"),
(b"Connection", b"keep-alive")
]
"""
__slots__ = "_full_items"
def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
self._full_items = full_items
def __bool__(self) -> bool:
return bool(self._full_items)
def __eq__(self, other: object) -> bool:
return list(self) == list(other) # type: ignore
def __len__(self) -> int:
return len(self._full_items)
def __repr__(self) -> str:
return "<Headers(%s)>" % repr(list(self))
def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
_, name, value = self._full_items[idx]
return (name, value)
def raw_items(self) -> List[Tuple[bytes, bytes]]:
return [(raw_name, value) for raw_name, _, value in self._full_items]
HeaderTypes = Union[
List[Tuple[bytes, bytes]],
List[Tuple[bytes, str]],
List[Tuple[str, bytes]],
List[Tuple[str, str]],
]
@overload
def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
...
@overload
def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:
...
@overload
def normalize_and_validate(
headers: Union[Headers, HeaderTypes], _parsed: bool = False
) -> Headers:
...
def normalize_and_validate(
headers: Union[Headers, HeaderTypes], _parsed: bool = False
) -> Headers:
new_headers = []
seen_content_length = None
saw_transfer_encoding = False
for name, value in headers:
# For headers coming out of the parser, we can safely skip some steps,
# because it always returns bytes and has already run these regexes
# over the data:
if not _parsed:
name = bytesify(name)
value = bytesify(value)
validate(_field_name_re, name, "Illegal header name {!r}", name)
validate(_field_value_re, value, "Illegal header value {!r}", value)
assert isinstance(name, bytes)
assert isinstance(value, bytes)
raw_name = name
name = name.lower()
if name == b"content-length":
lengths = {length.strip() for length in value.split(b",")}
if len(lengths) != 1:
raise LocalProtocolError("conflicting Content-Length headers")
value = lengths.pop()
validate(_content_length_re, value, "bad Content-Length")
if seen_content_length is None:
seen_content_length = value
new_headers.append((raw_name, name, value))
elif seen_content_length != value:
raise LocalProtocolError("conflicting Content-Length headers")
elif name == b"transfer-encoding":
# "A server that receives a request message with a transfer coding
# it does not understand SHOULD respond with 501 (Not
# Implemented)."
# https://tools.ietf.org/html/rfc7230#section-3.3.1
if saw_transfer_encoding:
raise LocalProtocolError(
"multiple Transfer-Encoding headers", error_status_hint=501
)
# "All transfer-coding names are case-insensitive"
# -- https://tools.ietf.org/html/rfc7230#section-4
value = value.lower()
if value != b"chunked":
raise LocalProtocolError(
"Only Transfer-Encoding: chunked is supported",
error_status_hint=501,
)
saw_transfer_encoding = True
new_headers.append((raw_name, name, value))
else:
new_headers.append((raw_name, name, value))
return Headers(new_headers)
def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
# Should only be used for headers whose value is a list of
# comma-separated, case-insensitive values.
#
# The header name `name` is expected to be lower-case bytes.
#
    # Connection: meets these criteria (including case insensitivity).
#
# Content-Length: technically is just a single value (1*DIGIT), but the
# standard makes reference to implementations that do multiple values, and
    # using this doesn't hurt. Ditto, case insensitivity doesn't hurt things either
# way.
#
# Transfer-Encoding: is more complex (allows for quoted strings), so
# splitting on , is actually wrong. For example, this is legal:
#
# Transfer-Encoding: foo; options="1,2", chunked
#
# and should be parsed as
#
# foo; options="1,2"
# chunked
#
# but this naive function will parse it as
#
# foo; options="1
# 2"
# chunked
#
# However, this is okay because the only thing we are going to do with
# any Transfer-Encoding is reject ones that aren't just "chunked", so
# both of these will be treated the same anyway.
#
# Expect: the only legal value is the literal string
# "100-continue". Splitting on commas is harmless. Case insensitive.
#
out: List[bytes] = []
for _, found_name, found_raw_value in headers._full_items:
if found_name == name:
found_raw_value = found_raw_value.lower()
for found_split_value in found_raw_value.split(b","):
found_split_value = found_split_value.strip()
if found_split_value:
out.append(found_split_value)
return out
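# Illustrative example (not part of the original module):
#
#     >>> headers = normalize_and_validate([("Connection", "keep-alive, Upgrade")])
#     >>> get_comma_header(headers, b"connection")
#     [b'keep-alive', b'upgrade']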
def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
# The header name `name` is expected to be lower-case bytes.
#
# Note that when we store the header we use title casing for the header
# names, in order to match the conventional HTTP header style.
#
# Simply calling `.title()` is a blunt approach, but it's correct
# here given the cases where we're using `set_comma_header`...
#
# Connection, Content-Length, Transfer-Encoding.
new_headers: List[Tuple[bytes, bytes]] = []
for found_raw_name, found_name, found_raw_value in headers._full_items:
if found_name != name:
new_headers.append((found_raw_name, found_raw_value))
for new_value in new_values:
new_headers.append((name.title(), new_value))
return normalize_and_validate(new_headers)
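# Illustrative example (not part of the original module): replacing the values
# from the sketch above with a single `close` token:
#
#     >>> headers = set_comma_header(headers, b"connection", [b"close"])
#     >>> list(headers)
#     [(b'connection', b'close')]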
def has_expect_100_continue(request: "Request") -> bool:
# https://tools.ietf.org/html/rfc7231#section-5.1.1
# "A server that receives a 100-continue expectation in an HTTP/1.0 request
# MUST ignore that expectation."
if request.http_version < b"1.1":
return False
expect = get_comma_header(request.headers, b"expect")
return b"100-continue" in expect
| 10,230 | Python | 35.670251 | 88 | 0.648485 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/__init__.py | # A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230),
# containing no networking code at all, loosely modelled on hyper-h2's generic
# implementation of HTTP/2 (and in particular the h2.connection.H2Connection
# class). There's still a bunch of subtle details you need to get right if you
# want to make this actually useful, because it doesn't implement all the
# semantics to check that what you're asking to write to the wire is sensible,
# but at least it gets you out of dealing with the wire itself.
from h11._connection import Connection, NEED_DATA, PAUSED
from h11._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from h11._state import (
CLIENT,
CLOSED,
DONE,
ERROR,
IDLE,
MIGHT_SWITCH_PROTOCOL,
MUST_CLOSE,
SEND_BODY,
SEND_RESPONSE,
SERVER,
SWITCHED_PROTOCOL,
)
from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError
from h11._version import __version__
PRODUCT_ID = "python-h11/" + __version__
__all__ = (
"Connection",
"NEED_DATA",
"PAUSED",
"ConnectionClosed",
"Data",
"EndOfMessage",
"Event",
"InformationalResponse",
"Request",
"Response",
"CLIENT",
"CLOSED",
"DONE",
"ERROR",
"IDLE",
"MUST_CLOSE",
"SEND_BODY",
"SEND_RESPONSE",
"SERVER",
"SWITCHED_PROTOCOL",
"ProtocolError",
"LocalProtocolError",
"RemoteProtocolError",
)
| 1,507 | Python | 22.936508 | 78 | 0.667551 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_readers.py | # Code to read HTTP data
#
# Strategy: each reader is a callable which takes a ReceiveBuffer object, and
# either:
# 1) consumes some of it and returns an Event
# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate()
# and it might raise a LocalProtocolError, so simpler just to always use
# this)
# 3) returns None, meaning "I need more data"
#
# If they have a .read_eof attribute, then this will be called if an EOF is
# received -- but this is optional. Either way, the actual ConnectionClosed
# event will be generated afterwards.
#
# READERS is a dict describing how to pick a reader. It maps states to either:
# - a reader
# - or, for body readers, a dict of per-framing reader factories
import re
from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union
from ._abnf import chunk_header, header_field, request_line, status_line
from ._events import Data, EndOfMessage, InformationalResponse, Request, Response
from ._receivebuffer import ReceiveBuffer
from ._state import (
CLIENT,
CLOSED,
DONE,
IDLE,
MUST_CLOSE,
SEND_BODY,
SEND_RESPONSE,
SERVER,
)
from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate
__all__ = ["READERS"]
header_field_re = re.compile(header_field.encode("ascii"))
obs_fold_re = re.compile(rb"[ \t]+")
def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]:
it = iter(lines)
last: Optional[bytes] = None
for line in it:
match = obs_fold_re.match(line)
if match:
if last is None:
raise LocalProtocolError("continuation line at start of headers")
if not isinstance(last, bytearray):
# Cast to a mutable type, avoiding copy on append to ensure O(n) time
last = bytearray(last)
last += b" "
last += line[match.end() :]
else:
if last is not None:
yield last
last = line
if last is not None:
yield last
def _decode_header_lines(
lines: Iterable[bytes],
) -> Iterable[Tuple[bytes, bytes]]:
for line in _obsolete_line_fold(lines):
matches = validate(header_field_re, line, "illegal header line: {!r}", line)
yield (matches["field_name"], matches["field_value"])
request_line_re = re.compile(request_line.encode("ascii"))
def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]:
lines = buf.maybe_extract_lines()
if lines is None:
if buf.is_next_line_obviously_invalid_request_line():
raise LocalProtocolError("illegal request line")
return None
if not lines:
raise LocalProtocolError("no request line received")
matches = validate(
request_line_re, lines[0], "illegal request line: {!r}", lines[0]
)
return Request(
headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches
)
status_line_re = re.compile(status_line.encode("ascii"))
def maybe_read_from_SEND_RESPONSE_server(
buf: ReceiveBuffer,
) -> Union[InformationalResponse, Response, None]:
lines = buf.maybe_extract_lines()
if lines is None:
if buf.is_next_line_obviously_invalid_request_line():
raise LocalProtocolError("illegal request line")
return None
if not lines:
raise LocalProtocolError("no response line received")
matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0])
http_version = (
b"1.1" if matches["http_version"] is None else matches["http_version"]
)
reason = b"" if matches["reason"] is None else matches["reason"]
status_code = int(matches["status_code"])
class_: Union[Type[InformationalResponse], Type[Response]] = (
InformationalResponse if status_code < 200 else Response
)
return class_(
headers=list(_decode_header_lines(lines[1:])),
_parsed=True,
status_code=status_code,
reason=reason,
http_version=http_version,
)
class ContentLengthReader:
def __init__(self, length: int) -> None:
self._length = length
self._remaining = length
def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:
if self._remaining == 0:
return EndOfMessage()
data = buf.maybe_extract_at_most(self._remaining)
if data is None:
return None
self._remaining -= len(data)
return Data(data=data)
def read_eof(self) -> NoReturn:
raise RemoteProtocolError(
"peer closed connection without sending complete message body "
"(received {} bytes, expected {})".format(
self._length - self._remaining, self._length
)
)
chunk_header_re = re.compile(chunk_header.encode("ascii"))
class ChunkedReader:
def __init__(self) -> None:
self._bytes_in_chunk = 0
# After reading a chunk, we have to throw away the trailing \r\n; if
# this is >0 then we discard that many bytes before resuming regular
# de-chunkification.
self._bytes_to_discard = 0
self._reading_trailer = False
def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:
if self._reading_trailer:
lines = buf.maybe_extract_lines()
if lines is None:
return None
return EndOfMessage(headers=list(_decode_header_lines(lines)))
if self._bytes_to_discard > 0:
data = buf.maybe_extract_at_most(self._bytes_to_discard)
if data is None:
return None
self._bytes_to_discard -= len(data)
if self._bytes_to_discard > 0:
return None
# else, fall through and read some more
assert self._bytes_to_discard == 0
if self._bytes_in_chunk == 0:
# We need to refill our chunk count
chunk_header = buf.maybe_extract_next_line()
if chunk_header is None:
return None
matches = validate(
chunk_header_re,
chunk_header,
"illegal chunk header: {!r}",
chunk_header,
)
# XX FIXME: we discard chunk extensions. Does anyone care?
self._bytes_in_chunk = int(matches["chunk_size"], base=16)
if self._bytes_in_chunk == 0:
self._reading_trailer = True
return self(buf)
chunk_start = True
else:
chunk_start = False
assert self._bytes_in_chunk > 0
data = buf.maybe_extract_at_most(self._bytes_in_chunk)
if data is None:
return None
self._bytes_in_chunk -= len(data)
if self._bytes_in_chunk == 0:
self._bytes_to_discard = 2
chunk_end = True
else:
chunk_end = False
return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end)
def read_eof(self) -> NoReturn:
raise RemoteProtocolError(
"peer closed connection without sending complete message body "
"(incomplete chunked read)"
)
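# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# chunk loop above: one 4-byte chunk, then the 0-size terminator, then the
# (empty) trailer, fed through a ReceiveBuffer.
if __name__ == "__main__":
    reader = ChunkedReader()
    buf = ReceiveBuffer()
    buf += b"4\r\nWiki\r\n0\r\n\r\n"
    ev = reader(buf)
    assert isinstance(ev, Data) and ev.data == b"Wiki" and ev.chunk_end
    assert isinstance(reader(buf), EndOfMessage)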
class Http10Reader:
def __call__(self, buf: ReceiveBuffer) -> Optional[Data]:
data = buf.maybe_extract_at_most(999999999)
if data is None:
return None
return Data(data=data)
def read_eof(self) -> EndOfMessage:
return EndOfMessage()
def expect_nothing(buf: ReceiveBuffer) -> None:
if buf:
raise LocalProtocolError("Got data when expecting EOF")
return None
ReadersType = Dict[
Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]],
Union[Callable[..., Any], Dict[str, Callable[..., Any]]],
]
READERS: ReadersType = {
(CLIENT, IDLE): maybe_read_from_IDLE_client,
(SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server,
(SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server,
(CLIENT, DONE): expect_nothing,
(CLIENT, MUST_CLOSE): expect_nothing,
(CLIENT, CLOSED): expect_nothing,
(SERVER, DONE): expect_nothing,
(SERVER, MUST_CLOSE): expect_nothing,
(SERVER, CLOSED): expect_nothing,
SEND_BODY: {
"chunked": ChunkedReader,
"content-length": ContentLengthReader,
"http/1.0": Http10Reader,
},
}
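# Editor's note: a minimal, illustrative sketch (not part of h11's public
# API) of how _connection.py consumes this table: pick a reader by
# (role, state), then call it against a ReceiveBuffer until it yields an
# event or None (meaning "need more data").
if __name__ == "__main__":
    buf = ReceiveBuffer()
    buf += b"GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n"
    reader = READERS[(CLIENT, IDLE)]
    event = reader(buf)
    assert isinstance(event, Request) and event.target == b"/index.html"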
| 8,383 | Python | 32.806451 | 88 | 0.613384 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_events.py | # High level events that make up HTTP/1.1 conversations. Loosely inspired by
# the corresponding events in hyper-h2:
#
# http://python-hyper.org/h2/en/stable/api.html#events
#
# Don't subclass these. Stuff will break.
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
"Event",
"Request",
"InformationalResponse",
"Response",
"Data",
"EndOfMessage",
"ConnectionClosed",
]
method_re = re.compile(method.encode("ascii"))
request_target_re = re.compile(request_target.encode("ascii"))
class Event(ABC):
"""
Base class for h11 events.
"""
__slots__ = ()
@dataclass(init=False, frozen=True)
class Request(Event):
"""The beginning of an HTTP request.
Fields:
.. attribute:: method
An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
string. :term:`Bytes-like objects <bytes-like object>` and native
strings containing only ascii characters will be automatically
converted to byte strings.
.. attribute:: target
The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
       more exotic formats described in `RFC 7230, section 5.3
<https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
string. :term:`Bytes-like objects <bytes-like object>` and native
strings containing only ascii characters will be automatically
converted to byte strings.
.. attribute:: headers
Request headers, represented as a list of (name, value) pairs. See
:ref:`the header normalization rules <headers-format>` for details.
.. attribute:: http_version
The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
"""
__slots__ = ("method", "headers", "target", "http_version")
method: bytes
headers: Headers
target: bytes
http_version: bytes
def __init__(
self,
*,
method: Union[bytes, str],
headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
target: Union[bytes, str],
http_version: Union[bytes, str] = b"1.1",
_parsed: bool = False,
) -> None:
super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
self, "headers", normalize_and_validate(headers, _parsed=_parsed)
)
if not _parsed:
object.__setattr__(self, "method", bytesify(method))
object.__setattr__(self, "target", bytesify(target))
object.__setattr__(self, "http_version", bytesify(http_version))
else:
object.__setattr__(self, "method", method)
object.__setattr__(self, "target", target)
object.__setattr__(self, "http_version", http_version)
# "A server MUST respond with a 400 (Bad Request) status code to any
# HTTP/1.1 request message that lacks a Host header field and to any
# request message that contains more than one Host header field or a
# Host header field with an invalid field-value."
# -- https://tools.ietf.org/html/rfc7230#section-5.4
host_count = 0
for name, value in self.headers:
if name == b"host":
host_count += 1
if self.http_version == b"1.1" and host_count == 0:
raise LocalProtocolError("Missing mandatory Host: header")
if host_count > 1:
raise LocalProtocolError("Found multiple Host: headers")
validate(method_re, self.method, "Illegal method characters")
validate(request_target_re, self.target, "Illegal target characters")
# This is an unhashable type.
__hash__ = None # type: ignore
@dataclass(init=False, frozen=True)
class _ResponseBase(Event):
__slots__ = ("headers", "http_version", "reason", "status_code")
headers: Headers
http_version: bytes
reason: bytes
status_code: int
def __init__(
self,
*,
headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
status_code: int,
http_version: Union[bytes, str] = b"1.1",
reason: Union[bytes, str] = b"",
_parsed: bool = False,
) -> None:
super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
self, "headers", normalize_and_validate(headers, _parsed=_parsed)
)
if not _parsed:
object.__setattr__(self, "reason", bytesify(reason))
object.__setattr__(self, "http_version", bytesify(http_version))
if not isinstance(status_code, int):
raise LocalProtocolError("status code must be integer")
# Because IntEnum objects are instances of int, but aren't
# duck-compatible (sigh), see gh-72.
object.__setattr__(self, "status_code", int(status_code))
else:
object.__setattr__(self, "reason", reason)
object.__setattr__(self, "http_version", http_version)
object.__setattr__(self, "status_code", status_code)
self.__post_init__()
def __post_init__(self) -> None:
pass
# This is an unhashable type.
__hash__ = None # type: ignore
@dataclass(init=False, frozen=True)
class InformationalResponse(_ResponseBase):
"""An HTTP informational response.
Fields:
.. attribute:: status_code
The status code of this response, as an integer. For an
:class:`InformationalResponse`, this is always in the range [100,
200).
.. attribute:: headers
       Response headers, represented as a list of (name, value) pairs. See
:ref:`the header normalization rules <headers-format>` for
details.
.. attribute:: http_version
The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
.. attribute:: reason
The reason phrase of this response, as a byte string. For example:
``b"OK"``, or ``b"Not Found"``.
"""
def __post_init__(self) -> None:
if not (100 <= self.status_code < 200):
raise LocalProtocolError(
"InformationalResponse status_code should be in range "
"[100, 200), not {}".format(self.status_code)
)
# This is an unhashable type.
__hash__ = None # type: ignore
@dataclass(init=False, frozen=True)
class Response(_ResponseBase):
"""The beginning of an HTTP response.
Fields:
.. attribute:: status_code
The status code of this response, as an integer. For an
:class:`Response`, this is always in the range [200,
1000).
.. attribute:: headers
       Response headers, represented as a list of (name, value) pairs. See
:ref:`the header normalization rules <headers-format>` for details.
.. attribute:: http_version
The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
.. attribute:: reason
The reason phrase of this response, as a byte string. For example:
``b"OK"``, or ``b"Not Found"``.
"""
def __post_init__(self) -> None:
if not (200 <= self.status_code < 1000):
raise LocalProtocolError(
"Response status_code should be in range [200, 1000), not {}".format(
self.status_code
)
)
# This is an unhashable type.
__hash__ = None # type: ignore
@dataclass(init=False, frozen=True)
class Data(Event):
"""Part of an HTTP message body.
Fields:
.. attribute:: data
A :term:`bytes-like object` containing part of a message body. Or, if
using the ``combine=False`` argument to :meth:`Connection.send`, then
any object that your socket writing code knows what to do with, and for
which calling :func:`len` returns the number of bytes that will be
written -- see :ref:`sendfile` for details.
.. attribute:: chunk_start
A marker that indicates whether this data object is from the start of a
       chunked transfer encoding chunk. This field is ignored when a Data
event is provided to :meth:`Connection.send`: it is only valid on
events emitted from :meth:`Connection.next_event`. You probably
shouldn't use this attribute at all; see
:ref:`chunk-delimiters-are-bad` for details.
.. attribute:: chunk_end
A marker that indicates whether this data object is the last for a
       given chunked transfer encoding chunk. This field is ignored when
a Data event is provided to :meth:`Connection.send`: it is only valid
on events emitted from :meth:`Connection.next_event`. You probably
shouldn't use this attribute at all; see
:ref:`chunk-delimiters-are-bad` for details.
"""
__slots__ = ("data", "chunk_start", "chunk_end")
data: bytes
chunk_start: bool
chunk_end: bool
def __init__(
self, data: bytes, chunk_start: bool = False, chunk_end: bool = False
) -> None:
object.__setattr__(self, "data", data)
object.__setattr__(self, "chunk_start", chunk_start)
object.__setattr__(self, "chunk_end", chunk_end)
# This is an unhashable type.
__hash__ = None # type: ignore
# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
# are forbidden to be sent in a trailer, since processing them as if they were
# present in the header section might bypass external security filters."
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
# Unfortunately, the list of forbidden fields is long and vague :-/
@dataclass(init=False, frozen=True)
class EndOfMessage(Event):
"""The end of an HTTP message.
Fields:
.. attribute:: headers
Default value: ``[]``
Any trailing headers attached to this message, represented as a list of
(name, value) pairs. See :ref:`the header normalization rules
<headers-format>` for details.
Must be empty unless ``Transfer-Encoding: chunked`` is in use.
"""
__slots__ = ("headers",)
headers: Headers
def __init__(
self,
*,
headers: Union[
Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None
] = None,
_parsed: bool = False,
) -> None:
super().__init__()
if headers is None:
headers = Headers([])
elif not isinstance(headers, Headers):
headers = normalize_and_validate(headers, _parsed=_parsed)
object.__setattr__(self, "headers", headers)
# This is an unhashable type.
__hash__ = None # type: ignore
@dataclass(frozen=True)
class ConnectionClosed(Event):
"""This event indicates that the sender has closed their outgoing
connection.
Note that this does not necessarily mean that they can't *receive* further
    data, because TCP connections are composed of two one-way channels which
can be closed independently. See :ref:`closing` for details.
No fields.
"""
pass
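# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# constructors above: native strings are bytesified, and an HTTP/1.1
# Request without a Host header is rejected per the RFC 7230 rule quoted
# in Request.__init__.
if __name__ == "__main__":
    req = Request(method="GET", target="/", headers=[("Host", "example.com")])
    assert req.method == b"GET" and req.http_version == b"1.1"
    try:
        Request(method="GET", target="/", headers=[])
    except LocalProtocolError:
        pass
    else:
        raise AssertionError("missing Host should have been rejected")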
| 11,816 | Python | 30.937838 | 85 | 0.616622 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_state.py | ################################################################
# The core state machine
################################################################
#
# Rule 1: everything that affects the state machine and state transitions must
# live here in this file. As much as possible goes into the table-based
# representation, but for the bits that don't quite fit, the actual code and
# state must nonetheless live here.
#
# Rule 2: this file does not know about what role we're playing; it only knows
# about HTTP request/response cycles in the abstract. This ensures that we
# don't cheat and apply different rules to local and remote parties.
#
#
# Theory of operation
# ===================
#
# Possibly the simplest way to think about this is that we actually have 5
# different state machines here. Yes, 5. These are:
#
# 1) The client state, with its complicated automaton (see the docs)
# 2) The server state, with its complicated automaton (see the docs)
# 3) The keep-alive state, with possible states {True, False}
# 4) The SWITCH_CONNECT state, with possible states {False, True}
# 5) The SWITCH_UPGRADE state, with possible states {False, True}
#
# For (3)-(5), the first state listed is the initial state.
#
# (1)-(3) are stored explicitly in member variables. The last
# two are stored implicitly in the pending_switch_proposals set as:
# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)
# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)
#
# And each of these machines has two different kinds of transitions:
#
# a) Event-triggered
# b) State-triggered
#
# Event triggered is the obvious thing that you'd think it is: some event
# happens, and if it's the right event at the right time then a transition
# happens. But there are somewhat complicated rules for which machines can
# "see" which events. (As a rule of thumb, if a machine "sees" an event, this
# means two things: the event can affect the machine, and if the machine is
# not in a state where it expects that event then it's an error.) These rules
# are:
#
# 1) The client machine sees all h11.events objects emitted by the client.
#
# 2) The server machine sees all h11.events objects emitted by the server.
#
# It also sees the client's Request event.
#
# And sometimes, server events are annotated with a _SWITCH_* event. For
# example, we can have a (Response, _SWITCH_CONNECT) event, which is
# different from a regular Response event.
#
# 3) The keep-alive machine sees the process_keep_alive_disabled() event
# (which is derived from Request/Response events), and this event
# transitions it from True -> False, or from False -> False. There's no way
# to transition back.
#
# 4&5) The _SWITCH_* machines transition from False->True when we get a
# Request that proposes the relevant type of switch (via
# process_client_switch_proposals), and they go from True->False when we
# get a Response that has no _SWITCH_* annotation.
#
# So that's event-triggered transitions.
#
# State-triggered transitions are less standard. What they do here is couple
# the machines together. The way this works is, when certain *joint*
# configurations of states are achieved, then we automatically transition to a
# new *joint* state. So, for example, if we're ever in a joint state with
#
# client: DONE
# keep-alive: False
#
# then the client state immediately transitions to:
#
# client: MUST_CLOSE
#
# This is fundamentally different from an event-based transition, because it
# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state
# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive
# transitioned True -> False. Either way, once this precondition is satisfied,
# this transition is immediately triggered.
#
# What if two conflicting state-based transitions get enabled at the same
# time? In practice there's only one case where this arises (client DONE ->
# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by
# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.
#
# Implementation
# --------------
#
# The event-triggered transitions for the server and client machines are all
# stored explicitly in a table. Ditto for the state-triggered transitions that
# involve just the server and client state.
#
# The transitions for the other machines, and the state-triggered transitions
# that involve the other machines, are written out as explicit Python code.
#
# It'd be nice if there were some cleaner way to do all this. This isn't
# *too* terrible, but I feel like it could probably be better.
#
# WARNING
# -------
#
# The script that generates the state machine diagrams for the docs knows how
# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
# tables. But it can't automatically read the transitions that are written
# directly in Python code. So if you touch those, you need to also update the
# script to keep it in sync!
from typing import cast, Dict, Optional, Set, Tuple, Type, Union
from ._events import *
from ._util import LocalProtocolError, Sentinel
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
"CLIENT",
"SERVER",
"IDLE",
"SEND_RESPONSE",
"SEND_BODY",
"DONE",
"MUST_CLOSE",
"CLOSED",
"MIGHT_SWITCH_PROTOCOL",
"SWITCHED_PROTOCOL",
"ERROR",
]
class CLIENT(Sentinel, metaclass=Sentinel):
pass
class SERVER(Sentinel, metaclass=Sentinel):
pass
# States
class IDLE(Sentinel, metaclass=Sentinel):
pass
class SEND_RESPONSE(Sentinel, metaclass=Sentinel):
pass
class SEND_BODY(Sentinel, metaclass=Sentinel):
pass
class DONE(Sentinel, metaclass=Sentinel):
pass
class MUST_CLOSE(Sentinel, metaclass=Sentinel):
pass
class CLOSED(Sentinel, metaclass=Sentinel):
pass
class ERROR(Sentinel, metaclass=Sentinel):
pass
# Switch types
class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):
pass
class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):
pass
class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):
pass
class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):
pass
EventTransitionType = Dict[
Type[Sentinel],
Dict[
Type[Sentinel],
Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]],
],
]
EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = {
CLIENT: {
IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},
SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
DONE: {ConnectionClosed: CLOSED},
MUST_CLOSE: {ConnectionClosed: CLOSED},
CLOSED: {ConnectionClosed: CLOSED},
MIGHT_SWITCH_PROTOCOL: {},
SWITCHED_PROTOCOL: {},
ERROR: {},
},
SERVER: {
IDLE: {
ConnectionClosed: CLOSED,
Response: SEND_BODY,
# Special case: server sees client Request events, in this form
(Request, CLIENT): SEND_RESPONSE,
},
SEND_RESPONSE: {
InformationalResponse: SEND_RESPONSE,
Response: SEND_BODY,
(InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,
(Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,
},
SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
DONE: {ConnectionClosed: CLOSED},
MUST_CLOSE: {ConnectionClosed: CLOSED},
CLOSED: {ConnectionClosed: CLOSED},
SWITCHED_PROTOCOL: {},
ERROR: {},
},
}
StateTransitionType = Dict[
Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]]
]
# NB: there are also some special-case state-triggered transitions hard-coded
# into _fire_state_triggered_transitions below.
STATE_TRIGGERED_TRANSITIONS: StateTransitionType = {
# (Client state, Server state) -> new states
# Protocol negotiation
(MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},
# Socket shutdown
(CLOSED, DONE): {SERVER: MUST_CLOSE},
(CLOSED, IDLE): {SERVER: MUST_CLOSE},
(ERROR, DONE): {SERVER: MUST_CLOSE},
(DONE, CLOSED): {CLIENT: MUST_CLOSE},
(IDLE, CLOSED): {CLIENT: MUST_CLOSE},
(DONE, ERROR): {CLIENT: MUST_CLOSE},
}
class ConnectionState:
def __init__(self) -> None:
# Extra bits of state that don't quite fit into the state model.
# If this is False then it enables the automatic DONE -> MUST_CLOSE
# transition. Don't set this directly; call .keep_alive_disabled()
self.keep_alive = True
# This is a subset of {UPGRADE, CONNECT}, containing the proposals
# made by the client for switching protocols.
self.pending_switch_proposals: Set[Type[Sentinel]] = set()
self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}
def process_error(self, role: Type[Sentinel]) -> None:
self.states[role] = ERROR
self._fire_state_triggered_transitions()
def process_keep_alive_disabled(self) -> None:
self.keep_alive = False
self._fire_state_triggered_transitions()
def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:
self.pending_switch_proposals.add(switch_event)
self._fire_state_triggered_transitions()
def process_event(
self,
role: Type[Sentinel],
event_type: Type[Event],
server_switch_event: Optional[Type[Sentinel]] = None,
) -> None:
_event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type
if server_switch_event is not None:
assert role is SERVER
if server_switch_event not in self.pending_switch_proposals:
raise LocalProtocolError(
"Received server {} event without a pending proposal".format(
server_switch_event
)
)
_event_type = (event_type, server_switch_event)
if server_switch_event is None and _event_type is Response:
self.pending_switch_proposals = set()
self._fire_event_triggered_transitions(role, _event_type)
# Special case: the server state does get to see Request
# events.
if _event_type is Request:
assert role is CLIENT
self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))
self._fire_state_triggered_transitions()
def _fire_event_triggered_transitions(
self,
role: Type[Sentinel],
event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],
) -> None:
state = self.states[role]
try:
new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]
except KeyError:
event_type = cast(Type[Event], event_type)
raise LocalProtocolError(
"can't handle event type {} when role={} and state={}".format(
event_type.__name__, role, self.states[role]
)
) from None
self.states[role] = new_state
def _fire_state_triggered_transitions(self) -> None:
# We apply these rules repeatedly until converging on a fixed point
while True:
start_states = dict(self.states)
# It could happen that both these special-case transitions are
# enabled at the same time:
#
# DONE -> MIGHT_SWITCH_PROTOCOL
# DONE -> MUST_CLOSE
#
            # For example, this will always be true of an HTTP/1.0 client
# requesting CONNECT. If this happens, the protocol switch takes
# priority. From there the client will either go to
# SWITCHED_PROTOCOL, in which case it's none of our business when
# they close the connection, or else the server will deny the
# request, in which case the client will go back to DONE and then
# from there to MUST_CLOSE.
if self.pending_switch_proposals:
if self.states[CLIENT] is DONE:
self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL
if not self.pending_switch_proposals:
if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:
self.states[CLIENT] = DONE
if not self.keep_alive:
for role in (CLIENT, SERVER):
if self.states[role] is DONE:
self.states[role] = MUST_CLOSE
# Tabular state-triggered transitions
joint_state = (self.states[CLIENT], self.states[SERVER])
changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})
self.states.update(changes)
if self.states == start_states:
# Fixed point reached
return
def start_next_cycle(self) -> None:
if self.states != {CLIENT: DONE, SERVER: DONE}:
raise LocalProtocolError(
"not in a reusable state. self.states={}".format(self.states)
)
# Can't reach DONE/DONE with any of these active, but still, let's be
# sure.
assert self.keep_alive
assert not self.pending_switch_proposals
self.states = {CLIENT: IDLE, SERVER: IDLE}
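# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# machinery above. A plain request/response cycle walks the client
# IDLE -> SEND_BODY -> DONE and the server IDLE -> SEND_RESPONSE ->
# SEND_BODY -> DONE, after which start_next_cycle() resets both for reuse.
if __name__ == "__main__":
    cs = ConnectionState()
    cs.process_event(CLIENT, Request)
    cs.process_event(CLIENT, EndOfMessage)
    assert cs.states == {CLIENT: DONE, SERVER: SEND_RESPONSE}
    cs.process_event(SERVER, Response)
    cs.process_event(SERVER, EndOfMessage)
    assert cs.states == {CLIENT: DONE, SERVER: DONE}
    cs.start_next_cycle()
    assert cs.states == {CLIENT: IDLE, SERVER: IDLE}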
| 13,300 | Python | 35.144022 | 88 | 0.651203 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_writers.py | # Code to write HTTP data
#
# Strategy: each writer takes an event + a write-some-bytes function, which
# it calls.
#
# WRITERS is a dict describing how to pick a writer. It maps states to either:
# - a writer
# - or, for body writers, a dict of framing-dependent writer factories
from typing import Any, Callable, Dict, List, Tuple, Type, Union
from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response
from ._headers import Headers
from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
from ._util import LocalProtocolError, Sentinel
__all__ = ["WRITERS"]
Writer = Callable[[bytes], Any]
def write_headers(headers: Headers, write: Writer) -> None:
# "Since the Host field-value is critical information for handling a
# request, a user agent SHOULD generate Host as the first header field
# following the request-line." - RFC 7230
raw_items = headers._full_items
for raw_name, name, value in raw_items:
if name == b"host":
write(b"%s: %s\r\n" % (raw_name, value))
for raw_name, name, value in raw_items:
if name != b"host":
write(b"%s: %s\r\n" % (raw_name, value))
write(b"\r\n")
def write_request(request: Request, write: Writer) -> None:
if request.http_version != b"1.1":
raise LocalProtocolError("I only send HTTP/1.1")
write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target))
write_headers(request.headers, write)
# Shared between InformationalResponse and Response
def write_any_response(
response: Union[InformationalResponse, Response], write: Writer
) -> None:
if response.http_version != b"1.1":
raise LocalProtocolError("I only send HTTP/1.1")
status_bytes = str(response.status_code).encode("ascii")
# We don't bother sending ascii status messages like "OK"; they're
# optional and ignored by the protocol. (But the space after the numeric
# status code is mandatory.)
#
# XX FIXME: could at least make an effort to pull out the status message
# from stdlib's http.HTTPStatus table. Or maybe just steal their enums
# (either by import or copy/paste). We already accept them as status codes
# since they're of type IntEnum < int.
write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason))
write_headers(response.headers, write)
class BodyWriter:
def __call__(self, event: Event, write: Writer) -> None:
if type(event) is Data:
self.send_data(event.data, write)
elif type(event) is EndOfMessage:
self.send_eom(event.headers, write)
else: # pragma: no cover
assert False
def send_data(self, data: bytes, write: Writer) -> None:
pass
def send_eom(self, headers: Headers, write: Writer) -> None:
pass
#
# These are all careful not to do anything to 'data' except call len(data) and
# write(data). This allows us to transparently pass-through funny objects,
# like placeholder objects referring to files on disk that will be sent via
# sendfile(2).
#
class ContentLengthWriter(BodyWriter):
def __init__(self, length: int) -> None:
self._length = length
def send_data(self, data: bytes, write: Writer) -> None:
self._length -= len(data)
if self._length < 0:
raise LocalProtocolError("Too much data for declared Content-Length")
write(data)
def send_eom(self, headers: Headers, write: Writer) -> None:
if self._length != 0:
raise LocalProtocolError("Too little data for declared Content-Length")
if headers:
raise LocalProtocolError("Content-Length and trailers don't mix")
class ChunkedWriter(BodyWriter):
def send_data(self, data: bytes, write: Writer) -> None:
# if we encoded 0-length data in the naive way, it would look like an
# end-of-message.
if not data:
return
write(b"%x\r\n" % len(data))
write(data)
write(b"\r\n")
def send_eom(self, headers: Headers, write: Writer) -> None:
write(b"0\r\n")
write_headers(headers, write)
class Http10Writer(BodyWriter):
def send_data(self, data: bytes, write: Writer) -> None:
write(data)
def send_eom(self, headers: Headers, write: Writer) -> None:
if headers:
raise LocalProtocolError("can't send trailers to HTTP/1.0 client")
# no need to close the socket ourselves, that will be taken care of by
# Connection: close machinery
WritersType = Dict[
Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]],
Union[
Dict[str, Type[BodyWriter]],
Callable[[Union[InformationalResponse, Response], Writer], None],
Callable[[Request, Writer], None],
],
]
WRITERS: WritersType = {
(CLIENT, IDLE): write_request,
(SERVER, IDLE): write_any_response,
(SERVER, SEND_RESPONSE): write_any_response,
SEND_BODY: {
"chunked": ChunkedWriter,
"content-length": ContentLengthWriter,
"http/1.0": Http10Writer,
},
}
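# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# writers above: serialize a bodyless GET plus its end-of-message marker,
# collecting the same byte chunks Connection.send() would produce.
if __name__ == "__main__":
    out: List[bytes] = []
    req = Request(method="GET", target="/", headers=[("Host", "example.com")])
    write_request(req, out.append)
    ContentLengthWriter(0)(EndOfMessage(), out.append)
    assert b"".join(out) == b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"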
| 5,081 | Python | 33.808219 | 88 | 0.652037 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_connection.py | # This contains the main Connection class. Everything in h11 revolves around
# this.
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
from ._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
from ._readers import READERS, ReadersType
from ._receivebuffer import ReceiveBuffer
from ._state import (
_SWITCH_CONNECT,
_SWITCH_UPGRADE,
CLIENT,
ConnectionState,
DONE,
ERROR,
MIGHT_SWITCH_PROTOCOL,
SEND_BODY,
SERVER,
SWITCHED_PROTOCOL,
)
from ._util import ( # Import the internal things we need
LocalProtocolError,
RemoteProtocolError,
Sentinel,
)
from ._writers import WRITERS, WritersType
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = ["Connection", "NEED_DATA", "PAUSED"]
class NEED_DATA(Sentinel, metaclass=Sentinel):
pass
class PAUSED(Sentinel, metaclass=Sentinel):
pass
# If we ever have this much buffered without it making a complete parseable
# event, we error out. The only time we really buffer is when reading the
# request/response line + headers together, so this is effectively the limit on
# the size of that.
#
# Some precedents for defaults:
# - node.js: 80 * 1024
# - tomcat: 8 * 1024
# - IIS: 16 * 1024
# - Apache: <8 KiB per line>
DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
# RFC 7230's rules for connection lifecycles:
# - If either side says they want to close the connection, then the connection
# must close.
# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
# (and even this is a mess -- e.g. if you're implementing a proxy then
# sending Connection: keep-alive is forbidden).
#
# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
# our rule is:
# - If someone says Connection: close, we will close
# - If someone uses HTTP/1.0, we will close.
def _keep_alive(event: Union[Request, Response]) -> bool:
connection = get_comma_header(event.headers, b"connection")
if b"close" in connection:
return False
if getattr(event, "http_version", b"1.1") < b"1.1":
return False
return True
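# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# rule above: an explicit Connection: close, or an HTTP/1.0 peer, both
# disable keep-alive.
if __name__ == "__main__":
    assert not _keep_alive(
        Request(method="GET", target="/", headers=[("Host", "a"), ("Connection", "close")])
    )
    assert not _keep_alive(
        Request(method="GET", target="/", headers=[("Host", "a")], http_version="1.0")
    )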
def _body_framing(
request_method: bytes, event: Union[Request, Response]
) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
# Called when we enter SEND_BODY to figure out framing information for
# this body.
#
# These are the only two events that can trigger a SEND_BODY state:
assert type(event) in (Request, Response)
# Returns one of:
#
# ("content-length", count)
# ("chunked", ())
# ("http/1.0", ())
#
# which are (lookup key, *args) for constructing body reader/writer
# objects.
#
# Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
#
# Step 1: some responses always have an empty body, regardless of what the
# headers say.
if type(event) is Response:
if (
event.status_code in (204, 304)
or request_method == b"HEAD"
or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
):
return ("content-length", (0,))
# Section 3.3.3 also lists another case -- responses with status_code
# < 200. For us these are InformationalResponses, not Responses, so
# they can't get into this function in the first place.
assert event.status_code >= 200
# Step 2: check for Transfer-Encoding (T-E beats C-L):
transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
if transfer_encodings:
assert transfer_encodings == [b"chunked"]
return ("chunked", ())
# Step 3: check for Content-Length
content_lengths = get_comma_header(event.headers, b"content-length")
if content_lengths:
return ("content-length", (int(content_lengths[0]),))
# Step 4: no applicable headers; fallback/default depends on type
if type(event) is Request:
return ("content-length", (0,))
else:
return ("http/1.0", ())
################################################################
#
# The main Connection class
#
################################################################
class Connection:
"""An object encapsulating the state of an HTTP connection.
Args:
our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
you're implementing a server, pass :data:`h11.SERVER`.
max_incomplete_event_size (int):
The maximum number of bytes we're willing to buffer of an
incomplete event. In practice this mostly sets a limit on the
maximum size of the request/response line + headers. If this is
exceeded, then :meth:`next_event` will raise
:exc:`RemoteProtocolError`.
"""
def __init__(
self,
our_role: Type[Sentinel],
max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
) -> None:
self._max_incomplete_event_size = max_incomplete_event_size
# State and role tracking
if our_role not in (CLIENT, SERVER):
raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
self.our_role = our_role
self.their_role: Type[Sentinel]
if our_role is CLIENT:
self.their_role = SERVER
else:
self.their_role = CLIENT
self._cstate = ConnectionState()
# Callables for converting data->events or vice-versa given the
# current state
self._writer = self._get_io_object(self.our_role, None, WRITERS)
self._reader = self._get_io_object(self.their_role, None, READERS)
# Holds any unprocessed received data
self._receive_buffer = ReceiveBuffer()
# If this is true, then it indicates that the incoming connection was
# closed *after* the end of whatever's in self._receive_buffer:
self._receive_buffer_closed = False
# Extra bits of state that don't fit into the state machine.
#
# These two are only used to interpret framing headers for figuring
# out how to read/write response bodies. their_http_version is also
# made available as a convenient public API.
self.their_http_version: Optional[bytes] = None
self._request_method: Optional[bytes] = None
# This is pure flow-control and doesn't at all affect the set of legal
# transitions, so no need to bother ConnectionState with it:
self.client_is_waiting_for_100_continue = False
@property
def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
"""A dictionary like::
{CLIENT: <client state>, SERVER: <server state>}
See :ref:`state-machine` for details.
"""
return dict(self._cstate.states)
@property
def our_state(self) -> Type[Sentinel]:
"""The current state of whichever role we are playing. See
:ref:`state-machine` for details.
"""
return self._cstate.states[self.our_role]
@property
def their_state(self) -> Type[Sentinel]:
"""The current state of whichever role we are NOT playing. See
:ref:`state-machine` for details.
"""
return self._cstate.states[self.their_role]
@property
def they_are_waiting_for_100_continue(self) -> bool:
return self.their_role is CLIENT and self.client_is_waiting_for_100_continue
def start_next_cycle(self) -> None:
"""Attempt to reset our connection state for a new request/response
cycle.
If both client and server are in :data:`DONE` state, then resets them
both to :data:`IDLE` state in preparation for a new request/response
cycle on this same connection. Otherwise, raises a
:exc:`LocalProtocolError`.
See :ref:`keepalive-and-pipelining`.
"""
old_states = dict(self._cstate.states)
self._cstate.start_next_cycle()
self._request_method = None
# self.their_http_version gets left alone, since it presumably lasts
# beyond a single request/response cycle
assert not self.client_is_waiting_for_100_continue
self._respond_to_state_changes(old_states)
def _process_error(self, role: Type[Sentinel]) -> None:
old_states = dict(self._cstate.states)
self._cstate.process_error(role)
self._respond_to_state_changes(old_states)
def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
if type(event) is InformationalResponse and event.status_code == 101:
return _SWITCH_UPGRADE
if type(event) is Response:
if (
_SWITCH_CONNECT in self._cstate.pending_switch_proposals
and 200 <= event.status_code < 300
):
return _SWITCH_CONNECT
return None
# All events go through here
def _process_event(self, role: Type[Sentinel], event: Event) -> None:
# First, pass the event through the state machine to make sure it
# succeeds.
old_states = dict(self._cstate.states)
if role is CLIENT and type(event) is Request:
if event.method == b"CONNECT":
self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
if get_comma_header(event.headers, b"upgrade"):
self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
server_switch_event = None
if role is SERVER:
server_switch_event = self._server_switch_event(event)
self._cstate.process_event(role, type(event), server_switch_event)
# Then perform the updates triggered by it.
if type(event) is Request:
self._request_method = event.method
if role is self.their_role and type(event) in (
Request,
Response,
InformationalResponse,
):
event = cast(Union[Request, Response, InformationalResponse], event)
self.their_http_version = event.http_version
# Keep alive handling
#
# RFC 7230 doesn't really say what one should do if Connection: close
# shows up on a 1xx InformationalResponse. I think the idea is that
# this is not supposed to happen. In any case, if it does happen, we
# ignore it.
if type(event) in (Request, Response) and not _keep_alive(
cast(Union[Request, Response], event)
):
self._cstate.process_keep_alive_disabled()
# 100-continue
if type(event) is Request and has_expect_100_continue(event):
self.client_is_waiting_for_100_continue = True
if type(event) in (InformationalResponse, Response):
self.client_is_waiting_for_100_continue = False
if role is CLIENT and type(event) in (Data, EndOfMessage):
self.client_is_waiting_for_100_continue = False
self._respond_to_state_changes(old_states, event)
def _get_io_object(
self,
role: Type[Sentinel],
event: Optional[Event],
io_dict: Union[ReadersType, WritersType],
) -> Optional[Callable[..., Any]]:
# event may be None; it's only used when entering SEND_BODY
state = self._cstate.states[role]
if state is SEND_BODY:
# Special case: the io_dict has a dict of reader/writer factories
# that depend on the request/response framing.
framing_type, args = _body_framing(
cast(bytes, self._request_method), cast(Union[Request, Response], event)
)
return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index]
else:
# General case: the io_dict just has the appropriate reader/writer
# for this state
return io_dict.get((role, state)) # type: ignore[return-value]
# This must be called after any action that might have caused
# self._cstate.states to change.
def _respond_to_state_changes(
self,
old_states: Dict[Type[Sentinel], Type[Sentinel]],
event: Optional[Event] = None,
) -> None:
# Update reader/writer
if self.our_state != old_states[self.our_role]:
self._writer = self._get_io_object(self.our_role, event, WRITERS)
if self.their_state != old_states[self.their_role]:
self._reader = self._get_io_object(self.their_role, event, READERS)
@property
def trailing_data(self) -> Tuple[bytes, bool]:
"""Data that has been received, but not yet processed, represented as
a tuple with two elements, where the first is a byte-string containing
the unprocessed data itself, and the second is a bool that is True if
the receive connection was closed.
See :ref:`switching-protocols` for discussion of why you'd want this.
"""
return (bytes(self._receive_buffer), self._receive_buffer_closed)
def receive_data(self, data: bytes) -> None:
"""Add data to our internal receive buffer.
This does not actually do any processing on the data, just stores
it. To trigger processing, you have to call :meth:`next_event`.
Args:
data (:term:`bytes-like object`):
The new data that was just received.
Special case: If *data* is an empty byte-string like ``b""``,
then this indicates that the remote side has closed the
connection (end of file). Normally this is convenient, because
standard Python APIs like :meth:`file.read` or
:meth:`socket.recv` use ``b""`` to indicate end-of-file, while
other failures to read are indicated using other mechanisms
like raising :exc:`TimeoutError`. When using such an API you
can just blindly pass through whatever you get from ``read``
to :meth:`receive_data`, and everything will work.
But, if you have an API where reading an empty string is a
valid non-EOF condition, then you need to be aware of this and
make sure to check for such strings and avoid passing them to
:meth:`receive_data`.
Returns:
Nothing, but after calling this you should call :meth:`next_event`
to parse the newly received data.
Raises:
RuntimeError:
Raised if you pass an empty *data*, indicating EOF, and then
pass a non-empty *data*, indicating more data that somehow
arrived after the EOF.
(Calling ``receive_data(b"")`` multiple times is fine,
and equivalent to calling it once.)
"""
if data:
if self._receive_buffer_closed:
raise RuntimeError("received close, then received more data?")
self._receive_buffer += data
else:
self._receive_buffer_closed = True
def _extract_next_receive_event(
self,
) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
state = self.their_state
# We don't pause immediately when they enter DONE, because even in
# DONE state we can still process a ConnectionClosed() event. But
# if we have data in our buffer, then we definitely aren't getting
# a ConnectionClosed() immediately and we need to pause.
if state is DONE and self._receive_buffer:
return PAUSED
if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
return PAUSED
assert self._reader is not None
event = self._reader(self._receive_buffer)
if event is None:
if not self._receive_buffer and self._receive_buffer_closed:
# In some unusual cases (basically just HTTP/1.0 bodies), EOF
# triggers an actual protocol event; in that case, we want to
# return that event, and then the state will change and we'll
# get called again to generate the actual ConnectionClosed().
if hasattr(self._reader, "read_eof"):
event = self._reader.read_eof() # type: ignore[attr-defined]
else:
event = ConnectionClosed()
if event is None:
event = NEED_DATA
return event # type: ignore[no-any-return]
def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
"""Parse the next event out of our receive buffer, update our internal
state, and return it.
This is a mutating operation -- think of it like calling :func:`next`
on an iterator.
Returns:
: One of three things:
1) An event object -- see :ref:`events`.
2) The special constant :data:`NEED_DATA`, which indicates that
you need to read more data from your socket and pass it to
:meth:`receive_data` before this method will be able to return
any more events.
3) The special constant :data:`PAUSED`, which indicates that we
are not in a state where we can process incoming data (usually
because the peer has finished their part of the current
request/response cycle, and you have not yet called
:meth:`start_next_cycle`). See :ref:`flow-control` for details.
Raises:
RemoteProtocolError:
The peer has misbehaved. You should close the connection
(possibly after sending some kind of 4xx response).
Once this method returns :class:`ConnectionClosed` once, then all
subsequent calls will also return :class:`ConnectionClosed`.
If this method raises any exception besides :exc:`RemoteProtocolError`
then that's a bug -- if it happens please file a bug report!
If this method raises any exception then it also sets
:attr:`Connection.their_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
if self.their_state is ERROR:
raise RemoteProtocolError("Can't receive data when peer state is ERROR")
try:
event = self._extract_next_receive_event()
if event not in [NEED_DATA, PAUSED]:
self._process_event(self.their_role, cast(Event, event))
if event is NEED_DATA:
if len(self._receive_buffer) > self._max_incomplete_event_size:
# 431 is "Request header fields too large" which is pretty
# much the only situation where we can get here
raise RemoteProtocolError(
"Receive buffer too long", error_status_hint=431
)
if self._receive_buffer_closed:
# We're still trying to complete some event, but that's
# never going to happen because no more data is coming
raise RemoteProtocolError("peer unexpectedly closed connection")
return event
except BaseException as exc:
self._process_error(self.their_role)
if isinstance(exc, LocalProtocolError):
exc._reraise_as_remote_protocol_error()
else:
raise
def send(self, event: Event) -> Optional[bytes]:
"""Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.
Args:
event: The :ref:`event <events>` to send.
Returns:
If ``type(event) is ConnectionClosed``, then returns
``None``. Otherwise, returns a :term:`bytes-like object`.
Raises:
LocalProtocolError:
Sending this event at this time would violate our
understanding of the HTTP/1.1 protocol.
If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
"""
data_list = self.send_with_data_passthrough(event)
if data_list is None:
return None
else:
return b"".join(data_list)
def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:
"""Identical to :meth:`send`, except that in situations where
:meth:`send` returns a single :term:`bytes-like object`, this instead
returns a list of them -- and when sending a :class:`Data` event, this
list is guaranteed to contain the exact object you passed in as
:attr:`Data.data`. See :ref:`sendfile` for discussion.
"""
if self.our_state is ERROR:
raise LocalProtocolError("Can't send data when our state is ERROR")
try:
if type(event) is Response:
event = self._clean_up_response_headers_for_sending(event)
# We want to call _process_event before calling the writer,
# because if someone tries to do something invalid then this will
# give a sensible error message, while our writers all just assume
# they will only receive valid events. But, _process_event might
# change self._writer. So we have to do a little dance:
writer = self._writer
self._process_event(self.our_role, event)
if type(event) is ConnectionClosed:
return None
else:
# In any situation where writer is None, process_event should
# have raised ProtocolError
assert writer is not None
data_list: List[bytes] = []
writer(event, data_list.append)
return data_list
except:
self._process_error(self.our_role)
raise
def send_failed(self) -> None:
"""Notify the state machine that we failed to send the data it gave
us.
This causes :attr:`Connection.our_state` to immediately become
:data:`ERROR` -- see :ref:`error-handling` for discussion.
"""
self._process_error(self.our_role)
# When sending a Response, we take responsibility for a few things:
#
# - Sometimes you MUST set Connection: close. We take care of those
# times. (You can also set it yourself if you want, and if you do then
# we'll respect that and close the connection at the right time. But you
# don't have to worry about that unless you want to.)
#
# - The user has to set Content-Length if they want it. Otherwise, for
    #   responses that have bodies (e.g. not HEAD), we will automatically
    #   select the right mechanism for streaming a body of unknown length,
    #   which depends on the peer's HTTP version.
#
# This function's *only* responsibility is making sure headers are set up
# right -- everything downstream just looks at the headers. There are no
# side channels.
def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
assert type(response) is Response
headers = response.headers
need_close = False
# HEAD requests need some special handling: they always act like they
# have Content-Length: 0, and that's how _body_framing treats
# them. But their headers are supposed to match what we would send if
# the request was a GET. (Technically there is one deviation allowed:
# we're allowed to leave out the framing headers -- see
# https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
# easy to get them right.)
method_for_choosing_headers = cast(bytes, self._request_method)
if method_for_choosing_headers == b"HEAD":
method_for_choosing_headers = b"GET"
framing_type, _ = _body_framing(method_for_choosing_headers, response)
if framing_type in ("chunked", "http/1.0"):
# This response has a body of unknown length.
# If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
# If our peer is HTTP/1.0, we use no framing headers, and close the
# connection afterwards.
#
# Make sure to clear Content-Length (in principle user could have
# set both and then we ignored Content-Length b/c
# Transfer-Encoding overwrote it -- this would be naughty of them,
# but the HTTP spec says that if our peer does this then we have
# to fix it instead of erroring out, so we'll accord the user the
# same respect).
headers = set_comma_header(headers, b"content-length", [])
if self.their_http_version is None or self.their_http_version < b"1.1":
# Either we never got a valid request and are sending back an
# error (their_http_version is None), so we assume the worst;
# or else we did get a valid HTTP/1.0 request, so we know that
# they don't understand chunked encoding.
headers = set_comma_header(headers, b"transfer-encoding", [])
# This is actually redundant ATM, since currently we
# unconditionally disable keep-alive when talking to HTTP/1.0
# peers. But let's be defensive just in case we add
# Connection: keep-alive support later:
if self._request_method != b"HEAD":
need_close = True
else:
headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])
if not self._cstate.keep_alive or need_close:
# Make sure Connection: close is set
connection = set(get_comma_header(headers, b"connection"))
connection.discard(b"keep-alive")
connection.add(b"close")
headers = set_comma_header(headers, b"connection", sorted(connection))
return Response(
headers=headers,
status_code=response.status_code,
http_version=response.http_version,
reason=response.reason,
)
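# Editor's note: a minimal, illustrative sketch (not part of h11) tying the
# pieces above together as a client: send a request, feed in a canned
# response, and pull events until the peer's message is complete.
if __name__ == "__main__":
    conn = Connection(our_role=CLIENT)
    conn.send(Request(method="GET", target="/", headers=[("Host", "example.com")]))
    conn.send(EndOfMessage())
    conn.receive_data(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi")
    events = []
    while True:
        event = conn.next_event()
        events.append(event)
        if type(event) is EndOfMessage:
            break
    assert type(events[0]) is Response and events[0].status_code == 200
    assert type(events[1]) is Data and events[1].data == b"hi"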
| 26,539 | Python | 40.861199 | 88 | 0.614228 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_util.py | from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union
__all__ = [
"ProtocolError",
"LocalProtocolError",
"RemoteProtocolError",
"validate",
"bytesify",
]
class ProtocolError(Exception):
"""Exception indicating a violation of the HTTP/1.1 protocol.
    This is an abstract base class, with two concrete subclasses:
:exc:`LocalProtocolError`, which indicates that you tried to do something
that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
indicates that the remote peer tried to do something that HTTP/1.1 says is
illegal. See :ref:`error-handling` for details.
In addition to the normal :exc:`Exception` features, it has one attribute:
.. attribute:: error_status_hint
This gives a suggestion as to what status code a server might use if
this error occurred as part of a request.
For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
how you might want to respond to a misbehaving peer, if you're
implementing a server.
For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
how your peer might have responded to *you* if h11 had allowed you to
continue.
The default is 400 Bad Request, a generic catch-all for protocol
violations.
"""
def __init__(self, msg: str, error_status_hint: int = 400) -> None:
if type(self) is ProtocolError:
raise TypeError("tried to directly instantiate ProtocolError")
Exception.__init__(self, msg)
self.error_status_hint = error_status_hint
# Strategy: there are a number of public APIs where a LocalProtocolError can
# be raised (send(), all the different event constructors, ...), and only one
# public API where RemoteProtocolError can be raised
# (receive_data()). Therefore we always raise LocalProtocolError internally,
# and then receive_data will translate this into a RemoteProtocolError.
#
# Internally:
# LocalProtocolError is the generic "ProtocolError".
# Externally:
# LocalProtocolError is for local errors and RemoteProtocolError is for
# remote errors.
class LocalProtocolError(ProtocolError):
def _reraise_as_remote_protocol_error(self) -> NoReturn:
# After catching a LocalProtocolError, use this method to re-raise it
# as a RemoteProtocolError. This method must be called from inside an
# except: block.
#
# An easy way to get an equivalent RemoteProtocolError is just to
# modify 'self' in place.
self.__class__ = RemoteProtocolError # type: ignore
# But the re-raising is somewhat non-trivial -- you might think that
# now that we've modified the in-flight exception object, that just
# doing 'raise' to re-raise it would be enough. But it turns out that
# this doesn't work, because Python tracks the exception type
# (exc_info[0]) separately from the exception object (exc_info[1]),
# and we only modified the latter. So we really do need to re-raise
# the new type explicitly.
# On py3, the traceback is part of the exception object, so our
# in-place modification preserved it and we can just re-raise:
raise self
class RemoteProtocolError(ProtocolError):
pass
def validate(
regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
) -> Dict[str, bytes]:
match = regex.fullmatch(data)
if not match:
if format_args:
msg = msg.format(*format_args)
raise LocalProtocolError(msg)
return match.groupdict()
# Sentinel values
#
# - Inherit identity-based comparison and hashing from object
# - Have a nice repr
# - Have a *bonus property*: type(sentinel) is sentinel
#
# The bonus property is useful if you want to take the return value from
# next_event() and do some sort of dispatch based on type(event).
_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")
class Sentinel(type):
def __new__(
cls: Type[_T_Sentinel],
name: str,
bases: Tuple[type, ...],
namespace: Dict[str, Any],
**kwds: Any
) -> _T_Sentinel:
assert bases == (Sentinel,)
v = super().__new__(cls, name, bases, namespace, **kwds)
v.__class__ = v # type: ignore
return v
def __repr__(self) -> str:
return self.__name__
# Used for methods, request targets, HTTP versions, header names, and header
# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
# returns bytes.
def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# Fast-path:
if type(s) is bytes:
return s
if isinstance(s, str):
s = s.encode("ascii")
if isinstance(s, int):
raise TypeError("expected bytes-like object, not int")
return bytes(s)
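# Editor's note: a minimal, illustrative sketch (not part of h11) of the
# helpers above: the sentinel "bonus property", and bytesify on native
# strings and bytearrays. The _EXAMPLE name is made up.
if __name__ == "__main__":
    class _EXAMPLE(Sentinel, metaclass=Sentinel):
        pass
    assert type(_EXAMPLE) is _EXAMPLE
    assert repr(_EXAMPLE) == "_EXAMPLE"
    assert bytesify("abc") == b"abc"
    assert bytesify(bytearray(b"abc")) == b"abc"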
| 4,888 | Python | 34.948529 | 86 | 0.669394 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/_abnf.py | # We use native strings for all the re patterns, to take advantage of string
# formatting, and then convert to bytestrings when compiling the final re
# objects.
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace
# OWS = *( SP / HTAB )
# ; optional whitespace
OWS = r"[ \t]*"
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+"
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields
# field-name = token
field_name = token
# The standard says:
#
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
#
# https://tools.ietf.org/html/rfc5234#appendix-B.1
#
# VCHAR = %x21-7E
# ; visible (printing) characters
#
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string
# obs-text = %x80-FF
#
# However, the standard definition of field-content is WRONG! It disallows
# fields containing a single visible character surrounded by whitespace,
# e.g. "foo a bar".
#
# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
#
# So our definition of field_content attempts to fix it up...
#
# Also, we allow lots of control characters, because apparently people assume
# that they're legal in practice (e.g., google analytics makes cookies with
# \x01 in them!):
# https://github.com/python-hyper/h11/issues/57
# We still don't allow NUL or whitespace, because those are often treated as
# meta-characters and letting them through can lead to nasty issues like SSRF.
vchar = r"[\x21-\x7e]"
vchar_or_obs_text = r"[^\x00\s]"
field_vchar = vchar_or_obs_text
field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals())
# We handle obs-fold at a different level, and our fixed-up field_content
# already grows to swallow the whole value, so ? instead of *
field_value = r"({field_content})?".format(**globals())
# header-field = field-name ":" OWS field-value OWS
header_field = (
r"(?P<field_name>{field_name})"
r":"
r"{OWS}"
r"(?P<field_value>{field_value})"
r"{OWS}".format(**globals())
)
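# Illustrative check (assumed script usage): compiled to a bytes regex as the
# top-of-file comment describes, the pattern keeps single characters
# surrounded by whitespace inside the value (the errata fix above) while OWS
# strips the surrounding whitespace itself.
if __name__ == "__main__":
    import re
    header_field_re = re.compile(header_field.encode("ascii"))
    m = header_field_re.fullmatch(b"X-Demo:  foo a bar ")
    assert m is not None
    assert m.group("field_name") == b"X-Demo"
    assert m.group("field_value") == b"foo a bar"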
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
#
# request-line = method SP request-target SP HTTP-version CRLF
# method = token
# HTTP-version = HTTP-name "/" DIGIT "." DIGIT
# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive
#
# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
# URL, host+port (for CONNECT), or even "*", but in any case we are guaranteed
# that it consists of the visible printing characters.
method = token
request_target = r"{vchar}+".format(**globals())
http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])"
request_line = (
r"(?P<method>{method})"
r" "
r"(?P<target>{request_target})"
r" "
r"{http_version}".format(**globals())
)
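# Illustration (assumed script usage): all four request-target shapes listed
# above are accepted; pairing targets with the right method is not this
# pattern's job.
if __name__ == "__main__":
    import re
    request_line_re = re.compile(request_line.encode("ascii"))
    for target in [b"/index.html", b"http://example.com/", b"example.com:443", b"*"]:
        assert request_line_re.fullmatch(b"GET " + target + b" HTTP/1.1")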
# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
#
# status-line = HTTP-version SP status-code SP reason-phrase CRLF
# status-code = 3DIGIT
# reason-phrase = *( HTAB / SP / VCHAR / obs-text )
status_code = r"[0-9]{3}"
reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
status_line = (
r"{http_version}"
r" "
r"(?P<status_code>{status_code})"
# However, there are apparently a few too many servers out there that just
# leave out the reason phrase:
# https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
# https://github.com/seanmonstar/httparse/issues/29
# so make it optional. ?: is a non-capturing group.
r"(?: (?P<reason>{reason_phrase}))?".format(**globals())
)
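# Quick sanity sketch (assumed script usage): the non-capturing optional group
# really does tolerate servers that omit the reason phrase.
if __name__ == "__main__":
    import re
    status_line_re = re.compile(status_line.encode("ascii"))
    assert status_line_re.fullmatch(b"HTTP/1.1 200 OK").group("reason") == b"OK"
    assert status_line_re.fullmatch(b"HTTP/1.1 404").group("reason") is None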
HEXDIG = r"[0-9A-Fa-f]"
# Actually
#
# chunk-size = 1*HEXDIG
#
# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
# Actually
#
# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
#
# but we aren't parsing the things so we don't really care.
chunk_ext = r";.*"
chunk_header = (
r"(?P<chunk_size>{chunk_size})"
r"(?P<chunk_ext>{chunk_ext})?"
r"{OWS}\r\n".format(
**globals()
    ) # Even though the specification does not allow extra whitespace here,
    # we are lenient about trailing whitespace because some servers in the wild send it.
)
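# Sketch (assumed script usage): chunk extensions are swallowed unparsed, and
# the trailing whitespace mentioned above is tolerated before the CRLF.
if __name__ == "__main__":
    import re
    chunk_header_re = re.compile(chunk_header.encode("ascii"))
    m = chunk_header_re.fullmatch(b"1a;name=value \r\n")
    assert m is not None
    assert m.group("chunk_size") == b"1a"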
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_state.py
import pytest
from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .._state import (
_SWITCH_CONNECT,
_SWITCH_UPGRADE,
CLIENT,
CLOSED,
ConnectionState,
DONE,
IDLE,
MIGHT_SWITCH_PROTOCOL,
MUST_CLOSE,
SEND_BODY,
SEND_RESPONSE,
SERVER,
SWITCHED_PROTOCOL,
)
from .._util import LocalProtocolError
def test_ConnectionState() -> None:
cs = ConnectionState()
# Basic event-triggered transitions
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
cs.process_event(CLIENT, Request)
# The SERVER-Request special case:
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
# Illegal transitions raise an error and nothing happens
with pytest.raises(LocalProtocolError):
cs.process_event(CLIENT, Request)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
cs.process_event(SERVER, InformationalResponse)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
cs.process_event(SERVER, Response)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
cs.process_event(CLIENT, EndOfMessage)
cs.process_event(SERVER, EndOfMessage)
assert cs.states == {CLIENT: DONE, SERVER: DONE}
# State-triggered transition
cs.process_event(SERVER, ConnectionClosed)
assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
def test_ConnectionState_keep_alive() -> None:
# keep_alive = False
cs = ConnectionState()
cs.process_event(CLIENT, Request)
cs.process_keep_alive_disabled()
cs.process_event(CLIENT, EndOfMessage)
assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE}
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
def test_ConnectionState_keep_alive_in_DONE() -> None:
# Check that if keep_alive is disabled when the CLIENT is already in DONE,
# then this is sufficient to immediately trigger the DONE -> MUST_CLOSE
# transition
cs = ConnectionState()
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
assert cs.states[CLIENT] is DONE
cs.process_keep_alive_disabled()
assert cs.states[CLIENT] is MUST_CLOSE
def test_ConnectionState_switch_denied() -> None:
for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE):
for deny_early in (True, False):
cs = ConnectionState()
cs.process_client_switch_proposal(switch_type)
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, Data)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
assert switch_type in cs.pending_switch_proposals
if deny_early:
# before client reaches DONE
cs.process_event(SERVER, Response)
assert not cs.pending_switch_proposals
cs.process_event(CLIENT, EndOfMessage)
if deny_early:
assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
else:
assert cs.states == {
CLIENT: MIGHT_SWITCH_PROTOCOL,
SERVER: SEND_RESPONSE,
}
cs.process_event(SERVER, InformationalResponse)
assert cs.states == {
CLIENT: MIGHT_SWITCH_PROTOCOL,
SERVER: SEND_RESPONSE,
}
cs.process_event(SERVER, Response)
assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
assert not cs.pending_switch_proposals
_response_type_for_switch = {
_SWITCH_UPGRADE: InformationalResponse,
_SWITCH_CONNECT: Response,
None: Response,
}
def test_ConnectionState_protocol_switch_accepted() -> None:
for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]:
cs = ConnectionState()
cs.process_client_switch_proposal(switch_event)
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, Data)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
cs.process_event(CLIENT, EndOfMessage)
assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
cs.process_event(SERVER, InformationalResponse)
assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event)
assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
def test_ConnectionState_double_protocol_switch() -> None:
# CONNECT + Upgrade is legal! Very silly, but legal. So we support
# it. Because sometimes doing the silly thing is easier than not.
for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]:
cs = ConnectionState()
cs.process_client_switch_proposal(_SWITCH_UPGRADE)
cs.process_client_switch_proposal(_SWITCH_CONNECT)
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
cs.process_event(
SERVER, _response_type_for_switch[server_switch], server_switch
)
if server_switch is None:
assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
else:
assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
def test_ConnectionState_inconsistent_protocol_switch() -> None:
for client_switches, server_switch in [
([], _SWITCH_CONNECT),
([], _SWITCH_UPGRADE),
([_SWITCH_UPGRADE], _SWITCH_CONNECT),
([_SWITCH_CONNECT], _SWITCH_UPGRADE),
]:
cs = ConnectionState()
for client_switch in client_switches: # type: ignore[attr-defined]
cs.process_client_switch_proposal(client_switch)
cs.process_event(CLIENT, Request)
with pytest.raises(LocalProtocolError):
cs.process_event(SERVER, Response, server_switch)
def test_ConnectionState_keepalive_protocol_switch_interaction() -> None:
# keep_alive=False + pending_switch_proposals
cs = ConnectionState()
cs.process_client_switch_proposal(_SWITCH_UPGRADE)
cs.process_event(CLIENT, Request)
cs.process_keep_alive_disabled()
cs.process_event(CLIENT, Data)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
# the protocol switch "wins"
cs.process_event(CLIENT, EndOfMessage)
assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
# but when the server denies the request, keep_alive comes back into play
cs.process_event(SERVER, Response)
assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY}
def test_ConnectionState_reuse() -> None:
cs = ConnectionState()
with pytest.raises(LocalProtocolError):
cs.start_next_cycle()
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
with pytest.raises(LocalProtocolError):
cs.start_next_cycle()
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
cs.start_next_cycle()
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
# No keepalive
cs.process_event(CLIENT, Request)
cs.process_keep_alive_disabled()
cs.process_event(CLIENT, EndOfMessage)
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
with pytest.raises(LocalProtocolError):
cs.start_next_cycle()
# One side closed
cs = ConnectionState()
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
cs.process_event(CLIENT, ConnectionClosed)
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
with pytest.raises(LocalProtocolError):
cs.start_next_cycle()
    # Successful protocol switch
cs = ConnectionState()
cs.process_client_switch_proposal(_SWITCH_UPGRADE)
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE)
with pytest.raises(LocalProtocolError):
cs.start_next_cycle()
# Failed protocol switch
cs = ConnectionState()
cs.process_client_switch_proposal(_SWITCH_UPGRADE)
cs.process_event(CLIENT, Request)
cs.process_event(CLIENT, EndOfMessage)
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
cs.start_next_cycle()
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
def test_server_request_is_illegal() -> None:
# There used to be a bug in how we handled the Request special case that
# made this allowed...
cs = ConnectionState()
with pytest.raises(LocalProtocolError):
cs.process_event(SERVER, Request)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/helpers.py
from typing import cast, List, Type, Union, ValuesView
from .._connection import Connection, NEED_DATA, PAUSED
from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .._state import CLIENT, CLOSED, DONE, MUST_CLOSE, SERVER
from .._util import Sentinel
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
def get_all_events(conn: Connection) -> List[Event]:
got_events = []
while True:
event = conn.next_event()
if event in (NEED_DATA, PAUSED):
break
event = cast(Event, event)
got_events.append(event)
if type(event) is ConnectionClosed:
break
return got_events
def receive_and_get(conn: Connection, data: bytes) -> List[Event]:
conn.receive_data(data)
return get_all_events(conn)
# Merges adjacent Data events, converts payloads to bytestrings, and removes
# chunk boundaries.
def normalize_data_events(in_events: List[Event]) -> List[Event]:
out_events: List[Event] = []
for event in in_events:
if type(event) is Data:
event = Data(data=bytes(event.data), chunk_start=False, chunk_end=False)
if out_events and type(out_events[-1]) is type(event) is Data:
out_events[-1] = Data(
data=out_events[-1].data + event.data,
chunk_start=out_events[-1].chunk_start,
chunk_end=out_events[-1].chunk_end,
)
else:
out_events.append(event)
return out_events
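# Tiny demonstration (assumed script usage): adjacent Data events are merged
# and their chunk boundaries discarded, exactly as described above.
if __name__ == "__main__":
    merged = normalize_data_events(
        [Data(data=b"he", chunk_start=True), Data(data=b"llo", chunk_end=True)]
    )
    assert merged == [Data(data=b"hello")]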
# Given that we want to write tests that push some events through a Connection
# and check that its state updates appropriately... we might as well make a
# habit
# of pushing them through two Connections with a fake network link in
# between.
class ConnectionPair:
def __init__(self) -> None:
self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)}
self.other = {CLIENT: SERVER, SERVER: CLIENT}
@property
def conns(self) -> ValuesView[Connection]:
return self.conn.values()
    # expect="match" means the peer should decode exactly the events we sent;
    # pass expect=[...] to assert a different expected sequence.
def send(
self,
role: Type[Sentinel],
send_events: Union[List[Event], Event],
expect: Union[List[Event], Event, Literal["match"]] = "match",
) -> bytes:
if not isinstance(send_events, list):
send_events = [send_events]
data = b""
closed = False
for send_event in send_events:
new_data = self.conn[role].send(send_event)
if new_data is None:
closed = True
else:
data += new_data
# send uses b"" to mean b"", and None to mean closed
# receive uses b"" to mean closed, and None to mean "try again"
# so we have to translate between the two conventions
if data:
self.conn[self.other[role]].receive_data(data)
if closed:
self.conn[self.other[role]].receive_data(b"")
got_events = get_all_events(self.conn[self.other[role]])
if expect == "match":
expect = send_events
if not isinstance(expect, list):
expect = [expect]
assert got_events == expect
return data
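# Minimal usage sketch (assumed script usage): send() pushes events across the
# fake link and, with the default expect="match", asserts the peer decodes
# exactly the events that were sent.
if __name__ == "__main__":
    p = ConnectionPair()
    p.send(
        CLIENT,
        [
            Request(method="GET", target="/", headers=[("Host", "example.com")]),
            EndOfMessage(),
        ],
    )
    assert p.conn[SERVER].their_state is DONE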
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_headers.py
import pytest
from .._events import Request
from .._headers import (
get_comma_header,
has_expect_100_continue,
Headers,
normalize_and_validate,
set_comma_header,
)
from .._util import LocalProtocolError
def test_normalize_and_validate() -> None:
assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")]
assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")]
# no leading/trailing whitespace in names
with pytest.raises(LocalProtocolError):
normalize_and_validate([(b"foo ", "bar")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([(b" foo", "bar")])
# no weird characters in names
with pytest.raises(LocalProtocolError) as excinfo:
normalize_and_validate([(b"foo bar", b"baz")])
assert "foo bar" in str(excinfo.value)
with pytest.raises(LocalProtocolError):
normalize_and_validate([(b"foo\x00bar", b"baz")])
# Not even 8-bit characters:
with pytest.raises(LocalProtocolError):
normalize_and_validate([(b"foo\xffbar", b"baz")])
# And not even the control characters we allow in values:
with pytest.raises(LocalProtocolError):
normalize_and_validate([(b"foo\x01bar", b"baz")])
# no return or NUL characters in values
with pytest.raises(LocalProtocolError) as excinfo:
normalize_and_validate([("foo", "bar\rbaz")])
assert "bar\\rbaz" in str(excinfo.value)
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", "bar\nbaz")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", "bar\x00baz")])
# no leading/trailing whitespace
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", "barbaz ")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", " barbaz")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", "barbaz\t")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("foo", "\tbarbaz")])
# content-length
assert normalize_and_validate([("Content-Length", "1")]) == [
(b"content-length", b"1")
]
with pytest.raises(LocalProtocolError):
normalize_and_validate([("Content-Length", "asdf")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("Content-Length", "1x")])
with pytest.raises(LocalProtocolError):
normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")])
assert normalize_and_validate(
[("Content-Length", "0"), ("Content-Length", "0")]
) == [(b"content-length", b"0")]
assert normalize_and_validate([("Content-Length", "0 , 0")]) == [
(b"content-length", b"0")
]
with pytest.raises(LocalProtocolError):
normalize_and_validate(
[("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")]
)
with pytest.raises(LocalProtocolError):
normalize_and_validate([("Content-Length", "1 , 1,2")])
# transfer-encoding
assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [
(b"transfer-encoding", b"chunked")
]
assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [
(b"transfer-encoding", b"chunked")
]
with pytest.raises(LocalProtocolError) as excinfo:
normalize_and_validate([("Transfer-Encoding", "gzip")])
assert excinfo.value.error_status_hint == 501 # Not Implemented
with pytest.raises(LocalProtocolError) as excinfo:
normalize_and_validate(
[("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")]
)
assert excinfo.value.error_status_hint == 501 # Not Implemented
def test_get_set_comma_header() -> None:
headers = normalize_and_validate(
[
("Connection", "close"),
("whatever", "something"),
("connectiON", "fOo,, , BAR"),
]
)
assert get_comma_header(headers, b"connection") == [b"close", b"foo", b"bar"]
headers = set_comma_header(headers, b"newthing", ["a", "b"]) # type: ignore
with pytest.raises(LocalProtocolError):
set_comma_header(headers, b"newthing", [" a", "b"]) # type: ignore
assert headers == [
(b"connection", b"close"),
(b"whatever", b"something"),
(b"connection", b"fOo,, , BAR"),
(b"newthing", b"a"),
(b"newthing", b"b"),
]
headers = set_comma_header(headers, b"whatever", ["different thing"]) # type: ignore
assert headers == [
(b"connection", b"close"),
(b"connection", b"fOo,, , BAR"),
(b"newthing", b"a"),
(b"newthing", b"b"),
(b"whatever", b"different thing"),
]
def test_has_100_continue() -> None:
assert has_expect_100_continue(
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Expect", "100-continue")],
)
)
assert not has_expect_100_continue(
Request(method="GET", target="/", headers=[("Host", "example.com")])
)
# Case insensitive
assert has_expect_100_continue(
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Expect", "100-Continue")],
)
)
# Doesn't work in HTTP/1.0
assert not has_expect_100_continue(
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Expect", "100-continue")],
http_version="1.0",
)
)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_connection.py
from typing import Any, cast, Dict, List, Optional, Tuple, Type
import pytest
from .._connection import _body_framing, _keep_alive, Connection, NEED_DATA, PAUSED
from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .._state import (
CLIENT,
CLOSED,
DONE,
ERROR,
IDLE,
MIGHT_SWITCH_PROTOCOL,
MUST_CLOSE,
SEND_BODY,
SEND_RESPONSE,
SERVER,
SWITCHED_PROTOCOL,
)
from .._util import LocalProtocolError, RemoteProtocolError, Sentinel
from .helpers import ConnectionPair, get_all_events, receive_and_get
def test__keep_alive() -> None:
assert _keep_alive(
Request(method="GET", target="/", headers=[("Host", "Example.com")])
)
assert not _keep_alive(
Request(
method="GET",
target="/",
headers=[("Host", "Example.com"), ("Connection", "close")],
)
)
assert not _keep_alive(
Request(
method="GET",
target="/",
headers=[("Host", "Example.com"), ("Connection", "a, b, cLOse, foo")],
)
)
assert not _keep_alive(
Request(method="GET", target="/", headers=[], http_version="1.0") # type: ignore[arg-type]
)
assert _keep_alive(Response(status_code=200, headers=[])) # type: ignore[arg-type]
assert not _keep_alive(Response(status_code=200, headers=[("Connection", "close")]))
assert not _keep_alive(
Response(status_code=200, headers=[("Connection", "a, b, cLOse, foo")])
)
assert not _keep_alive(Response(status_code=200, headers=[], http_version="1.0")) # type: ignore[arg-type]
def test__body_framing() -> None:
def headers(cl: Optional[int], te: bool) -> List[Tuple[str, str]]:
headers = []
if cl is not None:
headers.append(("Content-Length", str(cl)))
if te:
headers.append(("Transfer-Encoding", "chunked"))
return headers
def resp(
status_code: int = 200, cl: Optional[int] = None, te: bool = False
) -> Response:
return Response(status_code=status_code, headers=headers(cl, te))
def req(cl: Optional[int] = None, te: bool = False) -> Request:
h = headers(cl, te)
h += [("Host", "example.com")]
return Request(method="GET", target="/", headers=h)
# Special cases where the headers are ignored:
for kwargs in [{}, {"cl": 100}, {"te": True}, {"cl": 100, "te": True}]:
kwargs = cast(Dict[str, Any], kwargs)
for meth, r in [
(b"HEAD", resp(**kwargs)),
(b"GET", resp(status_code=204, **kwargs)),
(b"GET", resp(status_code=304, **kwargs)),
]:
assert _body_framing(meth, r) == ("content-length", (0,))
# Transfer-encoding
for kwargs in [{"te": True}, {"cl": 100, "te": True}]:
kwargs = cast(Dict[str, Any], kwargs)
for meth, r in [(None, req(**kwargs)), (b"GET", resp(**kwargs))]: # type: ignore
assert _body_framing(meth, r) == ("chunked", ())
# Content-Length
for meth, r in [(None, req(cl=100)), (b"GET", resp(cl=100))]: # type: ignore
assert _body_framing(meth, r) == ("content-length", (100,))
# No headers
assert _body_framing(None, req()) == ("content-length", (0,)) # type: ignore
assert _body_framing(b"GET", resp()) == ("http/1.0", ())
def test_Connection_basics_and_content_length() -> None:
with pytest.raises(ValueError):
Connection("CLIENT") # type: ignore
p = ConnectionPair()
assert p.conn[CLIENT].our_role is CLIENT
assert p.conn[CLIENT].their_role is SERVER
assert p.conn[SERVER].our_role is SERVER
assert p.conn[SERVER].their_role is CLIENT
data = p.send(
CLIENT,
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Content-Length", "10")],
),
)
assert data == (
b"GET / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 10\r\n\r\n"
)
for conn in p.conns:
assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
assert p.conn[CLIENT].our_state is SEND_BODY
assert p.conn[CLIENT].their_state is SEND_RESPONSE
assert p.conn[SERVER].our_state is SEND_RESPONSE
assert p.conn[SERVER].their_state is SEND_BODY
assert p.conn[CLIENT].their_http_version is None
assert p.conn[SERVER].their_http_version == b"1.1"
data = p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type]
assert data == b"HTTP/1.1 100 \r\n\r\n"
data = p.send(SERVER, Response(status_code=200, headers=[("Content-Length", "11")]))
assert data == b"HTTP/1.1 200 \r\nContent-Length: 11\r\n\r\n"
for conn in p.conns:
assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
assert p.conn[CLIENT].their_http_version == b"1.1"
assert p.conn[SERVER].their_http_version == b"1.1"
data = p.send(CLIENT, Data(data=b"12345"))
assert data == b"12345"
data = p.send(
CLIENT, Data(data=b"67890"), expect=[Data(data=b"67890"), EndOfMessage()]
)
assert data == b"67890"
data = p.send(CLIENT, EndOfMessage(), expect=[])
assert data == b""
for conn in p.conns:
assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
data = p.send(SERVER, Data(data=b"1234567890"))
assert data == b"1234567890"
data = p.send(SERVER, Data(data=b"1"), expect=[Data(data=b"1"), EndOfMessage()])
assert data == b"1"
data = p.send(SERVER, EndOfMessage(), expect=[])
assert data == b""
for conn in p.conns:
assert conn.states == {CLIENT: DONE, SERVER: DONE}
def test_chunked() -> None:
p = ConnectionPair()
p.send(
CLIENT,
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
),
)
data = p.send(CLIENT, Data(data=b"1234567890", chunk_start=True, chunk_end=True))
assert data == b"a\r\n1234567890\r\n"
data = p.send(CLIENT, Data(data=b"abcde", chunk_start=True, chunk_end=True))
assert data == b"5\r\nabcde\r\n"
data = p.send(CLIENT, Data(data=b""), expect=[])
assert data == b""
data = p.send(CLIENT, EndOfMessage(headers=[("hello", "there")]))
assert data == b"0\r\nhello: there\r\n\r\n"
p.send(
SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
)
p.send(SERVER, Data(data=b"54321", chunk_start=True, chunk_end=True))
p.send(SERVER, Data(data=b"12345", chunk_start=True, chunk_end=True))
p.send(SERVER, EndOfMessage())
for conn in p.conns:
assert conn.states == {CLIENT: DONE, SERVER: DONE}
def test_chunk_boundaries() -> None:
conn = Connection(our_role=SERVER)
request = (
b"POST / HTTP/1.1\r\n"
b"Host: example.com\r\n"
b"Transfer-Encoding: chunked\r\n"
b"\r\n"
)
conn.receive_data(request)
assert conn.next_event() == Request(
method="POST",
target="/",
headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
)
assert conn.next_event() is NEED_DATA
conn.receive_data(b"5\r\nhello\r\n")
assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
conn.receive_data(b"5\r\nhel")
assert conn.next_event() == Data(data=b"hel", chunk_start=True, chunk_end=False)
conn.receive_data(b"l")
assert conn.next_event() == Data(data=b"l", chunk_start=False, chunk_end=False)
conn.receive_data(b"o\r\n")
assert conn.next_event() == Data(data=b"o", chunk_start=False, chunk_end=True)
conn.receive_data(b"5\r\nhello")
assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
conn.receive_data(b"\r\n")
assert conn.next_event() == NEED_DATA
conn.receive_data(b"0\r\n\r\n")
assert conn.next_event() == EndOfMessage()
def test_client_talking_to_http10_server() -> None:
c = Connection(CLIENT)
c.send(Request(method="GET", target="/", headers=[("Host", "example.com")]))
c.send(EndOfMessage())
assert c.our_state is DONE
# No content-length, so Http10 framing for body
assert receive_and_get(c, b"HTTP/1.0 200 OK\r\n\r\n") == [
Response(status_code=200, headers=[], http_version="1.0", reason=b"OK") # type: ignore[arg-type]
]
assert c.our_state is MUST_CLOSE
assert receive_and_get(c, b"12345") == [Data(data=b"12345")]
assert receive_and_get(c, b"67890") == [Data(data=b"67890")]
assert receive_and_get(c, b"") == [EndOfMessage(), ConnectionClosed()]
assert c.their_state is CLOSED
def test_server_talking_to_http10_client() -> None:
c = Connection(SERVER)
# No content-length, so no body
# NB: no host header
assert receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") == [
Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type]
EndOfMessage(),
]
assert c.their_state is MUST_CLOSE
    # We automatically send Connection: close back at them
assert (
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
== b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
)
assert c.send(Data(data=b"12345")) == b"12345"
assert c.send(EndOfMessage()) == b""
assert c.our_state is MUST_CLOSE
# Check that it works if they do send Content-Length
c = Connection(SERVER)
# NB: no host header
assert receive_and_get(c, b"POST / HTTP/1.0\r\nContent-Length: 10\r\n\r\n1") == [
Request(
method="POST",
target="/",
headers=[("Content-Length", "10")],
http_version="1.0",
),
Data(data=b"1"),
]
assert receive_and_get(c, b"234567890") == [Data(data=b"234567890"), EndOfMessage()]
assert c.their_state is MUST_CLOSE
assert receive_and_get(c, b"") == [ConnectionClosed()]
def test_automatic_transfer_encoding_in_response() -> None:
# Check that in responses, the user can specify either Transfer-Encoding:
# chunked or no framing at all, and in both cases we automatically select
# the right option depending on whether the peer speaks HTTP/1.0 or
# HTTP/1.1
for user_headers in [
[("Transfer-Encoding", "chunked")],
[],
# In fact, this even works if Content-Length is set,
# because if both are set then Transfer-Encoding wins
[("Transfer-Encoding", "chunked"), ("Content-Length", "100")],
]:
user_headers = cast(List[Tuple[str, str]], user_headers)
p = ConnectionPair()
p.send(
CLIENT,
[
Request(method="GET", target="/", headers=[("Host", "example.com")]),
EndOfMessage(),
],
)
# When speaking to HTTP/1.1 client, all of the above cases get
# normalized to Transfer-Encoding: chunked
p.send(
SERVER,
Response(status_code=200, headers=user_headers),
expect=Response(
status_code=200, headers=[("Transfer-Encoding", "chunked")]
),
)
# When speaking to HTTP/1.0 client, all of the above cases get
# normalized to no-framing-headers
c = Connection(SERVER)
receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
assert (
c.send(Response(status_code=200, headers=user_headers))
== b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
)
assert c.send(Data(data=b"12345")) == b"12345"
def test_automagic_connection_close_handling() -> None:
p = ConnectionPair()
# If the user explicitly sets Connection: close, then we notice and
# respect it
p.send(
CLIENT,
[
Request(
method="GET",
target="/",
headers=[("Host", "example.com"), ("Connection", "close")],
),
EndOfMessage(),
],
)
for conn in p.conns:
assert conn.states[CLIENT] is MUST_CLOSE
# And if the client sets it, the server automatically echoes it back
p.send(
SERVER,
# no header here...
[Response(status_code=204, headers=[]), EndOfMessage()], # type: ignore[arg-type]
# ...but oh look, it arrived anyway
expect=[
Response(status_code=204, headers=[("connection", "close")]),
EndOfMessage(),
],
)
for conn in p.conns:
assert conn.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
def test_100_continue() -> None:
def setup() -> ConnectionPair:
p = ConnectionPair()
p.send(
CLIENT,
Request(
method="GET",
target="/",
headers=[
("Host", "example.com"),
("Content-Length", "100"),
("Expect", "100-continue"),
],
),
)
for conn in p.conns:
assert conn.client_is_waiting_for_100_continue
assert not p.conn[CLIENT].they_are_waiting_for_100_continue
assert p.conn[SERVER].they_are_waiting_for_100_continue
return p
# Disabled by 100 Continue
p = setup()
p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type]
for conn in p.conns:
assert not conn.client_is_waiting_for_100_continue
assert not conn.they_are_waiting_for_100_continue
# Disabled by a real response
p = setup()
p.send(
SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
)
for conn in p.conns:
assert not conn.client_is_waiting_for_100_continue
assert not conn.they_are_waiting_for_100_continue
# Disabled by the client going ahead and sending stuff anyway
p = setup()
p.send(CLIENT, Data(data=b"12345"))
for conn in p.conns:
assert not conn.client_is_waiting_for_100_continue
assert not conn.they_are_waiting_for_100_continue
def test_max_incomplete_event_size_countermeasure() -> None:
# Infinitely long headers are definitely not okay
c = Connection(SERVER)
c.receive_data(b"GET / HTTP/1.0\r\nEndless: ")
assert c.next_event() is NEED_DATA
with pytest.raises(RemoteProtocolError):
while True:
c.receive_data(b"a" * 1024)
c.next_event()
# Checking that the same header is accepted / rejected depending on the
# max_incomplete_event_size setting:
c = Connection(SERVER, max_incomplete_event_size=5000)
c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
c.receive_data(b"a" * 4000)
c.receive_data(b"\r\n\r\n")
assert get_all_events(c) == [
Request(
method="GET", target="/", http_version="1.0", headers=[("big", "a" * 4000)]
),
EndOfMessage(),
]
c = Connection(SERVER, max_incomplete_event_size=4000)
c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
c.receive_data(b"a" * 4000)
with pytest.raises(RemoteProtocolError):
c.next_event()
    # Temporarily exceeding the size limit is fine, as long as it's done with
# complete events:
c = Connection(SERVER, max_incomplete_event_size=5000)
c.receive_data(b"GET / HTTP/1.0\r\nContent-Length: 10000")
c.receive_data(b"\r\n\r\n" + b"a" * 10000)
assert get_all_events(c) == [
Request(
method="GET",
target="/",
http_version="1.0",
headers=[("Content-Length", "10000")],
),
Data(data=b"a" * 10000),
EndOfMessage(),
]
c = Connection(SERVER, max_incomplete_event_size=100)
# Two pipelined requests to create a way-too-big receive buffer... but
# it's fine because we're not checking
c.receive_data(
b"GET /1 HTTP/1.1\r\nHost: a\r\n\r\n"
b"GET /2 HTTP/1.1\r\nHost: b\r\n\r\n" + b"X" * 1000
)
assert get_all_events(c) == [
Request(method="GET", target="/1", headers=[("host", "a")]),
EndOfMessage(),
]
# Even more data comes in, still no problem
c.receive_data(b"X" * 1000)
# We can respond and reuse to get the second pipelined request
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
c.start_next_cycle()
assert get_all_events(c) == [
Request(method="GET", target="/2", headers=[("host", "b")]),
EndOfMessage(),
]
# But once we unpause and try to read the next message, and find that it's
# incomplete and the buffer is *still* way too large, then *that's* a
# problem:
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
c.start_next_cycle()
with pytest.raises(RemoteProtocolError):
c.next_event()
def test_reuse_simple() -> None:
p = ConnectionPair()
p.send(
CLIENT,
[Request(method="GET", target="/", headers=[("Host", "a")]), EndOfMessage()],
)
p.send(
SERVER,
[
Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
EndOfMessage(),
],
)
for conn in p.conns:
assert conn.states == {CLIENT: DONE, SERVER: DONE}
conn.start_next_cycle()
p.send(
CLIENT,
[
Request(method="DELETE", target="/foo", headers=[("Host", "a")]),
EndOfMessage(),
],
)
p.send(
SERVER,
[
Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
EndOfMessage(),
],
)
def test_pipelining() -> None:
# Client doesn't support pipelining, so we have to do this by hand
c = Connection(SERVER)
assert c.next_event() is NEED_DATA
# 3 requests all bunched up
c.receive_data(
b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
b"12345"
b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
b"67890"
b"GET /3 HTTP/1.1\r\nHost: a.com\r\n\r\n"
)
assert get_all_events(c) == [
Request(
method="GET",
target="/1",
headers=[("Host", "a.com"), ("Content-Length", "5")],
),
Data(data=b"12345"),
EndOfMessage(),
]
assert c.their_state is DONE
assert c.our_state is SEND_RESPONSE
assert c.next_event() is PAUSED
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
assert c.their_state is DONE
assert c.our_state is DONE
c.start_next_cycle()
assert get_all_events(c) == [
Request(
method="GET",
target="/2",
headers=[("Host", "a.com"), ("Content-Length", "5")],
),
Data(data=b"67890"),
EndOfMessage(),
]
assert c.next_event() is PAUSED
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
c.start_next_cycle()
assert get_all_events(c) == [
Request(method="GET", target="/3", headers=[("Host", "a.com")]),
EndOfMessage(),
]
# Doesn't pause this time, no trailing data
assert c.next_event() is NEED_DATA
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
# Arrival of more data triggers pause
assert c.next_event() is NEED_DATA
c.receive_data(b"SADF")
assert c.next_event() is PAUSED
assert c.trailing_data == (b"SADF", False)
# If EOF arrives while paused, we don't see that either:
c.receive_data(b"")
assert c.trailing_data == (b"SADF", True)
assert c.next_event() is PAUSED
c.receive_data(b"")
assert c.next_event() is PAUSED
# Can't call receive_data with non-empty buf after closing it
with pytest.raises(RuntimeError):
c.receive_data(b"FDSA")
def test_protocol_switch() -> None:
for (req, deny, accept) in [
(
Request(
method="CONNECT",
target="example.com:443",
headers=[("Host", "foo"), ("Content-Length", "1")],
),
Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
),
(
Request(
method="GET",
target="/",
headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
),
Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
InformationalResponse(status_code=101, headers=[("Upgrade", "a")]),
),
(
Request(
method="CONNECT",
target="example.com:443",
headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
),
Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
# Accept CONNECT, not upgrade
Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
),
(
Request(
method="CONNECT",
target="example.com:443",
headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
),
Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
# Accept Upgrade, not CONNECT
InformationalResponse(status_code=101, headers=[("Upgrade", "b")]),
),
]:
def setup() -> ConnectionPair:
p = ConnectionPair()
p.send(CLIENT, req)
# No switch-related state change stuff yet; the client has to
# finish the request before that kicks in
for conn in p.conns:
assert conn.states[CLIENT] is SEND_BODY
p.send(CLIENT, [Data(data=b"1"), EndOfMessage()])
for conn in p.conns:
assert conn.states[CLIENT] is MIGHT_SWITCH_PROTOCOL
assert p.conn[SERVER].next_event() is PAUSED
return p
# Test deny case
p = setup()
p.send(SERVER, deny)
for conn in p.conns:
assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
p.send(SERVER, EndOfMessage())
# Check that re-use is still allowed after a denial
for conn in p.conns:
conn.start_next_cycle()
# Test accept case
p = setup()
p.send(SERVER, accept)
for conn in p.conns:
assert conn.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
conn.receive_data(b"123")
assert conn.next_event() is PAUSED
conn.receive_data(b"456")
assert conn.next_event() is PAUSED
assert conn.trailing_data == (b"123456", False)
# Pausing in might-switch, then recovery
# (weird artificial case where the trailing data actually is valid
# HTTP for some reason, because this makes it easier to test the state
# logic)
p = setup()
sc = p.conn[SERVER]
sc.receive_data(b"GET / HTTP/1.0\r\n\r\n")
assert sc.next_event() is PAUSED
assert sc.trailing_data == (b"GET / HTTP/1.0\r\n\r\n", False)
sc.send(deny)
assert sc.next_event() is PAUSED
sc.send(EndOfMessage())
sc.start_next_cycle()
assert get_all_events(sc) == [
Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type]
EndOfMessage(),
]
# When we're DONE, have no trailing data, and the connection gets
# closed, we report ConnectionClosed(). When we're in might-switch or
# switched, we don't.
p = setup()
sc = p.conn[SERVER]
sc.receive_data(b"")
assert sc.next_event() is PAUSED
assert sc.trailing_data == (b"", True)
p.send(SERVER, accept)
assert sc.next_event() is PAUSED
p = setup()
sc = p.conn[SERVER]
sc.receive_data(b"")
assert sc.next_event() is PAUSED
sc.send(deny)
assert sc.next_event() == ConnectionClosed()
# You can't send after switching protocols, or while waiting for a
# protocol switch
p = setup()
with pytest.raises(LocalProtocolError):
p.conn[CLIENT].send(
Request(method="GET", target="/", headers=[("Host", "a")])
)
p = setup()
p.send(SERVER, accept)
with pytest.raises(LocalProtocolError):
p.conn[SERVER].send(Data(data=b"123"))
def test_close_simple() -> None:
# Just immediately closing a new connection without anything having
# happened yet.
for (who_shot_first, who_shot_second) in [(CLIENT, SERVER), (SERVER, CLIENT)]:
def setup() -> ConnectionPair:
p = ConnectionPair()
p.send(who_shot_first, ConnectionClosed())
for conn in p.conns:
assert conn.states == {
who_shot_first: CLOSED,
who_shot_second: MUST_CLOSE,
}
return p
# You can keep putting b"" into a closed connection, and you keep
# getting ConnectionClosed() out:
p = setup()
assert p.conn[who_shot_second].next_event() == ConnectionClosed()
assert p.conn[who_shot_second].next_event() == ConnectionClosed()
p.conn[who_shot_second].receive_data(b"")
assert p.conn[who_shot_second].next_event() == ConnectionClosed()
# Second party can close...
p = setup()
p.send(who_shot_second, ConnectionClosed())
for conn in p.conns:
assert conn.our_state is CLOSED
assert conn.their_state is CLOSED
# But trying to receive new data on a closed connection is a
# RuntimeError (not ProtocolError, because the problem here isn't
# violation of HTTP, it's violation of physics)
p = setup()
with pytest.raises(RuntimeError):
p.conn[who_shot_second].receive_data(b"123")
# And receiving new data on a MUST_CLOSE connection is a ProtocolError
p = setup()
p.conn[who_shot_first].receive_data(b"GET")
with pytest.raises(RemoteProtocolError):
p.conn[who_shot_first].next_event()
def test_close_different_states() -> None:
req = [
Request(method="GET", target="/foo", headers=[("Host", "a")]),
EndOfMessage(),
]
resp = [
Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
EndOfMessage(),
]
# Client before request
p = ConnectionPair()
p.send(CLIENT, ConnectionClosed())
for conn in p.conns:
assert conn.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
# Client after request
p = ConnectionPair()
p.send(CLIENT, req)
p.send(CLIENT, ConnectionClosed())
for conn in p.conns:
assert conn.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
# Server after request -> not allowed
p = ConnectionPair()
p.send(CLIENT, req)
with pytest.raises(LocalProtocolError):
p.conn[SERVER].send(ConnectionClosed())
p.conn[CLIENT].receive_data(b"")
with pytest.raises(RemoteProtocolError):
p.conn[CLIENT].next_event()
# Server after response
p = ConnectionPair()
p.send(CLIENT, req)
p.send(SERVER, resp)
p.send(SERVER, ConnectionClosed())
for conn in p.conns:
assert conn.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
# Both after closing (ConnectionClosed() is idempotent)
p = ConnectionPair()
p.send(CLIENT, req)
p.send(SERVER, resp)
p.send(CLIENT, ConnectionClosed())
p.send(SERVER, ConnectionClosed())
p.send(CLIENT, ConnectionClosed())
p.send(SERVER, ConnectionClosed())
# In the middle of sending -> not allowed
p = ConnectionPair()
p.send(
CLIENT,
Request(
method="GET", target="/", headers=[("Host", "a"), ("Content-Length", "10")]
),
)
with pytest.raises(LocalProtocolError):
p.conn[CLIENT].send(ConnectionClosed())
p.conn[SERVER].receive_data(b"")
with pytest.raises(RemoteProtocolError):
p.conn[SERVER].next_event()
# Receive several requests and then client shuts down their side of the
# connection; we can respond to each
def test_pipelined_close() -> None:
c = Connection(SERVER)
# 2 requests then a close
c.receive_data(
b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
b"12345"
b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
b"67890"
)
c.receive_data(b"")
assert get_all_events(c) == [
Request(
method="GET",
target="/1",
headers=[("host", "a.com"), ("content-length", "5")],
),
Data(data=b"12345"),
EndOfMessage(),
]
assert c.states[CLIENT] is DONE
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
assert c.states[SERVER] is DONE
c.start_next_cycle()
assert get_all_events(c) == [
Request(
method="GET",
target="/2",
headers=[("host", "a.com"), ("content-length", "5")],
),
Data(data=b"67890"),
EndOfMessage(),
ConnectionClosed(),
]
assert c.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
c.send(EndOfMessage())
assert c.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
c.send(ConnectionClosed())
assert c.states == {CLIENT: CLOSED, SERVER: CLOSED}
def test_sendfile() -> None:
class SendfilePlaceholder:
def __len__(self) -> int:
return 10
placeholder = SendfilePlaceholder()
def setup(
header: Tuple[str, str], http_version: str
) -> Tuple[Connection, Optional[List[bytes]]]:
c = Connection(SERVER)
receive_and_get(
c, "GET / HTTP/{}\r\nHost: a\r\n\r\n".format(http_version).encode("ascii")
)
headers = []
if header:
headers.append(header)
c.send(Response(status_code=200, headers=headers))
return c, c.send_with_data_passthrough(Data(data=placeholder)) # type: ignore
c, data = setup(("Content-Length", "10"), "1.1")
assert data == [placeholder] # type: ignore
# Raises an error if the connection object doesn't think we've sent
# exactly 10 bytes
c.send(EndOfMessage())
_, data = setup(("Transfer-Encoding", "chunked"), "1.1")
assert placeholder in data # type: ignore
data[data.index(placeholder)] = b"x" * 10 # type: ignore
assert b"".join(data) == b"a\r\nxxxxxxxxxx\r\n" # type: ignore
c, data = setup(None, "1.0") # type: ignore
assert data == [placeholder] # type: ignore
assert c.our_state is SEND_BODY
def test_errors() -> None:
# After a receive error, you can't receive
for role in [CLIENT, SERVER]:
c = Connection(our_role=role)
c.receive_data(b"gibberish\r\n\r\n")
with pytest.raises(RemoteProtocolError):
c.next_event()
# Now any attempt to receive continues to raise
assert c.their_state is ERROR
assert c.our_state is not ERROR
print(c._cstate.states)
with pytest.raises(RemoteProtocolError):
c.next_event()
# But we can still yell at the client for sending us gibberish
if role is SERVER:
assert (
c.send(Response(status_code=400, headers=[])) # type: ignore[arg-type]
== b"HTTP/1.1 400 \r\nConnection: close\r\n\r\n"
)
# After an error sending, you can no longer send
# (This is especially important for things like content-length errors,
# where there's complex internal state being modified)
def conn(role: Type[Sentinel]) -> Connection:
c = Connection(our_role=role)
if role is SERVER:
# Put it into the state where it *could* send a response...
receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
assert c.our_state is SEND_RESPONSE
return c
for role in [CLIENT, SERVER]:
if role is CLIENT:
# This HTTP/1.0 request won't be detected as bad until after we go
# through the state machine and hit the writing code
good = Request(method="GET", target="/", headers=[("Host", "example.com")])
bad = Request(
method="GET",
target="/",
headers=[("Host", "example.com")],
http_version="1.0",
)
elif role is SERVER:
good = Response(status_code=200, headers=[]) # type: ignore[arg-type,assignment]
bad = Response(status_code=200, headers=[], http_version="1.0") # type: ignore[arg-type,assignment]
# Make sure 'good' actually is good
c = conn(role)
c.send(good)
assert c.our_state is not ERROR
# Do that again, but this time sending 'bad' first
c = conn(role)
with pytest.raises(LocalProtocolError):
c.send(bad)
assert c.our_state is ERROR
assert c.their_state is not ERROR
# Now 'good' is not so good
with pytest.raises(LocalProtocolError):
c.send(good)
# And check send_failed() too
c = conn(role)
c.send_failed()
assert c.our_state is ERROR
assert c.their_state is not ERROR
# This is idempotent
c.send_failed()
assert c.our_state is ERROR
assert c.their_state is not ERROR
def test_idle_receive_nothing() -> None:
# At one point this incorrectly raised an error
for role in [CLIENT, SERVER]:
c = Connection(role)
assert c.next_event() is NEED_DATA
def test_connection_drop() -> None:
c = Connection(SERVER)
c.receive_data(b"GET /")
assert c.next_event() is NEED_DATA
c.receive_data(b"")
with pytest.raises(RemoteProtocolError):
c.next_event()
def test_408_request_timeout() -> None:
# Should be able to send this spontaneously as a server without seeing
# anything from client
p = ConnectionPair()
p.send(SERVER, Response(status_code=408, headers=[(b"connection", b"close")]))
# This used to raise IndexError
def test_empty_request() -> None:
c = Connection(SERVER)
c.receive_data(b"\r\n")
with pytest.raises(RemoteProtocolError):
c.next_event()
# This used to raise IndexError
def test_empty_response() -> None:
c = Connection(CLIENT)
c.send(Request(method="GET", target="/", headers=[("Host", "a")]))
c.receive_data(b"\r\n")
with pytest.raises(RemoteProtocolError):
c.next_event()
@pytest.mark.parametrize(
"data",
[
b"\x00",
b"\x20",
b"\x16\x03\x01\x00\xa5", # Typical start of a TLS Client Hello
],
)
def test_early_detection_of_invalid_request(data: bytes) -> None:
c = Connection(SERVER)
# Early detection should occur before even receiving a `\r\n`
c.receive_data(data)
with pytest.raises(RemoteProtocolError):
c.next_event()
@pytest.mark.parametrize(
"data",
[
b"\x00",
b"\x20",
b"\x16\x03\x03\x00\x31", # Typical start of a TLS Server Hello
],
)
def test_early_detection_of_invalid_response(data: bytes) -> None:
c = Connection(CLIENT)
# Early detection should occur before even receiving a `\r\n`
c.receive_data(data)
with pytest.raises(RemoteProtocolError):
c.next_event()
# This used to give different headers for HEAD and GET.
# The correct way to handle HEAD is to put whatever headers we *would* have
# put if it were a GET -- even though we know that for HEAD, those headers
# will be ignored.
def test_HEAD_framing_headers() -> None:
def setup(method: bytes, http_version: bytes) -> Connection:
c = Connection(SERVER)
c.receive_data(
method + b" / HTTP/" + http_version + b"\r\n" + b"Host: example.com\r\n\r\n"
)
assert type(c.next_event()) is Request
assert type(c.next_event()) is EndOfMessage
return c
for method in [b"GET", b"HEAD"]:
# No Content-Length, HTTP/1.1 peer, should use chunked
c = setup(method, b"1.1")
assert (
c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type]
b"Transfer-Encoding: chunked\r\n\r\n"
)
# No Content-Length, HTTP/1.0 peer, frame with connection: close
c = setup(method, b"1.0")
assert (
c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type]
b"Connection: close\r\n\r\n"
)
# Content-Length + Transfer-Encoding, TE wins
c = setup(method, b"1.1")
assert (
c.send(
Response(
status_code=200,
headers=[
("Content-Length", "100"),
("Transfer-Encoding", "chunked"),
],
)
)
== b"HTTP/1.1 200 \r\n"
b"Transfer-Encoding: chunked\r\n\r\n"
)
def test_special_exceptions_for_lost_connection_in_message_body() -> None:
c = Connection(SERVER)
c.receive_data(
b"POST / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 100\r\n\r\n"
)
assert type(c.next_event()) is Request
assert c.next_event() is NEED_DATA
c.receive_data(b"12345")
assert c.next_event() == Data(data=b"12345")
c.receive_data(b"")
with pytest.raises(RemoteProtocolError) as excinfo:
c.next_event()
assert "received 5 bytes" in str(excinfo.value)
assert "expected 100" in str(excinfo.value)
c = Connection(SERVER)
c.receive_data(
b"POST / HTTP/1.1\r\n"
b"Host: example.com\r\n"
b"Transfer-Encoding: chunked\r\n\r\n"
)
assert type(c.next_event()) is Request
assert c.next_event() is NEED_DATA
c.receive_data(b"8\r\n012345")
assert c.next_event().data == b"012345" # type: ignore
c.receive_data(b"")
with pytest.raises(RemoteProtocolError) as excinfo:
c.next_event()
assert "incomplete chunked read" in str(excinfo.value)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_io.py
from typing import Any, Callable, Generator, List
import pytest
from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .._headers import Headers, normalize_and_validate
from .._readers import (
_obsolete_line_fold,
ChunkedReader,
ContentLengthReader,
Http10Reader,
READERS,
)
from .._receivebuffer import ReceiveBuffer
from .._state import (
CLIENT,
CLOSED,
DONE,
IDLE,
MIGHT_SWITCH_PROTOCOL,
MUST_CLOSE,
SEND_BODY,
SEND_RESPONSE,
SERVER,
SWITCHED_PROTOCOL,
)
from .._util import LocalProtocolError
from .._writers import (
ChunkedWriter,
ContentLengthWriter,
Http10Writer,
write_any_response,
write_headers,
write_request,
WRITERS,
)
from .helpers import normalize_data_events
SIMPLE_CASES = [
(
(CLIENT, IDLE),
Request(
method="GET",
target="/a",
headers=[("Host", "foo"), ("Connection", "close")],
),
b"GET /a HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n",
),
(
(SERVER, SEND_RESPONSE),
Response(status_code=200, headers=[("Connection", "close")], reason=b"OK"),
b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
),
(
(SERVER, SEND_RESPONSE),
Response(status_code=200, headers=[], reason=b"OK"), # type: ignore[arg-type]
b"HTTP/1.1 200 OK\r\n\r\n",
),
(
(SERVER, SEND_RESPONSE),
InformationalResponse(
status_code=101, headers=[("Upgrade", "websocket")], reason=b"Upgrade"
),
b"HTTP/1.1 101 Upgrade\r\nUpgrade: websocket\r\n\r\n",
),
(
(SERVER, SEND_RESPONSE),
InformationalResponse(status_code=101, headers=[], reason=b"Upgrade"), # type: ignore[arg-type]
b"HTTP/1.1 101 Upgrade\r\n\r\n",
),
]
def dowrite(writer: Callable[..., None], obj: Any) -> bytes:
got_list: List[bytes] = []
writer(obj, got_list.append)
return b"".join(got_list)
def tw(writer: Any, obj: Any, expected: Any) -> None:
got = dowrite(writer, obj)
assert got == expected
def makebuf(data: bytes) -> ReceiveBuffer:
buf = ReceiveBuffer()
buf += data
return buf
def tr(reader: Any, data: bytes, expected: Any) -> None:
def check(got: Any) -> None:
assert got == expected
# Headers should always be returned as bytes, not e.g. bytearray
# https://github.com/python-hyper/wsproto/pull/54#issuecomment-377709478
for name, value in getattr(got, "headers", []):
assert type(name) is bytes
assert type(value) is bytes
# Simple: consume whole thing
buf = makebuf(data)
check(reader(buf))
assert not buf
# Incrementally growing buffer
buf = ReceiveBuffer()
for i in range(len(data)):
assert reader(buf) is None
buf += data[i : i + 1]
check(reader(buf))
# Trailing data
buf = makebuf(data)
buf += b"trailing"
check(reader(buf))
assert bytes(buf) == b"trailing"
def test_writers_simple() -> None:
for ((role, state), event, binary) in SIMPLE_CASES:
tw(WRITERS[role, state], event, binary)
def test_readers_simple() -> None:
for ((role, state), event, binary) in SIMPLE_CASES:
tr(READERS[role, state], binary, event)
def test_writers_unusual() -> None:
# Simple test of the write_headers utility routine
tw(
write_headers,
normalize_and_validate([("foo", "bar"), ("baz", "quux")]),
b"foo: bar\r\nbaz: quux\r\n\r\n",
)
tw(write_headers, Headers([]), b"\r\n")
# We understand HTTP/1.0, but we don't speak it
with pytest.raises(LocalProtocolError):
tw(
write_request,
Request(
method="GET",
target="/",
headers=[("Host", "foo"), ("Connection", "close")],
http_version="1.0",
),
None,
)
with pytest.raises(LocalProtocolError):
tw(
write_any_response,
Response(
status_code=200, headers=[("Connection", "close")], http_version="1.0"
),
None,
)
def test_readers_unusual() -> None:
# Reading HTTP/1.0
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.0\r\nSome: header\r\n\r\n",
Request(
method="HEAD",
target="/foo",
headers=[("Some", "header")],
http_version="1.0",
),
)
# check no-headers, since it's only legal with HTTP/1.0
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.0\r\n\r\n",
Request(method="HEAD", target="/foo", headers=[], http_version="1.0"), # type: ignore[arg-type]
)
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.0 200 OK\r\nSome: header\r\n\r\n",
Response(
status_code=200,
headers=[("Some", "header")],
http_version="1.0",
reason=b"OK",
),
)
    # header values built from single visible characters separated by
    # whitespace (actually disallowed by the literal ABNF in RFC 7230 -- this
    # is a bug in the standard that we originally copied...)
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.0 200 OK\r\n" b"Foo: a a a a a \r\n\r\n",
Response(
status_code=200,
headers=[("Foo", "a a a a a")],
http_version="1.0",
reason=b"OK",
),
)
# Empty headers -- also legal
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.0 200 OK\r\n" b"Foo:\r\n\r\n",
Response(
status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
),
)
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.0 200 OK\r\n" b"Foo: \t \t \r\n\r\n",
Response(
status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
),
)
# Tolerate broken servers that leave off the response code
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.0 200\r\n" b"Foo: bar\r\n\r\n",
Response(
status_code=200, headers=[("Foo", "bar")], http_version="1.0", reason=b""
),
)
    # Tolerate both header line endings (\r\n and \n)
    # \n\r\n between headers and body
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.1 200 OK\r\nSomeHeader: val\n\r\n",
Response(
status_code=200,
headers=[("SomeHeader", "val")],
http_version="1.1",
reason="OK",
),
)
# delimited only with \n
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.1 200 OK\nSomeHeader1: val1\nSomeHeader2: val2\n\n",
Response(
status_code=200,
headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
http_version="1.1",
reason="OK",
),
)
# mixed \r\n and \n
tr(
READERS[SERVER, SEND_RESPONSE],
b"HTTP/1.1 200 OK\r\nSomeHeader1: val1\nSomeHeader2: val2\n\r\n",
Response(
status_code=200,
headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
http_version="1.1",
reason="OK",
),
)
# obsolete line folding
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n"
b"Host: example.com\r\n"
b"Some: multi-line\r\n"
b" header\r\n"
b"\tnonsense\r\n"
b" \t \t\tI guess\r\n"
b"Connection: close\r\n"
b"More-nonsense: in the\r\n"
b" last header \r\n\r\n",
Request(
method="HEAD",
target="/foo",
headers=[
("Host", "example.com"),
("Some", "multi-line header nonsense I guess"),
("Connection", "close"),
("More-nonsense", "in the last header"),
],
),
)
with pytest.raises(LocalProtocolError):
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n" b" folded: line\r\n\r\n",
None,
)
with pytest.raises(LocalProtocolError):
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n" b"foo : line\r\n\r\n",
None,
)
with pytest.raises(LocalProtocolError):
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
None,
)
with pytest.raises(LocalProtocolError):
tr(READERS[CLIENT, IDLE], b"HEAD /foo HTTP/1.1\r\n" b": line\r\n\r\n", None)
def test__obsolete_line_fold_bytes() -> None:
# _obsolete_line_fold has a defensive cast to bytearray, which is
# necessary to protect against O(n^2) behavior in case anyone ever passes
# in regular bytestrings... but right now we never pass in regular
# bytestrings. so this test just exists to get some coverage on that
# defensive cast.
assert list(_obsolete_line_fold([b"aaa", b"bbb", b" ccc", b"ddd"])) == [
b"aaa",
bytearray(b"bbb ccc"),
b"ddd",
]
def _run_reader_iter(
reader: Any, buf: bytes, do_eof: bool
) -> Generator[Any, None, None]:
while True:
event = reader(buf)
if event is None:
break
yield event
# body readers have undefined behavior after returning EndOfMessage,
# because this changes the state so they don't get called again
if type(event) is EndOfMessage:
break
if do_eof:
assert not buf
yield reader.read_eof()
def _run_reader(*args: Any) -> List[Event]:
events = list(_run_reader_iter(*args))
return normalize_data_events(events)
def t_body_reader(thunk: Any, data: bytes, expected: Any, do_eof: bool = False) -> None:
# Simple: consume whole thing
print("Test 1")
buf = makebuf(data)
assert _run_reader(thunk(), buf, do_eof) == expected
# Incrementally growing buffer
print("Test 2")
reader = thunk()
buf = ReceiveBuffer()
events = []
for i in range(len(data)):
events += _run_reader(reader, buf, False)
buf += data[i : i + 1]
events += _run_reader(reader, buf, do_eof)
assert normalize_data_events(events) == expected
is_complete = any(type(event) is EndOfMessage for event in expected)
if is_complete and not do_eof:
buf = makebuf(data + b"trailing")
assert _run_reader(thunk(), buf, False) == expected
def test_ContentLengthReader() -> None:
t_body_reader(lambda: ContentLengthReader(0), b"", [EndOfMessage()])
t_body_reader(
lambda: ContentLengthReader(10),
b"0123456789",
[Data(data=b"0123456789"), EndOfMessage()],
)
def test_Http10Reader() -> None:
t_body_reader(Http10Reader, b"", [EndOfMessage()], do_eof=True)
t_body_reader(Http10Reader, b"asdf", [Data(data=b"asdf")], do_eof=False)
t_body_reader(
Http10Reader, b"asdf", [Data(data=b"asdf"), EndOfMessage()], do_eof=True
)
def test_ChunkedReader() -> None:
t_body_reader(ChunkedReader, b"0\r\n\r\n", [EndOfMessage()])
t_body_reader(
ChunkedReader,
b"0\r\nSome: header\r\n\r\n",
[EndOfMessage(headers=[("Some", "header")])],
)
t_body_reader(
ChunkedReader,
b"5\r\n01234\r\n"
+ b"10\r\n0123456789abcdef\r\n"
+ b"0\r\n"
+ b"Some: header\r\n\r\n",
[
Data(data=b"012340123456789abcdef"),
EndOfMessage(headers=[("Some", "header")]),
],
)
t_body_reader(
ChunkedReader,
b"5\r\n01234\r\n" + b"10\r\n0123456789abcdef\r\n" + b"0\r\n\r\n",
[Data(data=b"012340123456789abcdef"), EndOfMessage()],
)
# handles upper and lowercase hex
t_body_reader(
ChunkedReader,
b"aA\r\n" + b"x" * 0xAA + b"\r\n" + b"0\r\n\r\n",
[Data(data=b"x" * 0xAA), EndOfMessage()],
)
# refuses arbitrarily long chunk integers
with pytest.raises(LocalProtocolError):
# Technically this is legal HTTP/1.1, but we refuse to process chunk
# sizes that don't fit into 20 characters of hex
t_body_reader(ChunkedReader, b"9" * 100 + b"\r\nxxx", [Data(data=b"xxx")])
# refuses garbage in the chunk count
with pytest.raises(LocalProtocolError):
t_body_reader(ChunkedReader, b"10\x00\r\nxxx", None)
# handles (and discards) "chunk extensions" omg wtf
t_body_reader(
ChunkedReader,
b"5; hello=there\r\n"
+ b"xxxxx"
+ b"\r\n"
+ b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n',
[Data(data=b"xxxxx"), EndOfMessage()],
)
t_body_reader(
ChunkedReader,
b"5 \r\n01234\r\n" + b"0\r\n\r\n",
[Data(data=b"01234"), EndOfMessage()],
)
def test_ContentLengthWriter() -> None:
w = ContentLengthWriter(5)
assert dowrite(w, Data(data=b"123")) == b"123"
assert dowrite(w, Data(data=b"45")) == b"45"
assert dowrite(w, EndOfMessage()) == b""
w = ContentLengthWriter(5)
with pytest.raises(LocalProtocolError):
dowrite(w, Data(data=b"123456"))
w = ContentLengthWriter(5)
dowrite(w, Data(data=b"123"))
with pytest.raises(LocalProtocolError):
dowrite(w, Data(data=b"456"))
w = ContentLengthWriter(5)
dowrite(w, Data(data=b"123"))
with pytest.raises(LocalProtocolError):
dowrite(w, EndOfMessage())
w = ContentLengthWriter(5)
    assert dowrite(w, Data(data=b"123")) == b"123"
    assert dowrite(w, Data(data=b"45")) == b"45"
with pytest.raises(LocalProtocolError):
dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
def test_ChunkedWriter() -> None:
w = ChunkedWriter()
assert dowrite(w, Data(data=b"aaa")) == b"3\r\naaa\r\n"
assert dowrite(w, Data(data=b"a" * 20)) == b"14\r\n" + b"a" * 20 + b"\r\n"
assert dowrite(w, Data(data=b"")) == b""
assert dowrite(w, EndOfMessage()) == b"0\r\n\r\n"
assert (
dowrite(w, EndOfMessage(headers=[("Etag", "asdf"), ("a", "b")]))
== b"0\r\nEtag: asdf\r\na: b\r\n\r\n"
)
def test_Http10Writer() -> None:
w = Http10Writer()
assert dowrite(w, Data(data=b"1234")) == b"1234"
assert dowrite(w, EndOfMessage()) == b""
with pytest.raises(LocalProtocolError):
dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
def test_reject_garbage_after_response_line() -> None:
with pytest.raises(LocalProtocolError):
tr(READERS[SERVER, SEND_RESPONSE], b"HTTP/1.0 200 OK\x00xxxx\r\n\r\n", None)
def test_reject_garbage_after_request_line() -> None:
with pytest.raises(LocalProtocolError):
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1 xxxxxx\r\n" b"Host: a\r\n\r\n",
None,
)
def test_reject_garbage_in_header_line() -> None:
with pytest.raises(LocalProtocolError):
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n" b"Host: foo\x00bar\r\n\r\n",
None,
)
def test_reject_non_vchar_in_path() -> None:
for bad_char in b"\x00\x20\x7f\xee":
message = bytearray(b"HEAD /")
message.append(bad_char)
message.extend(b" HTTP/1.1\r\nHost: foobar\r\n\r\n")
with pytest.raises(LocalProtocolError):
tr(READERS[CLIENT, IDLE], message, None)
# https://github.com/python-hyper/h11/issues/57
def test_allow_some_garbage_in_cookies() -> None:
tr(
READERS[CLIENT, IDLE],
b"HEAD /foo HTTP/1.1\r\n"
b"Host: foo\r\n"
b"Set-Cookie: ___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900\r\n"
b"\r\n",
Request(
method="HEAD",
target="/foo",
headers=[
("Host", "foo"),
("Set-Cookie", "___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900"),
],
),
)
def test_host_comes_first() -> None:
tw(
write_headers,
normalize_and_validate([("foo", "bar"), ("Host", "example.com")]),
b"Host: example.com\r\nfoo: bar\r\n\r\n",
)
| 16,386 | Python | 27.598604 | 104 | 0.553704 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_events.py | from http import HTTPStatus
import pytest
from .. import _events
from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .._util import LocalProtocolError
def test_events() -> None:
with pytest.raises(LocalProtocolError):
# Missing Host:
req = Request(
method="GET", target="/", headers=[("a", "b")], http_version="1.1"
)
# But this is okay (HTTP/1.0)
req = Request(method="GET", target="/", headers=[("a", "b")], http_version="1.0")
# fields are normalized
assert req.method == b"GET"
assert req.target == b"/"
assert req.headers == [(b"a", b"b")]
assert req.http_version == b"1.0"
# This is also okay -- has a Host (with weird capitalization, which is ok)
req = Request(
method="GET",
target="/",
headers=[("a", "b"), ("hOSt", "example.com")],
http_version="1.1",
)
# we normalize header capitalization
assert req.headers == [(b"a", b"b"), (b"host", b"example.com")]
# Multiple host is bad too
with pytest.raises(LocalProtocolError):
req = Request(
method="GET",
target="/",
headers=[("Host", "a"), ("Host", "a")],
http_version="1.1",
)
# Even for HTTP/1.0
with pytest.raises(LocalProtocolError):
req = Request(
method="GET",
target="/",
headers=[("Host", "a"), ("Host", "a")],
http_version="1.0",
)
# Header values are validated
for bad_char in "\x00\r\n\f\v":
with pytest.raises(LocalProtocolError):
req = Request(
method="GET",
target="/",
headers=[("Host", "a"), ("Foo", "asd" + bad_char)],
http_version="1.0",
)
# But for compatibility we allow non-whitespace control characters, even
# though they're forbidden by the spec.
Request(
method="GET",
target="/",
headers=[("Host", "a"), ("Foo", "asd\x01\x02\x7f")],
http_version="1.0",
)
# Request target is validated
for bad_byte in b"\x00\x20\x7f\xee":
target = bytearray(b"/")
target.append(bad_byte)
with pytest.raises(LocalProtocolError):
Request(
method="GET", target=target, headers=[("Host", "a")], http_version="1.1"
)
# Request method is validated
with pytest.raises(LocalProtocolError):
Request(
method="GET / HTTP/1.1",
            target="/",
headers=[("Host", "a")],
http_version="1.1",
)
ir = InformationalResponse(status_code=100, headers=[("Host", "a")])
assert ir.status_code == 100
assert ir.headers == [(b"host", b"a")]
assert ir.http_version == b"1.1"
with pytest.raises(LocalProtocolError):
InformationalResponse(status_code=200, headers=[("Host", "a")])
resp = Response(status_code=204, headers=[], http_version="1.0") # type: ignore[arg-type]
assert resp.status_code == 204
assert resp.headers == []
assert resp.http_version == b"1.0"
with pytest.raises(LocalProtocolError):
resp = Response(status_code=100, headers=[], http_version="1.0") # type: ignore[arg-type]
with pytest.raises(LocalProtocolError):
Response(status_code="100", headers=[], http_version="1.0") # type: ignore[arg-type]
with pytest.raises(LocalProtocolError):
InformationalResponse(status_code=b"100", headers=[], http_version="1.0") # type: ignore[arg-type]
d = Data(data=b"asdf")
assert d.data == b"asdf"
eom = EndOfMessage()
assert eom.headers == []
cc = ConnectionClosed()
assert repr(cc) == "ConnectionClosed()"
def test_intenum_status_code() -> None:
# https://github.com/python-hyper/h11/issues/72
r = Response(status_code=HTTPStatus.OK, headers=[], http_version="1.0") # type: ignore[arg-type]
assert r.status_code == HTTPStatus.OK
assert type(r.status_code) is not type(HTTPStatus.OK)
assert type(r.status_code) is int
def test_header_casing() -> None:
r = Request(
method="GET",
target="/",
headers=[("Host", "example.org"), ("Connection", "keep-alive")],
http_version="1.1",
)
assert len(r.headers) == 2
assert r.headers[0] == (b"host", b"example.org")
assert r.headers == [(b"host", b"example.org"), (b"connection", b"keep-alive")]
assert r.headers.raw_items() == [
(b"Host", b"example.org"),
(b"Connection", b"keep-alive"),
]
| 4,657 | Python | 29.847682 | 107 | 0.563668 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_against_stdlib_http.py | import json
import os.path
import socket
import socketserver
import threading
from contextlib import closing, contextmanager
from http.server import SimpleHTTPRequestHandler
from typing import Callable, Generator
from urllib.request import urlopen
import h11
@contextmanager
def socket_server(
handler: Callable[..., socketserver.BaseRequestHandler]
) -> Generator[socketserver.TCPServer, None, None]:
httpd = socketserver.TCPServer(("127.0.0.1", 0), handler)
thread = threading.Thread(
target=httpd.serve_forever, kwargs={"poll_interval": 0.01}
)
thread.daemon = True
try:
thread.start()
yield httpd
finally:
httpd.shutdown()
test_file_path = os.path.join(os.path.dirname(__file__), "data/test-file")
with open(test_file_path, "rb") as f:
test_file_data = f.read()
class SingleMindedRequestHandler(SimpleHTTPRequestHandler):
def translate_path(self, path: str) -> str:
return test_file_path
def test_h11_as_client() -> None:
with socket_server(SingleMindedRequestHandler) as httpd:
with closing(socket.create_connection(httpd.server_address)) as s:
c = h11.Connection(h11.CLIENT)
s.sendall(
c.send( # type: ignore[arg-type]
h11.Request(
method="GET", target="/foo", headers=[("Host", "localhost")]
)
)
)
s.sendall(c.send(h11.EndOfMessage())) # type: ignore[arg-type]
data = bytearray()
while True:
event = c.next_event()
print(event)
if event is h11.NEED_DATA:
# Use a small read buffer to make things more challenging
# and exercise more paths :-)
c.receive_data(s.recv(10))
continue
if type(event) is h11.Response:
assert event.status_code == 200
if type(event) is h11.Data:
data += event.data
if type(event) is h11.EndOfMessage:
break
assert bytes(data) == test_file_data
class H11RequestHandler(socketserver.BaseRequestHandler):
def handle(self) -> None:
with closing(self.request) as s:
c = h11.Connection(h11.SERVER)
request = None
while True:
event = c.next_event()
if event is h11.NEED_DATA:
# Use a small read buffer to make things more challenging
# and exercise more paths :-)
c.receive_data(s.recv(10))
continue
if type(event) is h11.Request:
request = event
if type(event) is h11.EndOfMessage:
break
assert request is not None
info = json.dumps(
{
"method": request.method.decode("ascii"),
"target": request.target.decode("ascii"),
"headers": {
name.decode("ascii"): value.decode("ascii")
for (name, value) in request.headers
},
}
)
s.sendall(c.send(h11.Response(status_code=200, headers=[]))) # type: ignore[arg-type]
s.sendall(c.send(h11.Data(data=info.encode("ascii"))))
s.sendall(c.send(h11.EndOfMessage()))
def test_h11_as_server() -> None:
with socket_server(H11RequestHandler) as httpd:
host, port = httpd.server_address
url = "http://{}:{}/some-path".format(host, port)
with closing(urlopen(url)) as f:
assert f.getcode() == 200
data = f.read()
info = json.loads(data.decode("ascii"))
print(info)
assert info["method"] == "GET"
assert info["target"] == "/some-path"
assert "urllib" in info["headers"]["user-agent"]
| 3,995 | Python | 33.448276 | 98 | 0.54368 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_util.py | import re
import sys
import traceback
from typing import NoReturn
import pytest
from .._util import (
bytesify,
LocalProtocolError,
ProtocolError,
RemoteProtocolError,
Sentinel,
validate,
)
def test_ProtocolError() -> None:
with pytest.raises(TypeError):
ProtocolError("abstract base class")
def test_LocalProtocolError() -> None:
try:
raise LocalProtocolError("foo")
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 400
try:
raise LocalProtocolError("foo", error_status_hint=418)
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 418
def thunk() -> NoReturn:
raise LocalProtocolError("a", error_status_hint=420)
try:
try:
thunk()
except LocalProtocolError as exc1:
orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
exc1._reraise_as_remote_protocol_error()
except RemoteProtocolError as exc2:
assert type(exc2) is RemoteProtocolError
assert exc2.args == ("a",)
assert exc2.error_status_hint == 420
new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
assert new_traceback.endswith(orig_traceback)
def test_validate() -> None:
my_re = re.compile(rb"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.")
groups = validate(my_re, b"0.1")
assert groups == {"group1": b"0", "group2": b"1"}
# successful partial matches are an error - must match whole string
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.1xx")
with pytest.raises(LocalProtocolError):
validate(my_re, b"0.1\n")
def test_validate_formatting() -> None:
my_re = re.compile(rb"foo")
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops")
assert "oops" in str(excinfo.value)
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {}")
assert "oops {}" in str(excinfo.value)
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {} xx", 10)
assert "oops 10 xx" in str(excinfo.value)
def test_make_sentinel() -> None:
class S(Sentinel, metaclass=Sentinel):
pass
assert repr(S) == "S"
assert S == S
assert type(S).__name__ == "S"
assert S in {S}
assert type(S) is S
class S2(Sentinel, metaclass=Sentinel):
pass
assert repr(S2) == "S2"
assert S != S2
assert S not in {S2}
assert type(S) is not type(S2)
def test_bytesify() -> None:
assert bytesify(b"123") == b"123"
assert bytesify(bytearray(b"123")) == b"123"
assert bytesify("123") == b"123"
with pytest.raises(UnicodeEncodeError):
bytesify("\u1234")
with pytest.raises(TypeError):
bytesify(10)
| 2,970 | Python | 25.292035 | 76 | 0.621549 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_receivebuffer.py | import re
from typing import Tuple
import pytest
from .._receivebuffer import ReceiveBuffer
def test_receivebuffer() -> None:
b = ReceiveBuffer()
assert not b
assert len(b) == 0
assert bytes(b) == b""
b += b"123"
assert b
assert len(b) == 3
assert bytes(b) == b"123"
assert bytes(b) == b"123"
assert b.maybe_extract_at_most(2) == b"12"
assert b
assert len(b) == 1
assert bytes(b) == b"3"
assert bytes(b) == b"3"
assert b.maybe_extract_at_most(10) == b"3"
assert bytes(b) == b""
assert b.maybe_extract_at_most(10) is None
assert not b
################################################################
    # maybe_extract_next_line
################################################################
b += b"123\n456\r\n789\r\n"
assert b.maybe_extract_next_line() == b"123\n456\r\n"
assert bytes(b) == b"789\r\n"
assert b.maybe_extract_next_line() == b"789\r\n"
assert bytes(b) == b""
b += b"12\r"
assert b.maybe_extract_next_line() is None
assert bytes(b) == b"12\r"
b += b"345\n\r"
assert b.maybe_extract_next_line() is None
assert bytes(b) == b"12\r345\n\r"
    # here we stopped in the middle of the b"\r\n" delimiter
b += b"\n6789aaa123\r\n"
assert b.maybe_extract_next_line() == b"12\r345\n\r\n"
assert b.maybe_extract_next_line() == b"6789aaa123\r\n"
assert b.maybe_extract_next_line() is None
assert bytes(b) == b""
################################################################
# maybe_extract_lines
################################################################
b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing"
lines = b.maybe_extract_lines()
assert lines == [b"123", b"a: b", b"foo:bar"]
assert bytes(b) == b"trailing"
assert b.maybe_extract_lines() is None
b += b"\r\n\r"
assert b.maybe_extract_lines() is None
assert b.maybe_extract_at_most(100) == b"trailing\r\n\r"
assert not b
# Empty body case (as happens at the end of chunked encoding if there are
# no trailing headers, e.g.)
b += b"\r\ntrailing"
assert b.maybe_extract_lines() == []
assert bytes(b) == b"trailing"
@pytest.mark.parametrize(
"data",
[
pytest.param(
(
b"HTTP/1.1 200 OK\r\n",
b"Content-type: text/plain\r\n",
b"Connection: close\r\n",
b"\r\n",
b"Some body",
),
id="with_crlf_delimiter",
),
pytest.param(
(
b"HTTP/1.1 200 OK\n",
b"Content-type: text/plain\n",
b"Connection: close\n",
b"\n",
b"Some body",
),
id="with_lf_only_delimiter",
),
pytest.param(
(
b"HTTP/1.1 200 OK\n",
b"Content-type: text/plain\r\n",
b"Connection: close\n",
b"\n",
b"Some body",
),
id="with_mixed_crlf_and_lf",
),
],
)
def test_receivebuffer_for_invalid_delimiter(data: Tuple[bytes, ...]) -> None:
b = ReceiveBuffer()
for line in data:
b += line
lines = b.maybe_extract_lines()
assert lines == [
b"HTTP/1.1 200 OK",
b"Content-type: text/plain",
b"Connection: close",
]
assert bytes(b) == b"Some body"
| 3,454 | Python | 24.404412 | 77 | 0.480892 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/h11/tests/test_helpers.py | from .._events import (
ConnectionClosed,
Data,
EndOfMessage,
Event,
InformationalResponse,
Request,
Response,
)
from .helpers import normalize_data_events
def test_normalize_data_events() -> None:
assert normalize_data_events(
[
Data(data=bytearray(b"1")),
Data(data=b"2"),
Response(status_code=200, headers=[]), # type: ignore[arg-type]
Data(data=b"3"),
Data(data=b"4"),
EndOfMessage(),
Data(data=b"5"),
Data(data=b"6"),
Data(data=b"7"),
]
) == [
Data(data=b"12"),
Response(status_code=200, headers=[]), # type: ignore[arg-type]
Data(data=b"34"),
EndOfMessage(),
Data(data=b"567"),
]
| 794 | Python | 23.090908 | 76 | 0.512594 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/util.py | import math
import re
from typing import List
from qrcode import LUT, base, exceptions
from qrcode.base import RSBlock
# QR encoding modes.
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
# Encoding mode sizes.
MODE_SIZE_SMALL = {
MODE_NUMBER: 10,
MODE_ALPHA_NUM: 9,
MODE_8BIT_BYTE: 8,
MODE_KANJI: 8,
}
MODE_SIZE_MEDIUM = {
MODE_NUMBER: 12,
MODE_ALPHA_NUM: 11,
MODE_8BIT_BYTE: 16,
MODE_KANJI: 10,
}
MODE_SIZE_LARGE = {
MODE_NUMBER: 14,
MODE_ALPHA_NUM: 13,
MODE_8BIT_BYTE: 16,
MODE_KANJI: 12,
}
ALPHA_NUM = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"
RE_ALPHA_NUM = re.compile(b"^[" + re.escape(ALPHA_NUM) + rb"]*\Z")
# The number of bits for numeric delimited data lengths.
NUMBER_LENGTH = {3: 10, 2: 7, 1: 4}
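# For example, a 3-digit group like b"123" is written as one 10-bit value
# (anything up to 999 fits in 10 bits); a trailing 2-digit group takes 7 bits
# and a final single digit takes 4 bits (see QRData.write below).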
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170],
]
G15 = (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0)
G18 = (
(1 << 12)
| (1 << 11)
| (1 << 10)
| (1 << 9)
| (1 << 8)
| (1 << 5)
| (1 << 2)
| (1 << 0)
)
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
PAD0 = 0xEC
PAD1 = 0x11
# Precompute bit count limits, indexed by error correction level and version
def _data_count(block):
return block.data_count
BIT_LIMIT_TABLE = [
[0]
+ [
8 * sum(map(_data_count, base.rs_blocks(version, error_correction)))
for version in range(1, 41)
]
for error_correction in range(4)
]
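# Worked example: a version-1 symbol at error level M (index 0) has a single
# RS block with 16 data codewords, so BIT_LIMIT_TABLE[0][1] == 8 * 16 == 128
# bits of usable data capacity.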
def BCH_type_info(data):
d = data << 10
while BCH_digit(d) - BCH_digit(G15) >= 0:
d ^= G15 << (BCH_digit(d) - BCH_digit(G15))
return ((data << 10) | d) ^ G15_MASK
def BCH_type_number(data):
d = data << 12
while BCH_digit(d) - BCH_digit(G18) >= 0:
d ^= G18 << (BCH_digit(d) - BCH_digit(G18))
return (data << 12) | d
def BCH_digit(data):
digit = 0
while data != 0:
digit += 1
data >>= 1
return digit
def pattern_position(version):
return PATTERN_POSITION_TABLE[version - 1]
def mask_func(pattern):
"""
Return the mask function for the given mask pattern.
"""
if pattern == 0: # 000
return lambda i, j: (i + j) % 2 == 0
if pattern == 1: # 001
return lambda i, j: i % 2 == 0
if pattern == 2: # 010
return lambda i, j: j % 3 == 0
if pattern == 3: # 011
return lambda i, j: (i + j) % 3 == 0
if pattern == 4: # 100
return lambda i, j: (math.floor(i / 2) + math.floor(j / 3)) % 2 == 0
if pattern == 5: # 101
return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0
if pattern == 6: # 110
return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0
if pattern == 7: # 111
return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0
    raise TypeError(f"Bad mask pattern: {pattern}")  # pragma: no cover
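# Illustrative example: mask pattern 0 inverts every module where (row + col)
# is even, so mask_func(0)(2, 4) returns True while mask_func(0)(2, 5)
# returns False.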
def mode_sizes_for_version(version):
if version < 10:
return MODE_SIZE_SMALL
elif version < 27:
return MODE_SIZE_MEDIUM
else:
return MODE_SIZE_LARGE
def length_in_bits(mode, version):
if mode not in (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE, MODE_KANJI):
raise TypeError(f"Invalid mode ({mode})") # pragma: no cover
check_version(version)
return mode_sizes_for_version(version)[mode]
def check_version(version):
if version < 1 or version > 40:
raise ValueError(f"Invalid version (was {version}, expected 1 to 40)")
def lost_point(modules):
modules_count = len(modules)
lost_point = 0
lost_point = _lost_point_level1(modules, modules_count)
lost_point += _lost_point_level2(modules, modules_count)
lost_point += _lost_point_level3(modules, modules_count)
lost_point += _lost_point_level4(modules, modules_count)
return lost_point
def _lost_point_level1(modules, modules_count):
lost_point = 0
modules_range = range(modules_count)
container = [0] * (modules_count + 1)
for row in modules_range:
this_row = modules[row]
previous_color = this_row[0]
length = 0
for col in modules_range:
if this_row[col] == previous_color:
length += 1
else:
if length >= 5:
container[length] += 1
length = 1
previous_color = this_row[col]
if length >= 5:
container[length] += 1
for col in modules_range:
previous_color = modules[0][col]
length = 0
for row in modules_range:
if modules[row][col] == previous_color:
length += 1
else:
if length >= 5:
container[length] += 1
length = 1
previous_color = modules[row][col]
if length >= 5:
container[length] += 1
lost_point += sum(
container[each_length] * (each_length - 2)
for each_length in range(5, modules_count + 1)
)
return lost_point
def _lost_point_level2(modules, modules_count):
lost_point = 0
modules_range = range(modules_count - 1)
for row in modules_range:
this_row = modules[row]
next_row = modules[row + 1]
# use iter() and next() to skip next four-block. e.g.
# d a f if top-right a != b bottom-right,
# c b e then both abcd and abef won't lost any point.
modules_range_iter = iter(modules_range)
for col in modules_range_iter:
top_right = this_row[col + 1]
if top_right != next_row[col + 1]:
# reduce 33.3% of runtime via next().
# None: raise nothing if there is no next item.
next(modules_range_iter, None)
elif top_right != this_row[col]:
continue
elif top_right != next_row[col]:
continue
else:
lost_point += 3
return lost_point
def _lost_point_level3(modules, modules_count):
    # 1 : 1 : 3 : 1 : 1 ratio (dark:light:dark:light:dark) pattern in a
    # row/column, preceded or followed by a light area 4 modules wide (ISO/IEC 18004).
# pattern1: 10111010000
# pattern2: 00001011101
modules_range = range(modules_count)
modules_range_short = range(modules_count - 10)
lost_point = 0
for row in modules_range:
this_row = modules[row]
modules_range_short_iter = iter(modules_range_short)
col = 0
for col in modules_range_short_iter:
if (
not this_row[col + 1]
and this_row[col + 4]
and not this_row[col + 5]
and this_row[col + 6]
and not this_row[col + 9]
and (
this_row[col + 0]
and this_row[col + 2]
and this_row[col + 3]
and not this_row[col + 7]
and not this_row[col + 8]
and not this_row[col + 10]
or not this_row[col + 0]
and not this_row[col + 2]
and not this_row[col + 3]
and this_row[col + 7]
and this_row[col + 8]
and this_row[col + 10]
)
):
lost_point += 40
# horspool algorithm.
# if this_row[col + 10]:
# pattern1 shift 4, pattern2 shift 2. So min=2.
# else:
# pattern1 shift 1, pattern2 shift 1. So min=1.
if this_row[col + 10]:
next(modules_range_short_iter, None)
for col in modules_range:
modules_range_short_iter = iter(modules_range_short)
row = 0
for row in modules_range_short_iter:
if (
not modules[row + 1][col]
and modules[row + 4][col]
and not modules[row + 5][col]
and modules[row + 6][col]
and not modules[row + 9][col]
and (
modules[row + 0][col]
and modules[row + 2][col]
and modules[row + 3][col]
and not modules[row + 7][col]
and not modules[row + 8][col]
and not modules[row + 10][col]
or not modules[row + 0][col]
and not modules[row + 2][col]
and not modules[row + 3][col]
and modules[row + 7][col]
and modules[row + 8][col]
and modules[row + 10][col]
)
):
lost_point += 40
if modules[row + 10][col]:
next(modules_range_short_iter, None)
return lost_point
def _lost_point_level4(modules, modules_count):
dark_count = sum(map(sum, modules))
percent = float(dark_count) / (modules_count**2)
# Every 5% departure from 50%, rating++
rating = int(abs(percent * 100 - 50) / 5)
return rating * 10
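# Worked example: if 58% of modules are dark, the departure from 50% is 8
# percentage points, so rating == int(8 / 5) == 1 and the penalty is 10.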
def optimal_data_chunks(data, minimum=4):
"""
An iterator returning QRData chunks optimized to the data content.
:param minimum: The minimum number of bytes in a row to split as a chunk.
"""
data = to_bytestring(data)
num_pattern = rb"\d"
alpha_pattern = b"[" + re.escape(ALPHA_NUM) + b"]"
if len(data) <= minimum:
num_pattern = re.compile(b"^" + num_pattern + b"+$")
alpha_pattern = re.compile(b"^" + alpha_pattern + b"+$")
else:
re_repeat = b"{" + str(minimum).encode("ascii") + b",}"
num_pattern = re.compile(num_pattern + re_repeat)
alpha_pattern = re.compile(alpha_pattern + re_repeat)
num_bits = _optimal_split(data, num_pattern)
for is_num, chunk in num_bits:
if is_num:
yield QRData(chunk, mode=MODE_NUMBER, check_data=False)
else:
for is_alpha, sub_chunk in _optimal_split(chunk, alpha_pattern):
mode = MODE_ALPHA_NUM if is_alpha else MODE_8BIT_BYTE
yield QRData(sub_chunk, mode=mode, check_data=False)
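# Illustrative example (hypothetical input): with the default minimum of 4,
# optimal_data_chunks(b"abc1234567890def") yields an 8-bit chunk b"abc", a
# MODE_NUMBER chunk b"1234567890", and an 8-bit chunk b"def", letting the
# long digit run use the denser numeric encoding.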
def _optimal_split(data, pattern):
while data:
match = re.search(pattern, data)
if not match:
break
start, end = match.start(), match.end()
if start:
yield False, data[:start]
yield True, data[start:end]
data = data[end:]
if data:
yield False, data
def to_bytestring(data):
"""
Convert data to a (utf-8 encoded) byte-string if it isn't a byte-string
already.
"""
if not isinstance(data, bytes):
data = str(data).encode("utf-8")
return data
def optimal_mode(data):
"""
Calculate the optimal mode for this chunk of data.
"""
if data.isdigit():
return MODE_NUMBER
if RE_ALPHA_NUM.match(data):
return MODE_ALPHA_NUM
return MODE_8BIT_BYTE
class QRData:
"""
Data held in a QR compatible format.
Doesn't currently handle KANJI.
"""
def __init__(self, data, mode=None, check_data=True):
"""
If ``mode`` isn't provided, the most compact QR data type possible is
chosen.
"""
if check_data:
data = to_bytestring(data)
if mode is None:
self.mode = optimal_mode(data)
else:
self.mode = mode
if mode not in (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE):
raise TypeError(f"Invalid mode ({mode})") # pragma: no cover
if check_data and mode < optimal_mode(data): # pragma: no cover
raise ValueError(f"Provided data can not be represented in mode {mode}")
self.data = data
def __len__(self):
return len(self.data)
def write(self, buffer):
if self.mode == MODE_NUMBER:
for i in range(0, len(self.data), 3):
chars = self.data[i : i + 3]
bit_length = NUMBER_LENGTH[len(chars)]
buffer.put(int(chars), bit_length)
elif self.mode == MODE_ALPHA_NUM:
for i in range(0, len(self.data), 2):
chars = self.data[i : i + 2]
if len(chars) > 1:
buffer.put(
ALPHA_NUM.find(chars[0]) * 45 + ALPHA_NUM.find(chars[1]), 11
)
else:
buffer.put(ALPHA_NUM.find(chars), 6)
else:
# Iterating a bytestring in Python 3 returns an integer,
# no need to ord().
data = self.data
for c in data:
buffer.put(c, 8)
def __repr__(self):
return repr(self.data)
class BitBuffer:
def __init__(self):
self.buffer: List[int] = []
self.length = 0
def __repr__(self):
return ".".join([str(n) for n in self.buffer])
def get(self, index):
buf_index = math.floor(index / 8)
return ((self.buffer[buf_index] >> (7 - index % 8)) & 1) == 1
def put(self, num, length):
for i in range(length):
self.put_bit(((num >> (length - i - 1)) & 1) == 1)
def __len__(self):
return self.length
def put_bit(self, bit):
buf_index = self.length // 8
if len(self.buffer) <= buf_index:
self.buffer.append(0)
if bit:
self.buffer[buf_index] |= 0x80 >> (self.length % 8)
self.length += 1
def create_bytes(buffer: BitBuffer, rs_blocks: List[RSBlock]):
offset = 0
maxDcCount = 0
maxEcCount = 0
dcdata: List[List[int]] = []
ecdata: List[List[int]] = []
for rs_block in rs_blocks:
dcCount = rs_block.data_count
ecCount = rs_block.total_count - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
current_dc = [0xFF & buffer.buffer[i + offset] for i in range(dcCount)]
offset += dcCount
# Get error correction polynomial.
if ecCount in LUT.rsPoly_LUT:
rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0)
else:
rsPoly = base.Polynomial([1], 0)
for i in range(ecCount):
rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0)
rawPoly = base.Polynomial(current_dc, len(rsPoly) - 1)
modPoly = rawPoly % rsPoly
current_ec = []
mod_offset = len(modPoly) - ecCount
for i in range(ecCount):
modIndex = i + mod_offset
current_ec.append(modPoly[modIndex] if (modIndex >= 0) else 0)
dcdata.append(current_dc)
ecdata.append(current_ec)
data = []
for i in range(maxDcCount):
for dc in dcdata:
if i < len(dc):
data.append(dc[i])
for i in range(maxEcCount):
for ec in ecdata:
if i < len(ec):
data.append(ec[i])
return data
def create_data(version, error_correction, data_list):
buffer = BitBuffer()
for data in data_list:
buffer.put(data.mode, 4)
buffer.put(len(data), length_in_bits(data.mode, version))
data.write(buffer)
# Calculate the maximum number of bits for the given version.
rs_blocks = base.rs_blocks(version, error_correction)
bit_limit = sum(block.data_count * 8 for block in rs_blocks)
if len(buffer) > bit_limit:
raise exceptions.DataOverflowError(
"Code length overflow. Data size (%s) > size available (%s)"
% (len(buffer), bit_limit)
)
# Terminate the bits (add up to four 0s).
for _ in range(min(bit_limit - len(buffer), 4)):
buffer.put_bit(False)
# Delimit the string into 8-bit words, padding with 0s if necessary.
delimit = len(buffer) % 8
if delimit:
for _ in range(8 - delimit):
buffer.put_bit(False)
# Add special alternating padding bitstrings until buffer is full.
bytes_to_fill = (bit_limit - len(buffer)) // 8
for i in range(bytes_to_fill):
if i % 2 == 0:
buffer.put(PAD0, 8)
else:
buffer.put(PAD1, 8)
return create_bytes(buffer, rs_blocks)
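# Worked example: a version-1 symbol at error level M offers 128 data bits.
# A short payload is followed by a terminator of up to four 0 bits, zero-padded
# to a byte boundary, then filled with the alternating pad bytes 0xEC
# (0b11101100) and 0x11 (0b00010001) until all 16 data codewords are used.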
| 17,128 | Python | 28.180579 | 88 | 0.522128 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/main.py | import sys
from bisect import bisect_left
from typing import (
Dict,
Generic,
List,
NamedTuple,
Optional,
Type,
TypeVar,
cast,
overload,
)
from typing_extensions import Literal
from qrcode import constants, exceptions, util
from qrcode.image.base import BaseImage
from qrcode.image.pure import PyPNGImage
ModulesType = List[List[Optional[bool]]]
# Cache modules generated just based on the QR Code version
precomputed_qr_blanks: Dict[int, ModulesType] = {}
def make(data=None, **kwargs):
qr = QRCode(**kwargs)
qr.add_data(data)
return qr.make_image()
def _check_box_size(size):
if int(size) <= 0:
raise ValueError(f"Invalid box size (was {size}, expected larger than 0)")
def _check_border(size):
if int(size) < 0:
        raise ValueError(f"Invalid border value (was {size}, expected 0 or larger)")
def _check_mask_pattern(mask_pattern):
if mask_pattern is None:
return
if not isinstance(mask_pattern, int):
raise TypeError(
f"Invalid mask pattern (was {type(mask_pattern)}, expected int)"
)
if mask_pattern < 0 or mask_pattern > 7:
raise ValueError(f"Mask pattern should be in range(8) (got {mask_pattern})")
def copy_2d_array(x):
return [row[:] for row in x]
class ActiveWithNeighbors(NamedTuple):
NW: bool
N: bool
NE: bool
W: bool
me: bool
E: bool
SW: bool
S: bool
SE: bool
def __bool__(self) -> bool:
return self.me
GenericImage = TypeVar("GenericImage", bound=BaseImage)
GenericImageLocal = TypeVar("GenericImageLocal", bound=BaseImage)
class QRCode(Generic[GenericImage]):
modules: ModulesType
_version: Optional[int] = None
def __init__(
self,
version=None,
error_correction=constants.ERROR_CORRECT_M,
box_size=10,
border=4,
image_factory: Optional[Type[GenericImage]] = None,
mask_pattern=None,
):
_check_box_size(box_size)
_check_border(border)
self.version = version
self.error_correction = int(error_correction)
self.box_size = int(box_size)
# Spec says border should be at least four boxes wide, but allow for
# any (e.g. for producing printable QR codes).
self.border = int(border)
self.mask_pattern = mask_pattern
self.image_factory = image_factory
if image_factory is not None:
assert issubclass(image_factory, BaseImage)
self.clear()
@property
def version(self) -> int:
if self._version is None:
self.best_fit()
return cast(int, self._version)
@version.setter
def version(self, value) -> None:
if value is not None:
value = int(value)
util.check_version(value)
self._version = value
@property
def mask_pattern(self):
return self._mask_pattern
@mask_pattern.setter
def mask_pattern(self, pattern):
_check_mask_pattern(pattern)
self._mask_pattern = pattern
def clear(self):
"""
Reset the internal data.
"""
self.modules = [[]]
self.modules_count = 0
self.data_cache = None
self.data_list = []
def add_data(self, data, optimize=20):
"""
Add data to this QR Code.
        :param optimize: Data will be split into multiple chunks to optimize
            the QR size by finding chunks of at least this length that can use
            a more compressed encoding mode. Set to ``0`` to avoid optimizing
            at all.
"""
if isinstance(data, util.QRData):
self.data_list.append(data)
elif optimize:
self.data_list.extend(util.optimal_data_chunks(data, minimum=optimize))
else:
self.data_list.append(util.QRData(data))
self.data_cache = None
def make(self, fit=True):
"""
Compile the data into a QR Code array.
        :param fit: If ``True`` (or if a version has not been provided), find the
best fit for the data to avoid data overflow errors.
"""
if fit or (self.version is None):
self.best_fit(start=self.version)
if self.mask_pattern is None:
self.makeImpl(False, self.best_mask_pattern())
else:
self.makeImpl(False, self.mask_pattern)
def makeImpl(self, test, mask_pattern):
self.modules_count = self.version * 4 + 17
if self.version in precomputed_qr_blanks:
self.modules = copy_2d_array(precomputed_qr_blanks[self.version])
else:
self.modules = [
[None] * self.modules_count for i in range(self.modules_count)
]
self.setup_position_probe_pattern(0, 0)
self.setup_position_probe_pattern(self.modules_count - 7, 0)
self.setup_position_probe_pattern(0, self.modules_count - 7)
self.setup_position_adjust_pattern()
self.setup_timing_pattern()
precomputed_qr_blanks[self.version] = copy_2d_array(self.modules)
self.setup_type_info(test, mask_pattern)
if self.version >= 7:
self.setup_type_number(test)
if self.data_cache is None:
self.data_cache = util.create_data(
self.version, self.error_correction, self.data_list
)
self.map_data(self.data_cache, mask_pattern)
def setup_position_probe_pattern(self, row, col):
for r in range(-1, 8):
if row + r <= -1 or self.modules_count <= row + r:
continue
for c in range(-1, 8):
if col + c <= -1 or self.modules_count <= col + c:
continue
if (
(0 <= r <= 6 and c in {0, 6})
or (0 <= c <= 6 and r in {0, 6})
or (2 <= r <= 4 and 2 <= c <= 4)
):
self.modules[row + r][col + c] = True
else:
self.modules[row + r][col + c] = False
def best_fit(self, start=None):
"""
        Find the minimum QR version required to fit the data.
"""
if start is None:
start = 1
util.check_version(start)
# Corresponds to the code in util.create_data, except we don't yet know
# version, so optimistically assume start and check later
mode_sizes = util.mode_sizes_for_version(start)
buffer = util.BitBuffer()
for data in self.data_list:
buffer.put(data.mode, 4)
buffer.put(len(data), mode_sizes[data.mode])
data.write(buffer)
needed_bits = len(buffer)
self.version = bisect_left(
util.BIT_LIMIT_TABLE[self.error_correction], needed_bits, start
)
if self.version == 41:
raise exceptions.DataOverflowError()
# Now check whether we need more bits for the mode sizes, recursing if
# our guess was too low
if mode_sizes is not util.mode_sizes_for_version(self.version):
self.best_fit(start=self.version)
return self.version
def best_mask_pattern(self):
"""
Find the most efficient mask pattern.
"""
min_lost_point = 0
pattern = 0
for i in range(8):
self.makeImpl(True, i)
lost_point = util.lost_point(self.modules)
if i == 0 or min_lost_point > lost_point:
min_lost_point = lost_point
pattern = i
return pattern
def print_tty(self, out=None):
"""
Output the QR Code only using TTY colors.
If the data has not been compiled yet, make it first.
"""
        if out is None:
            out = sys.stdout
if not out.isatty():
raise OSError("Not a tty")
if self.data_cache is None:
self.make()
modcount = self.modules_count
out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n")
for r in range(modcount):
out.write("\x1b[1;47m \x1b[40m")
for c in range(modcount):
if self.modules[r][c]:
out.write(" ")
else:
out.write("\x1b[1;47m \x1b[40m")
out.write("\x1b[1;47m \x1b[0m\n")
out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n")
out.flush()
def print_ascii(self, out=None, tty=False, invert=False):
"""
Output the QR Code using ASCII characters.
:param tty: use fixed TTY color codes (forces invert=True)
:param invert: invert the ASCII characters (solid <-> transparent)
"""
if out is None:
out = sys.stdout
if tty and not out.isatty():
raise OSError("Not a tty")
if self.data_cache is None:
self.make()
modcount = self.modules_count
codes = [bytes((code,)).decode("cp437") for code in (255, 223, 220, 219)]
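        # cp437 bytes 255, 223, 220 and 219 decode to a blank, an upper half
        # block, a lower half block and a full block respectively, so each
        # printed character encodes two vertically stacked modules.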
if tty:
invert = True
if invert:
codes.reverse()
def get_module(x, y) -> int:
if invert and self.border and max(x, y) >= modcount + self.border:
return 1
if min(x, y) < 0 or max(x, y) >= modcount:
return 0
return cast(int, self.modules[x][y])
for r in range(-self.border, modcount + self.border, 2):
if tty:
if not invert or r < modcount + self.border - 1:
out.write("\x1b[48;5;232m") # Background black
out.write("\x1b[38;5;255m") # Foreground white
for c in range(-self.border, modcount + self.border):
pos = get_module(r, c) + (get_module(r + 1, c) << 1)
out.write(codes[pos])
if tty:
out.write("\x1b[0m")
out.write("\n")
out.flush()
@overload
def make_image(self, image_factory: Literal[None] = None, **kwargs) -> GenericImage:
...
@overload
def make_image(
        self, image_factory: Optional[Type[GenericImageLocal]] = None, **kwargs
) -> GenericImageLocal:
...
def make_image(self, image_factory=None, **kwargs):
"""
Make an image from the QR Code data.
If the data has not been compiled yet, make it first.
"""
_check_box_size(self.box_size)
if self.data_cache is None:
self.make()
if image_factory is not None:
assert issubclass(image_factory, BaseImage)
else:
image_factory = self.image_factory
if image_factory is None:
from qrcode.image.pil import Image, PilImage
# Use PIL by default if available, otherwise use PyPNG.
image_factory = PilImage if Image else PyPNGImage
im = image_factory(
self.border,
self.modules_count,
self.box_size,
qrcode_modules=self.modules,
**kwargs,
)
if im.needs_drawrect:
for r in range(self.modules_count):
for c in range(self.modules_count):
if im.needs_context:
im.drawrect_context(r, c, qr=self)
elif self.modules[r][c]:
im.drawrect(r, c)
if im.needs_processing:
im.process()
return im
    # Return True if and only if (row, col) lies inside the module grid.
def is_constrained(self, row: int, col: int) -> bool:
return (
row >= 0
and row < len(self.modules)
and col >= 0
and col < len(self.modules[row])
)
def setup_timing_pattern(self):
for r in range(8, self.modules_count - 8):
if self.modules[r][6] is not None:
continue
self.modules[r][6] = r % 2 == 0
for c in range(8, self.modules_count - 8):
if self.modules[6][c] is not None:
continue
self.modules[6][c] = c % 2 == 0
def setup_position_adjust_pattern(self):
pos = util.pattern_position(self.version)
for i in range(len(pos)):
row = pos[i]
for j in range(len(pos)):
col = pos[j]
if self.modules[row][col] is not None:
continue
for r in range(-2, 3):
for c in range(-2, 3):
if (
r == -2
or r == 2
or c == -2
or c == 2
or (r == 0 and c == 0)
):
self.modules[row + r][col + c] = True
else:
self.modules[row + r][col + c] = False
def setup_type_number(self, test):
bits = util.BCH_type_number(self.version)
for i in range(18):
mod = not test and ((bits >> i) & 1) == 1
self.modules[i // 3][i % 3 + self.modules_count - 8 - 3] = mod
for i in range(18):
mod = not test and ((bits >> i) & 1) == 1
self.modules[i % 3 + self.modules_count - 8 - 3][i // 3] = mod
def setup_type_info(self, test, mask_pattern):
data = (self.error_correction << 3) | mask_pattern
bits = util.BCH_type_info(data)
# vertical
for i in range(15):
mod = not test and ((bits >> i) & 1) == 1
if i < 6:
self.modules[i][8] = mod
elif i < 8:
self.modules[i + 1][8] = mod
else:
self.modules[self.modules_count - 15 + i][8] = mod
# horizontal
for i in range(15):
mod = not test and ((bits >> i) & 1) == 1
if i < 8:
self.modules[8][self.modules_count - i - 1] = mod
elif i < 9:
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
# fixed module
self.modules[self.modules_count - 8][8] = not test
def map_data(self, data, mask_pattern):
inc = -1
row = self.modules_count - 1
bitIndex = 7
byteIndex = 0
mask_func = util.mask_func(mask_pattern)
data_len = len(data)
for col in range(self.modules_count - 1, 0, -2):
if col <= 6:
col -= 1
col_range = (col, col - 1)
while True:
for c in col_range:
if self.modules[row][c] is None:
dark = False
if byteIndex < data_len:
dark = ((data[byteIndex] >> bitIndex) & 1) == 1
if mask_func(row, c):
dark = not dark
self.modules[row][c] = dark
bitIndex -= 1
if bitIndex == -1:
byteIndex += 1
bitIndex = 7
row += inc
if row < 0 or self.modules_count <= row:
row -= inc
inc = -inc
break
def get_matrix(self):
"""
Return the QR Code as a multidimensional array, including the border.
To return the array without a border, set ``self.border`` to 0 first.
"""
if self.data_cache is None:
self.make()
if not self.border:
return self.modules
width = len(self.modules) + self.border * 2
        code = [[False] * width for _ in range(self.border)]
x_border = [False] * self.border
for module in self.modules:
code.append(x_border + cast(List[bool], module) + x_border)
        code += [[False] * width for _ in range(self.border)]
return code
def active_with_neighbors(self, row: int, col: int) -> ActiveWithNeighbors:
context: List[bool] = []
for r in range(row - 1, row + 2):
for c in range(col - 1, col + 2):
context.append(self.is_constrained(r, c) and bool(self.modules[r][c]))
return ActiveWithNeighbors(*context)
| 16,462 | Python | 29.041971 | 88 | 0.51464 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/exceptions.py | class DataOverflowError(Exception):
pass
| 45 | Python | 14.333329 | 35 | 0.777778 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/base.py | from typing import NamedTuple
from qrcode import constants
EXP_TABLE = list(range(256))
LOG_TABLE = list(range(256))
for i in range(8):
EXP_TABLE[i] = 1 << i
for i in range(8, 256):
EXP_TABLE[i] = (
EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5] ^ EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8]
)
for i in range(255):
LOG_TABLE[EXP_TABLE[i]] = i
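# The recurrence above reduces x^8 modulo the QR generator polynomial
# x^8 + x^4 + x^3 + x^2 + 1 (0x11D): since x^8 == x^4 + x^3 + x^2 + 1 in
# GF(2^8), alpha^i == alpha^(i-4) ^ alpha^(i-5) ^ alpha^(i-6) ^ alpha^(i-8),
# which is exactly the XOR expression used to fill EXP_TABLE.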
RS_BLOCK_OFFSET = {
constants.ERROR_CORRECT_L: 0,
constants.ERROR_CORRECT_M: 1,
constants.ERROR_CORRECT_Q: 2,
constants.ERROR_CORRECT_H: 3,
}
RS_BLOCK_TABLE = (
# L
# M
# Q
# H
# 1
(1, 26, 19),
(1, 26, 16),
(1, 26, 13),
(1, 26, 9),
# 2
(1, 44, 34),
(1, 44, 28),
(1, 44, 22),
(1, 44, 16),
# 3
(1, 70, 55),
(1, 70, 44),
(2, 35, 17),
(2, 35, 13),
# 4
(1, 100, 80),
(2, 50, 32),
(2, 50, 24),
(4, 25, 9),
# 5
(1, 134, 108),
(2, 67, 43),
(2, 33, 15, 2, 34, 16),
(2, 33, 11, 2, 34, 12),
# 6
(2, 86, 68),
(4, 43, 27),
(4, 43, 19),
(4, 43, 15),
# 7
(2, 98, 78),
(4, 49, 31),
(2, 32, 14, 4, 33, 15),
(4, 39, 13, 1, 40, 14),
# 8
(2, 121, 97),
(2, 60, 38, 2, 61, 39),
(4, 40, 18, 2, 41, 19),
(4, 40, 14, 2, 41, 15),
# 9
(2, 146, 116),
(3, 58, 36, 2, 59, 37),
(4, 36, 16, 4, 37, 17),
(4, 36, 12, 4, 37, 13),
# 10
(2, 86, 68, 2, 87, 69),
(4, 69, 43, 1, 70, 44),
(6, 43, 19, 2, 44, 20),
(6, 43, 15, 2, 44, 16),
# 11
(4, 101, 81),
(1, 80, 50, 4, 81, 51),
(4, 50, 22, 4, 51, 23),
(3, 36, 12, 8, 37, 13),
# 12
(2, 116, 92, 2, 117, 93),
(6, 58, 36, 2, 59, 37),
(4, 46, 20, 6, 47, 21),
(7, 42, 14, 4, 43, 15),
# 13
(4, 133, 107),
(8, 59, 37, 1, 60, 38),
(8, 44, 20, 4, 45, 21),
(12, 33, 11, 4, 34, 12),
# 14
(3, 145, 115, 1, 146, 116),
(4, 64, 40, 5, 65, 41),
(11, 36, 16, 5, 37, 17),
(11, 36, 12, 5, 37, 13),
# 15
(5, 109, 87, 1, 110, 88),
(5, 65, 41, 5, 66, 42),
(5, 54, 24, 7, 55, 25),
(11, 36, 12, 7, 37, 13),
# 16
(5, 122, 98, 1, 123, 99),
(7, 73, 45, 3, 74, 46),
(15, 43, 19, 2, 44, 20),
(3, 45, 15, 13, 46, 16),
# 17
(1, 135, 107, 5, 136, 108),
(10, 74, 46, 1, 75, 47),
(1, 50, 22, 15, 51, 23),
(2, 42, 14, 17, 43, 15),
# 18
(5, 150, 120, 1, 151, 121),
(9, 69, 43, 4, 70, 44),
(17, 50, 22, 1, 51, 23),
(2, 42, 14, 19, 43, 15),
# 19
(3, 141, 113, 4, 142, 114),
(3, 70, 44, 11, 71, 45),
(17, 47, 21, 4, 48, 22),
(9, 39, 13, 16, 40, 14),
# 20
(3, 135, 107, 5, 136, 108),
(3, 67, 41, 13, 68, 42),
(15, 54, 24, 5, 55, 25),
(15, 43, 15, 10, 44, 16),
# 21
(4, 144, 116, 4, 145, 117),
(17, 68, 42),
(17, 50, 22, 6, 51, 23),
(19, 46, 16, 6, 47, 17),
# 22
(2, 139, 111, 7, 140, 112),
(17, 74, 46),
(7, 54, 24, 16, 55, 25),
(34, 37, 13),
# 23
(4, 151, 121, 5, 152, 122),
(4, 75, 47, 14, 76, 48),
(11, 54, 24, 14, 55, 25),
(16, 45, 15, 14, 46, 16),
# 24
(6, 147, 117, 4, 148, 118),
(6, 73, 45, 14, 74, 46),
(11, 54, 24, 16, 55, 25),
(30, 46, 16, 2, 47, 17),
# 25
(8, 132, 106, 4, 133, 107),
(8, 75, 47, 13, 76, 48),
(7, 54, 24, 22, 55, 25),
(22, 45, 15, 13, 46, 16),
# 26
(10, 142, 114, 2, 143, 115),
(19, 74, 46, 4, 75, 47),
(28, 50, 22, 6, 51, 23),
(33, 46, 16, 4, 47, 17),
# 27
(8, 152, 122, 4, 153, 123),
(22, 73, 45, 3, 74, 46),
(8, 53, 23, 26, 54, 24),
(12, 45, 15, 28, 46, 16),
# 28
(3, 147, 117, 10, 148, 118),
(3, 73, 45, 23, 74, 46),
(4, 54, 24, 31, 55, 25),
(11, 45, 15, 31, 46, 16),
# 29
(7, 146, 116, 7, 147, 117),
(21, 73, 45, 7, 74, 46),
(1, 53, 23, 37, 54, 24),
(19, 45, 15, 26, 46, 16),
# 30
(5, 145, 115, 10, 146, 116),
(19, 75, 47, 10, 76, 48),
(15, 54, 24, 25, 55, 25),
(23, 45, 15, 25, 46, 16),
# 31
(13, 145, 115, 3, 146, 116),
(2, 74, 46, 29, 75, 47),
(42, 54, 24, 1, 55, 25),
(23, 45, 15, 28, 46, 16),
# 32
(17, 145, 115),
(10, 74, 46, 23, 75, 47),
(10, 54, 24, 35, 55, 25),
(19, 45, 15, 35, 46, 16),
# 33
(17, 145, 115, 1, 146, 116),
(14, 74, 46, 21, 75, 47),
(29, 54, 24, 19, 55, 25),
(11, 45, 15, 46, 46, 16),
# 34
(13, 145, 115, 6, 146, 116),
(14, 74, 46, 23, 75, 47),
(44, 54, 24, 7, 55, 25),
(59, 46, 16, 1, 47, 17),
# 35
(12, 151, 121, 7, 152, 122),
(12, 75, 47, 26, 76, 48),
(39, 54, 24, 14, 55, 25),
(22, 45, 15, 41, 46, 16),
# 36
(6, 151, 121, 14, 152, 122),
(6, 75, 47, 34, 76, 48),
(46, 54, 24, 10, 55, 25),
(2, 45, 15, 64, 46, 16),
# 37
(17, 152, 122, 4, 153, 123),
(29, 74, 46, 14, 75, 47),
(49, 54, 24, 10, 55, 25),
(24, 45, 15, 46, 46, 16),
# 38
(4, 152, 122, 18, 153, 123),
(13, 74, 46, 32, 75, 47),
(48, 54, 24, 14, 55, 25),
(42, 45, 15, 32, 46, 16),
# 39
(20, 147, 117, 4, 148, 118),
(40, 75, 47, 7, 76, 48),
(43, 54, 24, 22, 55, 25),
(10, 45, 15, 67, 46, 16),
# 40
(19, 148, 118, 6, 149, 119),
(18, 75, 47, 31, 76, 48),
(34, 54, 24, 34, 55, 25),
(20, 45, 15, 61, 46, 16),
)
def glog(n):
if n < 1: # pragma: no cover
raise ValueError(f"glog({n})")
return LOG_TABLE[n]
def gexp(n):
return EXP_TABLE[n % 255]
class Polynomial:
def __init__(self, num, shift):
if not num: # pragma: no cover
raise Exception(f"{len(num)}/{shift}")
offset = 0
for offset in range(len(num)):
if num[offset] != 0:
break
self.num = num[offset:] + [0] * shift
def __getitem__(self, index):
return self.num[index]
def __iter__(self):
return iter(self.num)
def __len__(self):
return len(self.num)
def __mul__(self, other):
num = [0] * (len(self) + len(other) - 1)
for i, item in enumerate(self):
for j, other_item in enumerate(other):
num[i + j] ^= gexp(glog(item) + glog(other_item))
return Polynomial(num, 0)
def __mod__(self, other):
difference = len(self) - len(other)
if difference < 0:
return self
ratio = glog(self[0]) - glog(other[0])
num = [
item ^ gexp(glog(other_item) + ratio)
for item, other_item in zip(self, other)
]
if difference:
num.extend(self[-difference:])
# recursive call
return Polynomial(num, 0) % other
class RSBlock(NamedTuple):
total_count: int
data_count: int
def rs_blocks(version, error_correction):
if error_correction not in RS_BLOCK_OFFSET: # pragma: no cover
raise Exception(
"bad rs block @ version: %s / error_correction: %s"
% (version, error_correction)
)
offset = RS_BLOCK_OFFSET[error_correction]
rs_block = RS_BLOCK_TABLE[(version - 1) * 4 + offset]
blocks = []
for i in range(0, len(rs_block), 3):
count, total_count, data_count = rs_block[i : i + 3]
for _ in range(count):
blocks.append(RSBlock(total_count, data_count))
return blocks
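# For example, rs_blocks(1, constants.ERROR_CORRECT_M) returns
# [RSBlock(total_count=26, data_count=16)]: one block of 26 codewords,
# 16 carrying data and 10 providing error correction.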
| 7,288 | Python | 22.213376 | 81 | 0.432766 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/console_scripts.py | #!/usr/bin/env python
"""
qr - Convert stdin (or the first argument) to a QR Code.
When stdout is a tty the QR Code is printed to the terminal and when stdout is
a pipe to a file an image is written. The default image format is PNG.
"""
import optparse
import os
import sys
from typing import Dict, Iterable, NoReturn, Optional, Set, Type
import qrcode
from qrcode.image.base import BaseImage, DrawerAliases
# The next block is added to get the terminal to display properly on MS platforms
if sys.platform.startswith(("win", "cygwin")): # pragma: no cover
import colorama # type: ignore
colorama.init()
default_factories = {
"pil": "qrcode.image.pil.PilImage",
"png": "qrcode.image.pure.PyPNGImage",
"svg": "qrcode.image.svg.SvgImage",
"svg-fragment": "qrcode.image.svg.SvgFragmentImage",
"svg-path": "qrcode.image.svg.SvgPathImage",
# Keeping for backwards compatibility:
"pymaging": "qrcode.image.pure.PymagingImage",
}
error_correction = {
"L": qrcode.ERROR_CORRECT_L,
"M": qrcode.ERROR_CORRECT_M,
"Q": qrcode.ERROR_CORRECT_Q,
"H": qrcode.ERROR_CORRECT_H,
}
def main(args=None):
if args is None:
args = sys.argv[1:]
from pkg_resources import get_distribution
version = get_distribution("qrcode").version
parser = optparse.OptionParser(usage=(__doc__ or "").strip(), version=version)
# Wrap parser.error in a typed NoReturn method for better typing.
def raise_error(msg: str) -> NoReturn:
parser.error(msg)
raise # pragma: no cover
parser.add_option(
"--factory",
help="Full python path to the image factory class to "
"create the image with. You can use the following shortcuts to the "
f"built-in image factory classes: {commas(default_factories)}.",
)
parser.add_option(
"--factory-drawer",
help=f"Use an alternate drawer. {get_drawer_help()}.",
)
parser.add_option(
"--optimize",
type=int,
help="Optimize the data by looking for chunks "
"of at least this many characters that could use a more efficient "
"encoding method. Use 0 to turn off chunk optimization.",
)
parser.add_option(
"--error-correction",
type="choice",
choices=sorted(error_correction.keys()),
default="M",
help="The error correction level to use. Choices are L (7%), "
"M (15%, default), Q (25%), and H (30%).",
)
parser.add_option(
"--ascii", help="Print as ascii even if stdout is piped.", action="store_true"
)
parser.add_option(
"--output",
help="The output file. If not specified, the image is sent to "
"the standard output.",
)
opts, args = parser.parse_args(args)
if opts.factory:
module = default_factories.get(opts.factory, opts.factory)
try:
image_factory = get_factory(module)
except ValueError as e:
raise_error(str(e))
else:
image_factory = None
qr = qrcode.QRCode(
error_correction=error_correction[opts.error_correction],
image_factory=image_factory,
)
if args:
data = args[0]
data = data.encode(errors="surrogateescape")
else:
data = sys.stdin.buffer.read()
if opts.optimize is None:
qr.add_data(data)
else:
qr.add_data(data, optimize=opts.optimize)
if opts.output:
img = qr.make_image()
with open(opts.output, "wb") as out:
img.save(out)
else:
if image_factory is None and (os.isatty(sys.stdout.fileno()) or opts.ascii):
qr.print_ascii(tty=not opts.ascii)
return
kwargs = {}
aliases: Optional[DrawerAliases] = getattr(
qr.image_factory, "drawer_aliases", None
)
if opts.factory_drawer:
if not aliases:
raise_error("The selected factory has no drawer aliases.")
if opts.factory_drawer not in aliases:
raise_error(
f"{opts.factory_drawer} factory drawer not found."
f" Expected {commas(aliases)}"
)
drawer_cls, drawer_kwargs = aliases[opts.factory_drawer]
kwargs["module_drawer"] = drawer_cls(**drawer_kwargs)
img = qr.make_image(**kwargs)
sys.stdout.flush()
img.save(sys.stdout.buffer)
def get_factory(module: str) -> Type[BaseImage]:
if "." not in module:
raise ValueError("The image factory is not a full python path")
module, name = module.rsplit(".", 1)
imp = __import__(module, {}, {}, [name])
return getattr(imp, name)
def get_drawer_help() -> str:
help: Dict[str, Set] = {}
for alias, module in default_factories.items():
try:
image = get_factory(module)
except ImportError: # pragma: no cover
continue
aliases: Optional[DrawerAliases] = getattr(image, "drawer_aliases", None)
if not aliases:
continue
factories = help.setdefault(commas(aliases), set())
factories.add(alias)
return ". ".join(
f"For {commas(factories, 'and')}, use: {aliases}"
for aliases, factories in help.items()
)
def commas(items: Iterable[str], joiner="or") -> str:
items = tuple(items)
if not items:
return ""
if len(items) == 1:
return items[0]
return f"{', '.join(items[:-1])} {joiner} {items[-1]}"
if __name__ == "__main__": # pragma: no cover
main()
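# Example invocations, assuming the console entry point is installed as
# ``qr`` (the data strings are illustrative):
#
#   qr "Some text" > test.png
#   qr --factory=svg --output=test.svg "Some text"
#   echo "Some text" | qr --error-correction=H --ascii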
| 5,571 | Python | 29.955555 | 86 | 0.602046 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/constants.py | # QR error correction levels
ERROR_CORRECT_L = 1
ERROR_CORRECT_M = 0
ERROR_CORRECT_Q = 3
ERROR_CORRECT_H = 2
| 106 | Python | 16.833331 | 25 | 0.716981 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/__init__.py | from qrcode.main import QRCode
from qrcode.main import make # noqa
from qrcode.constants import ( # noqa
ERROR_CORRECT_L,
ERROR_CORRECT_M,
ERROR_CORRECT_Q,
ERROR_CORRECT_H,
)
from qrcode import image # noqa
def run_example(data="http://www.lincolnloop.com", *args, **kwargs):
"""
Build an example QR Code and display it.
There's an even easier way than the code here though: just use the ``make``
shortcut.
"""
qr = QRCode(*args, **kwargs)
qr.add_data(data)
im = qr.make_image()
im.show()
if __name__ == "__main__": # pragma: no cover
import sys
run_example(*sys.argv[1:])
| 645 | Python | 19.838709 | 79 | 0.626357 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/LUT.py | # Store all kinds of lookup table.
# # generate rsPoly lookup table.
# from qrcode import base
# def create_bytes(rs_blocks):
# for r in range(len(rs_blocks)):
# dcCount = rs_blocks[r].data_count
# ecCount = rs_blocks[r].total_count - dcCount
# rsPoly = base.Polynomial([1], 0)
# for i in range(ecCount):
# rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0)
# return ecCount, rsPoly
# rsPoly_LUT = {}
# for version in range(1,41):
# for error_correction in range(4):
# rs_blocks_list = base.rs_blocks(version, error_correction)
# ecCount, rsPoly = create_bytes(rs_blocks_list)
# rsPoly_LUT[ecCount]=rsPoly.num
# print(rsPoly_LUT)
# Result. Usage: input: ecCount, output: Polynomial.num
# e.g. rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0)
rsPoly_LUT = {
7: [1, 127, 122, 154, 164, 11, 68, 117],
10: [1, 216, 194, 159, 111, 199, 94, 95, 113, 157, 193],
13: [1, 137, 73, 227, 17, 177, 17, 52, 13, 46, 43, 83, 132, 120],
15: [1, 29, 196, 111, 163, 112, 74, 10, 105, 105, 139, 132, 151, 32, 134, 26],
16: [1, 59, 13, 104, 189, 68, 209, 30, 8, 163, 65, 41, 229, 98, 50, 36, 59],
17: [1, 119, 66, 83, 120, 119, 22, 197, 83, 249, 41, 143, 134, 85, 53, 125, 99, 79],
    18: [1, 239, 251, 183, 113, 149, 175, 199, 215, 240, 220, 73, 82, 173, 75,
        32, 67, 217, 146],
    20: [1, 152, 185, 240, 5, 111, 99, 6, 220, 112, 150, 69, 36, 187, 22, 228,
        198, 121, 121, 165, 174],
    22: [1, 89, 179, 131, 176, 182, 244, 19, 189, 69, 40, 28, 137, 29, 123, 67,
        253, 86, 218, 230, 26, 145, 245],
    24: [1, 122, 118, 169, 70, 178, 237, 216, 102, 115, 150, 229, 73, 130, 72,
        61, 43, 206, 1, 237, 247, 127, 217, 144, 117],
    26: [1, 246, 51, 183, 4, 136, 98, 199, 152, 77, 56, 206, 24, 145, 40, 209,
        117, 233, 42, 135, 68, 70, 144, 146, 77, 43, 94],
    28: [1, 252, 9, 28, 13, 18, 251, 208, 150, 103, 174, 100, 41, 167, 12, 247,
        56, 117, 119, 233, 127, 181, 100, 121, 147, 176, 74, 58, 197],
    30: [1, 212, 246, 77, 73, 195, 192, 75, 98, 5, 70, 103, 177, 22, 217, 138,
        51, 181, 246, 72, 25, 18, 46, 228, 74, 216, 195, 11, 106, 130, 150],
}
| 3,599 | Python | 15.071429 | 88 | 0.335649 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/release.py | """
This file provides zest.releaser entrypoints using when releasing new
qrcode versions.
"""
import os
import re
import datetime
def update_manpage(data):
"""
Update the version in the manpage document.
"""
if data["name"] != "qrcode":
return
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filename = os.path.join(base_dir, "doc", "qr.1")
with open(filename) as f:
lines = f.readlines()
changed = False
for i, line in enumerate(lines):
if not line.startswith(".TH "):
continue
parts = re.split(r'"([^"]*)"', line)
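        # re.split with a capturing group keeps the quoted fields at the odd
        # indexes, so for a header like '.TH "<date>" "<version>" ...',
        # parts[1] holds the date and parts[3] the version.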
if len(parts) < 5:
continue
changed = parts[3] != data["new_version"]
if changed:
# Update version
parts[3] = data["new_version"]
# Update date
parts[1] = datetime.datetime.now().strftime("%-d %b %Y")
lines[i] = '"'.join(parts)
break
if changed:
with open(filename, "w") as f:
for line in lines:
f.write(line)
| 1,079 | Python | 24.714285 | 74 | 0.543095 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/base.py | import abc
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, Union
from qrcode.image.styles.moduledrawers.base import QRModuleDrawer
if TYPE_CHECKING:
from qrcode.main import ActiveWithNeighbors, QRCode
DrawerAliases = Dict[str, Tuple[Type[QRModuleDrawer], Dict[str, Any]]]
class BaseImage:
"""
Base QRCode image output class.
"""
kind: Optional[str] = None
allowed_kinds: Optional[Tuple[str]] = None
needs_context = False
needs_processing = False
needs_drawrect = True
def __init__(self, border, width, box_size, *args, **kwargs):
self.border = border
self.width = width
self.box_size = box_size
self.pixel_size = (self.width + self.border * 2) * self.box_size
self.modules = kwargs.pop("qrcode_modules")
self._img = self.new_image(**kwargs)
self.init_new_image()
@abc.abstractmethod
def drawrect(self, row, col):
"""
Draw a single rectangle of the QR code.
"""
def drawrect_context(self, row: int, col: int, qr: "QRCode"):
"""
Draw a single rectangle of the QR code given the surrounding context
"""
raise NotImplementedError("BaseImage.drawrect_context") # pragma: no cover
def process(self):
"""
Processes QR code after completion
"""
raise NotImplementedError("BaseImage.drawimage") # pragma: no cover
@abc.abstractmethod
def save(self, stream, kind=None):
"""
Save the image file.
"""
def pixel_box(self, row, col):
"""
A helper method for pixel-based image generators that specifies the
four pixel coordinates for a single rect.
"""
x = (col + self.border) * self.box_size
y = (row + self.border) * self.box_size
return (
(x, y),
(x + self.box_size - 1, y + self.box_size - 1),
)
@abc.abstractmethod
def new_image(self, **kwargs) -> Any:
"""
Build the image class. Subclasses should return the class created.
"""
def init_new_image(self):
pass
def get_image(self, **kwargs):
"""
Return the image class for further processing.
"""
return self._img
def check_kind(self, kind, transform=None):
"""
Get the image type.
"""
if kind is None:
kind = self.kind
allowed = not self.allowed_kinds or kind in self.allowed_kinds
if transform:
kind = transform(kind)
if not allowed:
allowed = kind in self.allowed_kinds
if not allowed:
raise ValueError(f"Cannot set {type(self).__name__} type to {kind}")
return kind
def is_eye(self, row: int, col: int):
"""
Find whether the referenced module is in an eye.
"""
return (
(row < 7 and col < 7)
or (row < 7 and self.width - col < 8)
or (self.width - row < 8 and col < 7)
)
class BaseImageWithDrawer(BaseImage):
default_drawer_class: Type[QRModuleDrawer]
drawer_aliases: DrawerAliases = {}
def get_default_module_drawer(self) -> QRModuleDrawer:
return self.default_drawer_class()
def get_default_eye_drawer(self) -> QRModuleDrawer:
return self.default_drawer_class()
needs_context = True
module_drawer: "QRModuleDrawer"
eye_drawer: "QRModuleDrawer"
def __init__(
self,
*args,
module_drawer: Union[QRModuleDrawer, str, None] = None,
eye_drawer: Union[QRModuleDrawer, str, None] = None,
**kwargs,
):
self.module_drawer = (
self.get_drawer(module_drawer) or self.get_default_module_drawer()
)
# The eye drawer can be overridden by another module drawer as well,
# but you have to be more careful with these in order to make the QR
# code still parseable
self.eye_drawer = self.get_drawer(eye_drawer) or self.get_default_eye_drawer()
super().__init__(*args, **kwargs)
def get_drawer(
self, drawer: Union[QRModuleDrawer, str, None]
) -> Optional[QRModuleDrawer]:
if not isinstance(drawer, str):
return drawer
drawer_cls, kwargs = self.drawer_aliases[drawer]
return drawer_cls(**kwargs)
def init_new_image(self):
self.module_drawer.initialize(img=self)
self.eye_drawer.initialize(img=self)
return super().init_new_image()
def drawrect_context(self, row: int, col: int, qr: "QRCode"):
box = self.pixel_box(row, col)
drawer = self.eye_drawer if self.is_eye(row, col) else self.module_drawer
is_active: Union[bool, ActiveWithNeighbors] = (
qr.active_with_neighbors(row, col)
if drawer.needs_neighbors
else bool(qr.modules[row][col])
)
drawer.drawrect(box, is_active)
| 4,984 | Python | 29.212121 | 86 | 0.589687 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styledpil.py | # Needed on case-insensitive filesystems
from __future__ import absolute_import
import qrcode.image.base
from qrcode.compat.pil import Image
from qrcode.image.styles.colormasks import QRColorMask, SolidFillColorMask
from qrcode.image.styles.moduledrawers import SquareModuleDrawer
class StyledPilImage(qrcode.image.base.BaseImageWithDrawer):
"""
Styled PIL image builder, default format is PNG.
    This differs from the PilImage in that there is a module_drawer, a
    color_mask, and an optional embedded image.
    The module_drawer should extend the QRModuleDrawer class and implement
    drawrect(self, box, is_active), and probably also the initialize
    function. This will draw an individual "module" or square on the QR code.
    The color_mask will extend the QRColorMask class and will at the very
    least implement the get_fg_pixel(image, x, y) function, calculating a
    color to put on the image at the pixel location (x, y). (More advanced
    behaviour can be achieved by instead overriding other functions defined
    in the QRColorMask class.)
    The embedded image can be specified either by path or with a Pillow
    Image, and if present it will be placed in the middle of the QR code. No
    effort is made to ensure that the QR code is still legible after the
    image has been placed there; Q or H error correction levels are
    recommended to maintain data integrity. A resampling filter can be
    specified (defaulting to PIL.Image.Resampling.LANCZOS) for resizing; see
    PIL.Image.resize() for possible options for this parameter.
"""
kind = "PNG"
needs_processing = True
color_mask: QRColorMask
default_drawer_class = SquareModuleDrawer
def __init__(self, *args, **kwargs):
self.color_mask = kwargs.get("color_mask", SolidFillColorMask())
embeded_image_path = kwargs.get("embeded_image_path", None)
self.embeded_image = kwargs.get("embeded_image", None)
self.embeded_image_resample = kwargs.get(
"embeded_image_resample", Image.Resampling.LANCZOS
)
if not self.embeded_image and embeded_image_path:
self.embeded_image = Image.open(embeded_image_path)
        # The paint_color is the color the module drawer will use to draw
        # upon the canvas. During the color mask process, pixels that are
        # paint_color are replaced by a newly-calculated color.
self.paint_color = tuple(0 for i in self.color_mask.back_color)
if self.color_mask.has_transparency:
self.paint_color = tuple([*self.color_mask.back_color[:3], 255])
super().__init__(*args, **kwargs)
def new_image(self, **kwargs):
mode = (
"RGBA"
if (
self.color_mask.has_transparency
or (self.embeded_image and "A" in self.embeded_image.getbands())
)
else "RGB"
)
# This is the background color. Should be white or whiteish
back_color = self.color_mask.back_color
return Image.new(mode, (self.pixel_size, self.pixel_size), back_color)
def init_new_image(self):
self.color_mask.initialize(self, self._img)
super().init_new_image()
def process(self):
self.color_mask.apply_mask(self._img)
if self.embeded_image:
self.draw_embeded_image()
def draw_embeded_image(self):
if not self.embeded_image:
return
total_width, _ = self._img.size
total_width = int(total_width)
logo_width_ish = int(total_width / 4)
logo_offset = (
int((int(total_width / 2) - int(logo_width_ish / 2)) / self.box_size)
* self.box_size
) # round the offset to the nearest module
logo_position = (logo_offset, logo_offset)
logo_width = total_width - logo_offset * 2
region = self.embeded_image
region = region.resize((logo_width, logo_width), self.embeded_image_resample)
if "A" in region.getbands():
self._img.alpha_composite(region, logo_position)
else:
self._img.paste(region, logo_position)
def save(self, stream, format=None, **kwargs):
if format is None:
format = kwargs.get("kind", self.kind)
if "kind" in kwargs:
del kwargs["kind"]
self._img.save(stream, format=format, **kwargs)
def __getattr__(self, name):
return getattr(self._img, name)
| 4,477 | Python | 38.628318 | 85 | 0.653116 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/pil.py | import qrcode.image.base
from qrcode.compat.pil import Image, ImageDraw
class PilImage(qrcode.image.base.BaseImage):
"""
PIL image builder, default format is PNG.
"""
kind = "PNG"
def new_image(self, **kwargs):
back_color = kwargs.get("back_color", "white")
fill_color = kwargs.get("fill_color", "black")
try:
fill_color = fill_color.lower()
except AttributeError:
pass
try:
back_color = back_color.lower()
except AttributeError:
pass
# L mode (1 mode) color = (r*299 + g*587 + b*114)//1000
if fill_color == "black" and back_color == "white":
mode = "1"
fill_color = 0
if back_color == "white":
back_color = 255
elif back_color == "transparent":
mode = "RGBA"
back_color = None
else:
mode = "RGB"
img = Image.new(mode, (self.pixel_size, self.pixel_size), back_color)
self.fill_color = fill_color
self._idr = ImageDraw.Draw(img)
return img
def drawrect(self, row, col):
box = self.pixel_box(row, col)
self._idr.rectangle(box, fill=self.fill_color)
def save(self, stream, format=None, **kwargs):
kind = kwargs.pop("kind", self.kind)
if format is None:
format = kind
self._img.save(stream, format=format, **kwargs)
def __getattr__(self, name):
return getattr(self._img, name)
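# A minimal usage sketch (the data string and file name are illustrative);
# ``fill_color`` and ``back_color`` are forwarded to ``new_image`` above:
#
#   import qrcode
#
#   qr = qrcode.QRCode()
#   qr.add_data("Some text")
#   img = qr.make_image(fill_color="black", back_color="white")
#   img.save("qr.png")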
| 1,524 | Python | 26.727272 | 77 | 0.541995 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/svg.py | import decimal
from decimal import Decimal
from typing import List, Optional, Type, Union, overload
from typing_extensions import Literal
import qrcode.image.base
from qrcode.compat.etree import ET
from qrcode.image.styles.moduledrawers import svg as svg_drawers
from qrcode.image.styles.moduledrawers.base import QRModuleDrawer
class SvgFragmentImage(qrcode.image.base.BaseImageWithDrawer):
"""
SVG image builder
Creates a QR-code image as a SVG document fragment.
"""
_SVG_namespace = "http://www.w3.org/2000/svg"
kind = "SVG"
allowed_kinds = ("SVG",)
default_drawer_class: Type[QRModuleDrawer] = svg_drawers.SvgSquareDrawer
def __init__(self, *args, **kwargs):
ET.register_namespace("svg", self._SVG_namespace)
super().__init__(*args, **kwargs)
# Save the unit size, for example the default box_size of 10 is '1mm'.
self.unit_size = self.units(self.box_size)
@overload
def units(self, pixels: Union[int, Decimal], text: Literal[False]) -> Decimal:
...
@overload
def units(self, pixels: Union[int, Decimal], text: Literal[True] = True) -> str:
...
def units(self, pixels, text=True):
"""
A box_size of 10 (default) equals 1mm.
"""
units = Decimal(pixels) / 10
if not text:
return units
units = units.quantize(Decimal("0.001"))
context = decimal.Context(traps=[decimal.Inexact])
try:
for d in (Decimal("0.01"), Decimal("0.1"), Decimal("0")):
units = units.quantize(d, context=context)
except decimal.Inexact:
pass
return f"{units}mm"
def save(self, stream, kind=None):
self.check_kind(kind=kind)
self._write(stream)
def to_string(self, **kwargs):
return ET.tostring(self._img, **kwargs)
def new_image(self, **kwargs):
return self._svg(**kwargs)
def _svg(self, tag=None, version="1.1", **kwargs):
if tag is None:
tag = ET.QName(self._SVG_namespace, "svg")
dimension = self.units(self.pixel_size)
return ET.Element(
tag, # type: ignore
width=dimension,
height=dimension,
version=version,
**kwargs,
)
def _write(self, stream):
ET.ElementTree(self._img).write(stream, xml_declaration=False)
class SvgImage(SvgFragmentImage):
"""
Standalone SVG image builder
Creates a QR-code image as a standalone SVG document.
"""
background: Optional[str] = None
drawer_aliases: qrcode.image.base.DrawerAliases = {
"circle": (svg_drawers.SvgCircleDrawer, {}),
"gapped-circle": (svg_drawers.SvgCircleDrawer, {"size_ratio": Decimal(0.8)}),
"gapped-square": (svg_drawers.SvgSquareDrawer, {"size_ratio": Decimal(0.8)}),
}
def _svg(self, tag="svg", **kwargs):
svg = super()._svg(tag=tag, **kwargs)
svg.set("xmlns", self._SVG_namespace)
if self.background:
svg.append(
ET.Element(
"rect",
fill=self.background,
x="0",
y="0",
width="100%",
height="100%",
)
)
return svg
def _write(self, stream):
ET.ElementTree(self._img).write(stream, encoding="UTF-8", xml_declaration=True)
class SvgPathImage(SvgImage):
"""
SVG image builder with one single <path> element (removes white spaces
between individual QR points).
"""
QR_PATH_STYLE = {
"fill": "#000000",
"fill-opacity": "1",
"fill-rule": "nonzero",
"stroke": "none",
}
needs_processing = True
path: Optional[ET.Element] = None
default_drawer_class: Type[QRModuleDrawer] = svg_drawers.SvgPathSquareDrawer
drawer_aliases = {
"circle": (svg_drawers.SvgPathCircleDrawer, {}),
"gapped-circle": (
svg_drawers.SvgPathCircleDrawer,
{"size_ratio": Decimal(0.8)},
),
"gapped-square": (
svg_drawers.SvgPathSquareDrawer,
{"size_ratio": Decimal(0.8)},
),
}
def __init__(self, *args, **kwargs):
self._subpaths: List[str] = []
super().__init__(*args, **kwargs)
def _svg(self, viewBox=None, **kwargs):
if viewBox is None:
dimension = self.units(self.pixel_size, text=False)
viewBox = "0 0 {d} {d}".format(d=dimension)
return super()._svg(viewBox=viewBox, **kwargs)
def process(self):
# Store the path just in case someone wants to use it again or in some
# unique way.
self.path = ET.Element(
ET.QName("path"), # type: ignore
d="".join(self._subpaths),
id="qr-path",
**self.QR_PATH_STYLE,
)
self._subpaths = []
self._img.append(self.path)
class SvgFillImage(SvgImage):
"""
An SvgImage that fills the background to white.
"""
background = "white"
class SvgPathFillImage(SvgPathImage):
"""
An SvgPathImage that fills the background to white.
"""
background = "white"
| 5,246 | Python | 28.15 | 87 | 0.572055 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/pure.py | from itertools import chain
import png
import qrcode.image.base
class PyPNGImage(qrcode.image.base.BaseImage):
"""
pyPNG image builder.
"""
kind = "PNG"
allowed_kinds = ("PNG",)
needs_drawrect = False
def new_image(self, **kwargs):
return png.Writer(self.pixel_size, self.pixel_size, greyscale=True, bitdepth=1)
def drawrect(self, row, col):
"""
Not used.
"""
def save(self, stream, kind=None):
if isinstance(stream, str):
stream = open(stream, "wb")
self._img.write(stream, self.rows_iter())
def rows_iter(self):
yield from self.border_rows_iter()
border_col = [1] * (self.box_size * self.border)
for module_row in self.modules:
row = (
border_col
+ list(
chain.from_iterable(
([not point] * self.box_size) for point in module_row
)
)
+ border_col
)
for _ in range(self.box_size):
yield row
yield from self.border_rows_iter()
def border_rows_iter(self):
border_row = [1] * (self.box_size * (self.width + self.border * 2))
for _ in range(self.border * self.box_size):
yield border_row
# Keeping this for backwards compatibility.
PymagingImage = PyPNGImage
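# A minimal PIL-free usage sketch (the data string and file name are
# illustrative):
#
#   import qrcode
#   from qrcode.image.pure import PyPNGImage
#
#   img = qrcode.make("Some text", image_factory=PyPNGImage)
#   img.save("qr.png")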
| 1,412 | Python | 24.690909 | 87 | 0.535411 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styles/colormasks.py | # Needed on case-insensitive filesystems
from __future__ import absolute_import
import math
from qrcode.compat.pil import Image
class QRColorMask:
"""
QRColorMask is used to color in the QRCode.
By the time apply_mask is called, the QRModuleDrawer of the StyledPilImage
will have drawn all of the modules on the canvas (the color of these
modules will be mostly black, although antialiasing may result in
gradients) In the base class, apply_mask is implemented such that the
background color will remain, but the foreground pixels will be replaced by
a color determined by a call to get_fg_pixel. There is additional
calculation done to preserve the gradient artifacts of antialiasing.
All QRColorMask objects should be careful about RGB vs RGBA color spaces.
For examples of what these look like, see doc/color_masks.png
"""
back_color = (255, 255, 255)
has_transparency = False
paint_color = back_color
def initialize(self, styledPilImage, image):
self.paint_color = styledPilImage.paint_color
def apply_mask(self, image):
width, height = image.size
for x in range(width):
for y in range(height):
norm = self.extrap_color(
self.back_color, self.paint_color, image.getpixel((x, y))
)
if norm is not None:
image.putpixel(
(x, y),
self.interp_color(
self.get_bg_pixel(image, x, y),
self.get_fg_pixel(image, x, y),
norm,
),
)
else:
image.putpixel((x, y), self.get_bg_pixel(image, x, y))
def get_fg_pixel(self, image, x, y):
raise NotImplementedError("QRModuleDrawer.paint_fg_pixel")
def get_bg_pixel(self, image, x, y):
return self.back_color
# The following functions are helpful for color calculation:
# interpolate a number between two numbers
def interp_num(self, n1, n2, norm):
return int(n2 * norm + n1 * (1 - norm))
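    # e.g. interp_num(0, 255, 0.5) == 127 (int() truncates toward zero)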
    # interpolate a color between two colors
def interp_color(self, col1, col2, norm):
return tuple(self.interp_num(col1[i], col2[i], norm) for i in range(len(col1)))
# find the interpolation coefficient between two numbers
def extrap_num(self, n1, n2, interped_num):
if n2 == n1:
return None
else:
return (interped_num - n1) / (n2 - n1)
    # find the interpolation coefficient between two colors
def extrap_color(self, col1, col2, interped_color):
normed = []
for c1, c2, ci in zip(col1, col2, interped_color):
extrap = self.extrap_num(c1, c2, ci)
if extrap is not None:
normed.append(extrap)
if not normed:
return None
return sum(normed) / len(normed)
class SolidFillColorMask(QRColorMask):
"""
Just fills in the background with one color and the foreground with another
"""
def __init__(self, back_color=(255, 255, 255), front_color=(0, 0, 0)):
self.back_color = back_color
self.front_color = front_color
self.has_transparency = len(self.back_color) == 4
def apply_mask(self, image):
if self.back_color == (255, 255, 255) and self.front_color == (0, 0, 0):
# Optimization: the image is already drawn by QRModuleDrawer in
# black and white, so if these are also our mask colors we don't
# need to do anything. This is much faster than actually applying a
# mask.
pass
else:
# TODO there's probably a way to use PIL.ImageMath instead of doing
# the individual pixel comparisons that the base class uses, which
# would be a lot faster. (In fact doing this would probably remove
# the need for the B&W optimization above.)
QRColorMask.apply_mask(self, image)
def get_fg_pixel(self, image, x, y):
return self.front_color
class RadialGradiantColorMask(QRColorMask):
"""
Fills in the foreground with a radial gradient from the center to the edge
"""
def __init__(
self, back_color=(255, 255, 255), center_color=(0, 0, 0), edge_color=(0, 0, 255)
):
self.back_color = back_color
self.center_color = center_color
self.edge_color = edge_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
normedDistanceToCenter = math.sqrt(
(x - width / 2) ** 2 + (y - width / 2) ** 2
) / (math.sqrt(2) * width / 2)
return self.interp_color(
self.center_color, self.edge_color, normedDistanceToCenter
)
class SquareGradiantColorMask(QRColorMask):
"""
Fills in the foreground with a square gradient from the center to the edge
"""
def __init__(
self, back_color=(255, 255, 255), center_color=(0, 0, 0), edge_color=(0, 0, 255)
):
self.back_color = back_color
self.center_color = center_color
self.edge_color = edge_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
normedDistanceToCenter = max(abs(x - width / 2), abs(y - width / 2)) / (
width / 2
)
return self.interp_color(
self.center_color, self.edge_color, normedDistanceToCenter
)
class HorizontalGradiantColorMask(QRColorMask):
"""
Fills in the foreground with a gradient sweeping from the left to the right
"""
def __init__(
self, back_color=(255, 255, 255), left_color=(0, 0, 0), right_color=(0, 0, 255)
):
self.back_color = back_color
self.left_color = left_color
self.right_color = right_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.interp_color(self.left_color, self.right_color, x / width)
class VerticalGradiantColorMask(QRColorMask):
"""
    Fills in the foreground with a gradient sweeping from the top to the bottom
"""
def __init__(
self, back_color=(255, 255, 255), top_color=(0, 0, 0), bottom_color=(0, 0, 255)
):
self.back_color = back_color
self.top_color = top_color
self.bottom_color = bottom_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.interp_color(self.top_color, self.bottom_color, y / width)
class ImageColorMask(QRColorMask):
"""
Fills in the foreground with pixels from another image, either passed by
path or passed by image object.
"""
def __init__(
self, back_color=(255, 255, 255), color_mask_path=None, color_mask_image=None
):
self.back_color = back_color
if color_mask_image:
self.color_img = color_mask_image
else:
self.color_img = Image.open(color_mask_path)
self.has_transparency = len(self.back_color) == 4
def initialize(self, styledPilImage, image):
self.paint_color = styledPilImage.paint_color
self.color_img = self.color_img.resize(image.size)
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.color_img.getpixel((x, y))
| 7,601 | Python | 33.39819 | 88 | 0.600447 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styles/moduledrawers/base.py | from __future__ import absolute_import
import abc
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from qrcode.image.base import BaseImage
class QRModuleDrawer(abc.ABC):
"""
QRModuleDrawer exists to draw the modules of the QR Code onto images.
For this, technically all that is necessary is a ``drawrect(self, box,
is_active)`` function which takes in the box in which it is to draw,
    whether or not the box is "active" (a module exists there). If
    ``needs_neighbors`` is set to True, ``is_active`` is instead passed as an
    ``ActiveWithNeighbors`` instance that also exposes the state of the
    neighboring modules.
It is frequently necessary to also implement an "initialize" function to
set up values that only the containing Image class knows about.
For examples of what these look like, see doc/module_drawers.png
"""
needs_neighbors = False
def __init__(self, **kwargs):
pass
def initialize(self, img: "BaseImage") -> None:
self.img = img
@abc.abstractmethod
def drawrect(self, box, is_active) -> None:
...
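# A minimal custom drawer sketch (illustrative), following the contract
# described in the docstring above:
#
#   class DotDrawer(QRModuleDrawer):
#       def drawrect(self, box, is_active):
#           if is_active:
#               ...  # paint into self.img using the pixel box coordinates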
| 1,067 | Python | 27.864864 | 76 | 0.685098 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styles/moduledrawers/__init__.py | # For backwards compatibility, importing the PIL drawers here.
try:
from .pil import CircleModuleDrawer # noqa: F401
from .pil import GappedSquareModuleDrawer # noqa: F401
from .pil import HorizontalBarsDrawer # noqa: F401
from .pil import RoundedModuleDrawer # noqa: F401
from .pil import SquareModuleDrawer # noqa: F401
from .pil import VerticalBarsDrawer # noqa: F401
except ImportError:
pass
| 430 | Python | 38.181815 | 62 | 0.739535 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styles/moduledrawers/pil.py | # Needed on case-insensitive filesystems
from __future__ import absolute_import
from typing import TYPE_CHECKING, List
from qrcode.compat.pil import Image, ImageDraw
from qrcode.image.styles.moduledrawers.base import QRModuleDrawer
if TYPE_CHECKING:
from qrcode.image.styledpil import StyledPilImage
from qrcode.main import ActiveWithNeighbors
# When drawing antialiased things, make them bigger and then shrink them down
# to size after the geometry has been drawn.
ANTIALIASING_FACTOR = 4
class StyledPilQRModuleDrawer(QRModuleDrawer):
"""
A base class for StyledPilImage module drawers.
NOTE: the color that this draws in should be whatever is equivalent to
black in the color space, and the specified QRColorMask will handle adding
colors as necessary to the image
"""
img: "StyledPilImage"
class SquareModuleDrawer(StyledPilQRModuleDrawer):
"""
Draws the modules as simple squares
"""
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.imgDraw = ImageDraw.Draw(self.img._img)
def drawrect(self, box, is_active: bool):
if is_active:
self.imgDraw.rectangle(box, fill=self.img.paint_color)
class GappedSquareModuleDrawer(StyledPilQRModuleDrawer):
"""
Draws the modules as simple squares that are not contiguous.
The size_ratio determines how wide the squares are relative to the width of
the space they are printed in
"""
def __init__(self, size_ratio=0.8):
self.size_ratio = size_ratio
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.imgDraw = ImageDraw.Draw(self.img._img)
self.delta = (1 - self.size_ratio) * self.img.box_size / 2
def drawrect(self, box, is_active: bool):
if is_active:
smaller_box = (
box[0][0] + self.delta,
box[0][1] + self.delta,
box[1][0] - self.delta,
box[1][1] - self.delta,
)
self.imgDraw.rectangle(smaller_box, fill=self.img.paint_color)
class CircleModuleDrawer(StyledPilQRModuleDrawer):
"""
Draws the modules as circles
"""
circle = None
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
box_size = self.img.box_size
fake_size = box_size * ANTIALIASING_FACTOR
self.circle = Image.new(
self.img.mode,
(fake_size, fake_size),
self.img.color_mask.back_color,
)
ImageDraw.Draw(self.circle).ellipse(
(0, 0, fake_size, fake_size), fill=self.img.paint_color
)
self.circle = self.circle.resize((box_size, box_size), Image.Resampling.LANCZOS)
def drawrect(self, box, is_active: bool):
if is_active:
self.img._img.paste(self.circle, (box[0][0], box[0][1]))
class RoundedModuleDrawer(StyledPilQRModuleDrawer):
"""
Draws the modules with all 90 degree corners replaced with rounded edges.
radius_ratio determines the radius of the rounded edges - a value of 1
means that an isolated module will be drawn as a circle, while a value of 0
means that the radius of the rounded edge will be 0 (and thus back to 90
degrees again).
"""
needs_neighbors = True
def __init__(self, radius_ratio=1):
self.radius_ratio = radius_ratio
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.corner_width = int(self.img.box_size / 2)
self.setup_corners()
def setup_corners(self):
mode = self.img.mode
back_color = self.img.color_mask.back_color
front_color = self.img.paint_color
self.SQUARE = Image.new(
mode, (self.corner_width, self.corner_width), front_color
)
fake_width = self.corner_width * ANTIALIASING_FACTOR
radius = self.radius_ratio * fake_width
diameter = radius * 2
base = Image.new(
mode, (fake_width, fake_width), back_color
) # make something 4x bigger for antialiasing
base_draw = ImageDraw.Draw(base)
base_draw.ellipse((0, 0, diameter, diameter), fill=front_color)
base_draw.rectangle((radius, 0, fake_width, fake_width), fill=front_color)
base_draw.rectangle((0, radius, fake_width, fake_width), fill=front_color)
self.NW_ROUND = base.resize(
(self.corner_width, self.corner_width), Image.Resampling.LANCZOS
)
self.SW_ROUND = self.NW_ROUND.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
self.SE_ROUND = self.NW_ROUND.transpose(Image.Transpose.ROTATE_180)
self.NE_ROUND = self.NW_ROUND.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
def drawrect(self, box: List[List[int]], is_active: "ActiveWithNeighbors"):
if not is_active:
return
# find rounded edges
nw_rounded = not is_active.W and not is_active.N
ne_rounded = not is_active.N and not is_active.E
se_rounded = not is_active.E and not is_active.S
sw_rounded = not is_active.S and not is_active.W
nw = self.NW_ROUND if nw_rounded else self.SQUARE
ne = self.NE_ROUND if ne_rounded else self.SQUARE
se = self.SE_ROUND if se_rounded else self.SQUARE
sw = self.SW_ROUND if sw_rounded else self.SQUARE
self.img._img.paste(nw, (box[0][0], box[0][1]))
self.img._img.paste(ne, (box[0][0] + self.corner_width, box[0][1]))
self.img._img.paste(
se, (box[0][0] + self.corner_width, box[0][1] + self.corner_width)
)
self.img._img.paste(sw, (box[0][0], box[0][1] + self.corner_width))
class VerticalBarsDrawer(StyledPilQRModuleDrawer):
"""
Draws vertically contiguous groups of modules as long rounded rectangles,
with gaps between neighboring bands (the size of these gaps is inversely
proportional to the horizontal_shrink).
"""
needs_neighbors = True
def __init__(self, horizontal_shrink=0.8):
self.horizontal_shrink = horizontal_shrink
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.half_height = int(self.img.box_size / 2)
self.delta = int((1 - self.horizontal_shrink) * self.half_height)
self.setup_edges()
def setup_edges(self):
mode = self.img.mode
back_color = self.img.color_mask.back_color
front_color = self.img.paint_color
height = self.half_height
width = height * 2
shrunken_width = int(width * self.horizontal_shrink)
self.SQUARE = Image.new(mode, (shrunken_width, height), front_color)
fake_width = width * ANTIALIASING_FACTOR
fake_height = height * ANTIALIASING_FACTOR
base = Image.new(
mode, (fake_width, fake_height), back_color
) # make something 4x bigger for antialiasing
base_draw = ImageDraw.Draw(base)
base_draw.ellipse((0, 0, fake_width, fake_height * 2), fill=front_color)
self.ROUND_TOP = base.resize((shrunken_width, height), Image.Resampling.LANCZOS)
self.ROUND_BOTTOM = self.ROUND_TOP.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
def drawrect(self, box, is_active: "ActiveWithNeighbors"):
if is_active:
# find rounded edges
top_rounded = not is_active.N
bottom_rounded = not is_active.S
top = self.ROUND_TOP if top_rounded else self.SQUARE
bottom = self.ROUND_BOTTOM if bottom_rounded else self.SQUARE
self.img._img.paste(top, (box[0][0] + self.delta, box[0][1]))
self.img._img.paste(
bottom, (box[0][0] + self.delta, box[0][1] + self.half_height)
)
class HorizontalBarsDrawer(StyledPilQRModuleDrawer):
"""
Draws horizontally contiguous groups of modules as long rounded rectangles,
with gaps between neighboring bands (the size of these gaps is inversely
proportional to the vertical_shrink).
"""
needs_neighbors = True
def __init__(self, vertical_shrink=0.8):
self.vertical_shrink = vertical_shrink
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.half_width = int(self.img.box_size / 2)
self.delta = int((1 - self.vertical_shrink) * self.half_width)
self.setup_edges()
def setup_edges(self):
mode = self.img.mode
back_color = self.img.color_mask.back_color
front_color = self.img.paint_color
width = self.half_width
height = width * 2
shrunken_height = int(height * self.vertical_shrink)
self.SQUARE = Image.new(mode, (width, shrunken_height), front_color)
fake_width = width * ANTIALIASING_FACTOR
fake_height = height * ANTIALIASING_FACTOR
base = Image.new(
mode, (fake_width, fake_height), back_color
) # make something 4x bigger for antialiasing
base_draw = ImageDraw.Draw(base)
base_draw.ellipse((0, 0, fake_width * 2, fake_height), fill=front_color)
self.ROUND_LEFT = base.resize((width, shrunken_height), Image.Resampling.LANCZOS)
self.ROUND_RIGHT = self.ROUND_LEFT.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
def drawrect(self, box, is_active: "ActiveWithNeighbors"):
if is_active:
# find rounded edges
left_rounded = not is_active.W
right_rounded = not is_active.E
left = self.ROUND_LEFT if left_rounded else self.SQUARE
right = self.ROUND_RIGHT if right_rounded else self.SQUARE
self.img._img.paste(left, (box[0][0], box[0][1] + self.delta))
self.img._img.paste(
right, (box[0][0] + self.half_width, box[0][1] + self.delta)
)
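# A minimal usage sketch (illustrative): these drawers plug into
# StyledPilImage through the ``module_drawer`` keyword argument, e.g.
#
#   img = qr.make_image(
#       image_factory=StyledPilImage,
#       module_drawer=RoundedModuleDrawer(),
#   )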
| 9,852 | Python | 35.902622 | 89 | 0.630938 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/image/styles/moduledrawers/svg.py | import abc
from decimal import Decimal
from typing import TYPE_CHECKING, NamedTuple
from qrcode.image.styles.moduledrawers.base import QRModuleDrawer
from qrcode.compat.etree import ET
if TYPE_CHECKING:
from qrcode.image.svg import SvgFragmentImage, SvgPathImage
ANTIALIASING_FACTOR = 4
class Coords(NamedTuple):
x0: Decimal
y0: Decimal
x1: Decimal
y1: Decimal
xh: Decimal
yh: Decimal
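    # (x0, y0) and (x1, y1) are the top-left and bottom-right corners of the
    # module box; (xh, yh) is its centre ("half") point.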
class BaseSvgQRModuleDrawer(QRModuleDrawer):
img: "SvgFragmentImage"
    def __init__(self, *, size_ratio: Decimal = Decimal(1), **kwargs):
        self.size_ratio = size_ratio
        # Forward any remaining kwargs to the QRModuleDrawer base class,
        # whose __init__ accepts and ignores **kwargs.
        super().__init__(**kwargs)
def initialize(self, *args, **kwargs) -> None:
super().initialize(*args, **kwargs)
self.box_delta = (1 - self.size_ratio) * self.img.box_size / 2
self.box_size = Decimal(self.img.box_size) * self.size_ratio
self.box_half = self.box_size / 2
def coords(self, box) -> Coords:
row, col = box[0]
x = row + self.box_delta
y = col + self.box_delta
return Coords(
x,
y,
x + self.box_size,
y + self.box_size,
x + self.box_half,
y + self.box_half,
)
class SvgQRModuleDrawer(BaseSvgQRModuleDrawer):
tag = "rect"
def initialize(self, *args, **kwargs) -> None:
super().initialize(*args, **kwargs)
self.tag_qname = ET.QName(self.img._SVG_namespace, self.tag)
def drawrect(self, box, is_active: bool):
if not is_active:
return
self.img._img.append(self.el(box))
@abc.abstractmethod
def el(self, box):
...
class SvgSquareDrawer(SvgQRModuleDrawer):
def initialize(self, *args, **kwargs) -> None:
super().initialize(*args, **kwargs)
self.unit_size = self.img.units(self.box_size)
def el(self, box):
coords = self.coords(box)
return ET.Element(
self.tag_qname, # type: ignore
x=self.img.units(coords.x0),
y=self.img.units(coords.y0),
width=self.unit_size,
height=self.unit_size,
)
class SvgCircleDrawer(SvgQRModuleDrawer):
tag = "circle"
def initialize(self, *args, **kwargs) -> None:
super().initialize(*args, **kwargs)
self.radius = self.img.units(self.box_half)
def el(self, box):
coords = self.coords(box)
return ET.Element(
self.tag_qname, # type: ignore
cx=self.img.units(coords.xh),
cy=self.img.units(coords.yh),
r=self.radius,
)
class SvgPathQRModuleDrawer(BaseSvgQRModuleDrawer):
img: "SvgPathImage"
def drawrect(self, box, is_active: bool):
if not is_active:
return
self.img._subpaths.append(self.subpath(box))
@abc.abstractmethod
def subpath(self, box) -> str:
...
class SvgPathSquareDrawer(SvgPathQRModuleDrawer):
def subpath(self, box) -> str:
coords = self.coords(box)
x0 = self.img.units(coords.x0, text=False)
y0 = self.img.units(coords.y0, text=False)
x1 = self.img.units(coords.x1, text=False)
y1 = self.img.units(coords.y1, text=False)
return f"M{x0},{y0}H{x1}V{y1}H{x0}z"
class SvgPathCircleDrawer(SvgPathQRModuleDrawer):
def initialize(self, *args, **kwargs) -> None:
super().initialize(*args, **kwargs)
def subpath(self, box) -> str:
coords = self.coords(box)
x0 = self.img.units(coords.x0, text=False)
yh = self.img.units(coords.yh, text=False)
h = self.img.units(self.box_half - self.box_delta, text=False)
x1 = self.img.units(coords.x1, text=False)
        # Each SVG arc command below has the form
        #   A rx,ry x-axis-rotation large-arc-flag sweep-flag x,y
        # where rx,ry are the arc radii (both h here, giving a semicircle),
        # the three zeroes are the x-axis-rotation, large-arc-flag and
        # sweep-flag, and x,y is the point the arc is drawn to.
return f"M{x0},{yh}A{h},{h} 0 0 0 {x1},{yh}A{h},{h} 0 0 0 {x0},{yh}z"
| 3,952 | Python | 26.838028 | 77 | 0.593877 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/compat/etree.py | try:
import lxml.etree as ET # type: ignore # noqa: F401
except ImportError:
import xml.etree.ElementTree as ET # type: ignore # noqa: F401
| 152 | Python | 29.599994 | 68 | 0.684211 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/compat/pil.py | # Try to import PIL in either of the two ways it can be installed.
Image = None
ImageDraw = None
try:
from PIL import Image, ImageDraw # type: ignore # noqa: F401
except ImportError: # pragma: no cover
try:
import Image # type: ignore # noqa: F401
import ImageDraw # type: ignore # noqa: F401
except ImportError:
pass
| 362 | Python | 26.923075 | 66 | 0.651934 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_release.py | import re
import builtins
import datetime
import unittest
from unittest import mock
from qrcode.release import update_manpage
OPEN = f"{builtins.__name__}.open"
DATA = 'test\n.TH "date" "version" "description"\nthis'
class UpdateManpageTests(unittest.TestCase):
@mock.patch(OPEN, new_callable=mock.mock_open, read_data=".TH invalid")
def test_invalid_data(self, mock_file):
update_manpage({"name": "qrcode", "new_version": "1.23"})
mock_file.assert_called()
mock_file().write.assert_not_called()
@mock.patch(OPEN, new_callable=mock.mock_open, read_data=DATA)
def test_not_qrcode(self, mock_file):
update_manpage({"name": "not-qrcode"})
mock_file.assert_not_called()
@mock.patch(OPEN, new_callable=mock.mock_open, read_data=DATA)
def test_no_change(self, mock_file):
update_manpage({"name": "qrcode", "new_version": "version"})
mock_file.assert_called()
mock_file().write.assert_not_called()
@mock.patch(OPEN, new_callable=mock.mock_open, read_data=DATA)
def test_change(self, mock_file):
update_manpage({"name": "qrcode", "new_version": "3.11"})
expected = re.split(r"([^\n]*(?:\n|$))", DATA)[1::2]
expected[1] = (
expected[1]
.replace("version", "3.11")
.replace("date", datetime.datetime.now().strftime("%-d %b %Y"))
)
        mock_file().write.assert_has_calls(
            [mock.call(line) for line in expected if line]
        )
| 1,468 | Python | 34.829267 | 75 | 0.626703 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_util.py | import unittest
from qrcode import util
class UtilTests(unittest.TestCase):
def test_check_wrong_version(self):
with self.assertRaises(ValueError):
util.check_version(0)
with self.assertRaises(ValueError):
util.check_version(41)
| 277 | Python | 20.384614 | 43 | 0.67509 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_script.py | import io
import os
import sys
import unittest
from tempfile import mkdtemp
from unittest import mock
from qrcode.compat.pil import Image
from qrcode.console_scripts import commas, main
def bad_read():
raise UnicodeDecodeError("utf-8", b"0x80", 0, 1, "invalid start byte")
class ScriptTest(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
os.rmdir(self.tmpdir)
@mock.patch("os.isatty", lambda *args: True)
@mock.patch("qrcode.main.QRCode.print_ascii")
def test_isatty(self, mock_print_ascii):
main(["testtext"])
mock_print_ascii.assert_called_with(tty=True)
@mock.patch("os.isatty", lambda *args: False)
@mock.patch("sys.stdout")
@unittest.skipIf(not Image, "Requires PIL")
def test_piped(self, mock_stdout):
main(["testtext"])
@mock.patch("os.isatty", lambda *args: True)
@mock.patch("qrcode.main.QRCode.print_ascii")
@mock.patch("sys.stdin")
def test_stdin(self, mock_stdin, mock_print_ascii):
mock_stdin.buffer.read.return_value = "testtext"
main([])
self.assertTrue(mock_stdin.buffer.read.called)
mock_print_ascii.assert_called_with(tty=True)
@mock.patch("os.isatty", lambda *args: True)
@mock.patch("qrcode.main.QRCode.print_ascii")
def test_stdin_py3_unicodedecodeerror(self, mock_print_ascii):
mock_stdin = mock.Mock(sys.stdin)
mock_stdin.buffer.read.return_value = "testtext"
mock_stdin.read.side_effect = bad_read
with mock.patch("sys.stdin", mock_stdin):
# sys.stdin.read() will raise an error...
self.assertRaises(UnicodeDecodeError, sys.stdin.read)
# ... but it won't be used now.
main([])
mock_print_ascii.assert_called_with(tty=True)
@mock.patch("os.isatty", lambda *args: True)
@mock.patch("qrcode.main.QRCode.print_ascii")
def test_optimize(self, mock_print_ascii):
main("testtext --optimize 0".split())
@mock.patch("sys.stdout")
def test_factory(self, mock_stdout):
main("testtext --factory svg".split())
@mock.patch("sys.stderr")
def test_bad_factory(self, mock_stderr):
self.assertRaises(SystemExit, main, "testtext --factory fish".split())
@mock.patch.object(sys, "argv", "qr testtext output".split())
@unittest.skipIf(not Image, "Requires PIL")
def test_sys_argv(self):
main()
@unittest.skipIf(not Image, "Requires PIL")
def test_output(self):
tmpfile = os.path.join(self.tmpdir, "test.png")
main(["testtext", "--output", tmpfile])
os.remove(tmpfile)
@mock.patch("sys.stderr", new_callable=io.StringIO)
@unittest.skipIf(not Image, "Requires PIL")
def test_factory_drawer_none(self, mock_stderr):
with self.assertRaises(SystemExit):
main("testtext --factory pil --factory-drawer nope".split())
self.assertIn(
"The selected factory has no drawer aliases", mock_stderr.getvalue()
)
@mock.patch("sys.stderr", new_callable=io.StringIO)
def test_factory_drawer_bad(self, mock_stderr):
with self.assertRaises(SystemExit):
main("testtext --factory svg --factory-drawer sobad".split())
self.assertIn("sobad factory drawer not found", mock_stderr.getvalue())
@mock.patch("sys.stderr", new_callable=io.StringIO)
def test_factory_drawer(self, mock_stderr):
main("testtext --factory svg --factory-drawer circle".split())
def test_commas(self):
self.assertEqual(commas([]), "")
self.assertEqual(commas(["A"]), "A")
self.assertEqual(commas("AB"), "A or B")
self.assertEqual(commas("ABC"), "A, B or C")
self.assertEqual(commas("ABC", joiner="and"), "A, B and C")
| 3,807 | Python | 34.924528 | 80 | 0.641713 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_qrcode_svg.py | import io
import os
import unittest
from tempfile import mkdtemp
import qrcode
from qrcode.image import svg
UNICODE_TEXT = "\u03b1\u03b2\u03b3"
class SvgImageWhite(svg.SvgImage):
background = "white"
class QRCodeSvgTests(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
os.rmdir(self.tmpdir)
def test_render_svg(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=svg.SvgImage)
img.save(io.BytesIO())
def test_render_svg_path(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=svg.SvgPathImage)
img.save(io.BytesIO())
def test_render_svg_fragment(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=svg.SvgFragmentImage)
img.save(io.BytesIO())
def test_svg_string(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=svg.SvgFragmentImage)
file_like = io.BytesIO()
img.save(file_like)
file_like.seek(0)
assert file_like.read() in img.to_string()
def test_render_svg_with_background(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=SvgImageWhite)
img.save(io.BytesIO())
def test_svg_circle_drawer(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=svg.SvgPathImage, module_drawer="circle")
img.save(io.BytesIO())
| 1,644 | Python | 25.967213 | 83 | 0.629562 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_qrcode.py | import io
import os
import unittest
import warnings
from tempfile import mkdtemp
from unittest import mock
import png
import qrcode
import qrcode.util
from qrcode.compat.pil import Image as pil_Image
from qrcode.exceptions import DataOverflowError
from qrcode.image.base import BaseImage
from qrcode.image.pure import PyPNGImage
from qrcode.image.styledpil import StyledPilImage
from qrcode.image.styles import colormasks, moduledrawers
from qrcode.util import MODE_8BIT_BYTE, MODE_ALPHA_NUM, MODE_NUMBER, QRData
UNICODE_TEXT = "\u03b1\u03b2\u03b3"
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
class QRCodeTests(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
os.rmdir(self.tmpdir)
def test_basic(self):
qr = qrcode.QRCode(version=1)
qr.add_data("a")
qr.make(fit=False)
def test_large(self):
qr = qrcode.QRCode(version=27)
qr.add_data("a")
qr.make(fit=False)
def test_invalid_version(self):
self.assertRaises(ValueError, qrcode.QRCode, version=41)
def test_invalid_border(self):
self.assertRaises(ValueError, qrcode.QRCode, border=-1)
def test_overflow(self):
qr = qrcode.QRCode(version=1)
qr.add_data("abcdefghijklmno")
self.assertRaises(DataOverflowError, qr.make, fit=False)
def test_add_qrdata(self):
qr = qrcode.QRCode(version=1)
data = QRData("a")
qr.add_data(data)
qr.make(fit=False)
def test_fit(self):
qr = qrcode.QRCode()
qr.add_data("a")
qr.make()
self.assertEqual(qr.version, 1)
qr.add_data("bcdefghijklmno")
qr.make()
self.assertEqual(qr.version, 2)
def test_mode_number(self):
qr = qrcode.QRCode()
qr.add_data("1234567890123456789012345678901234", optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_NUMBER)
def test_mode_alpha(self):
qr = qrcode.QRCode()
qr.add_data("ABCDEFGHIJ1234567890", optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
def test_regression_mode_comma(self):
qr = qrcode.QRCode()
qr.add_data(",", optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit(self):
qr = qrcode.QRCode()
qr.add_data("abcABC" + UNICODE_TEXT, optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit_newline(self):
qr = qrcode.QRCode()
qr.add_data("ABCDEFGHIJ1234567890\n", optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_pil(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image()
img.save(io.BytesIO())
self.assertIsInstance(img.get_image(), pil_Image.Image)
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_pil_with_transparent_background(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(back_color="TransParent")
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_pil_with_red_background(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(back_color="red")
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_pil_with_rgb_color_tuples(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(back_color=(255, 195, 235), fill_color=(55, 95, 35))
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_with_pattern(self):
qr = qrcode.QRCode(mask_pattern=3)
qr.add_data(UNICODE_TEXT)
img = qr.make_image()
img.save(io.BytesIO())
def test_make_image_with_wrong_pattern(self):
with self.assertRaises(TypeError):
qrcode.QRCode(mask_pattern="string pattern")
with self.assertRaises(ValueError):
qrcode.QRCode(mask_pattern=-1)
with self.assertRaises(ValueError):
qrcode.QRCode(mask_pattern=42)
def test_mask_pattern_setter(self):
qr = qrcode.QRCode()
with self.assertRaises(TypeError):
qr.mask_pattern = "string pattern"
with self.assertRaises(ValueError):
qr.mask_pattern = -1
with self.assertRaises(ValueError):
qr.mask_pattern = 8
def test_qrcode_bad_factory(self):
with self.assertRaises(TypeError):
qrcode.QRCode(image_factory="not_BaseImage") # type: ignore
with self.assertRaises(AssertionError):
qrcode.QRCode(image_factory=dict) # type: ignore
def test_qrcode_factory(self):
class MockFactory(BaseImage):
drawrect = mock.Mock()
new_image = mock.Mock()
qr = qrcode.QRCode(image_factory=MockFactory)
qr.add_data(UNICODE_TEXT)
qr.make_image()
self.assertTrue(MockFactory.new_image.called)
self.assertTrue(MockFactory.drawrect.called)
def test_render_pypng(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=PyPNGImage)
self.assertIsInstance(img.get_image(), png.Writer)
print(img.width, img.box_size, img.border)
img.save(io.BytesIO())
def test_render_pypng_to_str(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=PyPNGImage)
self.assertIsInstance(img.get_image(), png.Writer)
mock_open = mock.mock_open()
with mock.patch("qrcode.image.pure.open", mock_open, create=True):
img.save("test_file.png")
mock_open.assert_called_once_with("test_file.png", "wb")
mock_open("test_file.png", "wb").write.assert_called()
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_Image(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=StyledPilImage)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_embeded_image(self):
embeded_img = pil_Image.new("RGB", (10, 10), color="red")
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=StyledPilImage, embeded_image=embeded_img)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_embeded_image_path(self):
tmpfile = os.path.join(self.tmpdir, "test.png")
embeded_img = pil_Image.new("RGB", (10, 10), color="red")
embeded_img.save(tmpfile)
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=StyledPilImage, embeded_image_path=tmpfile)
img.save(io.BytesIO())
os.remove(tmpfile)
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_square_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.SquareModuleDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_gapped_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.GappedSquareModuleDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_circle_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.CircleModuleDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_rounded_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.RoundedModuleDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_vertical_bars_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.VerticalBarsDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_horizontal_bars_module_drawer(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
img = qr.make_image(
image_factory=StyledPilImage,
module_drawer=moduledrawers.HorizontalBarsDrawer(),
)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_default_solid_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.SolidFillColorMask()
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_solid_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.SolidFillColorMask(back_color=WHITE, front_color=RED)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_color_mask_with_transparency(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.SolidFillColorMask(
back_color=(255, 0, 255, 255), front_color=RED
)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
assert img.mode == "RGBA"
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_radial_gradient_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.RadialGradiantColorMask(
back_color=WHITE, center_color=BLACK, edge_color=RED
)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_square_gradient_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.SquareGradiantColorMask(
back_color=WHITE, center_color=BLACK, edge_color=RED
)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_horizontal_gradient_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.HorizontalGradiantColorMask(
back_color=WHITE, left_color=RED, right_color=BLACK
)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_vertical_gradient_color_mask(self):
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.VerticalGradiantColorMask(
back_color=WHITE, top_color=RED, bottom_color=BLACK
)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
@unittest.skipIf(not pil_Image, "Requires PIL")
def test_render_styled_with_image_color_mask(self):
img_mask = pil_Image.new("RGB", (10, 10), color="red")
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
qr.add_data(UNICODE_TEXT)
mask = colormasks.ImageColorMask(back_color=WHITE, color_mask_image=img_mask)
img = qr.make_image(image_factory=StyledPilImage, color_mask=mask)
img.save(io.BytesIO())
def test_optimize(self):
qr = qrcode.QRCode()
text = "A1abc12345def1HELLOa"
qr.add_data(text, optimize=4)
qr.make()
self.assertEqual(
[d.mode for d in qr.data_list],
[
MODE_8BIT_BYTE,
MODE_NUMBER,
MODE_8BIT_BYTE,
MODE_ALPHA_NUM,
MODE_8BIT_BYTE,
],
)
self.assertEqual(qr.version, 2)
def test_optimize_short(self):
qr = qrcode.QRCode()
text = "A1abc1234567def1HELLOa"
qr.add_data(text, optimize=7)
qr.make()
self.assertEqual(len(qr.data_list), 3)
self.assertEqual(
[d.mode for d in qr.data_list],
[MODE_8BIT_BYTE, MODE_NUMBER, MODE_8BIT_BYTE],
)
self.assertEqual(qr.version, 2)
def test_optimize_longer_than_data(self):
qr = qrcode.QRCode()
text = "ABCDEFGHIJK"
qr.add_data(text, optimize=12)
self.assertEqual(len(qr.data_list), 1)
self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
def test_optimize_size(self):
text = "A1abc12345123451234512345def1HELLOHELLOHELLOHELLOa" * 5
qr = qrcode.QRCode()
qr.add_data(text)
qr.make()
self.assertEqual(qr.version, 10)
qr = qrcode.QRCode()
qr.add_data(text, optimize=0)
qr.make()
self.assertEqual(qr.version, 11)
def test_qrdata_repr(self):
data = b"hello"
data_obj = qrcode.util.QRData(data)
self.assertEqual(repr(data_obj), repr(data))
def test_print_ascii_stdout(self):
qr = qrcode.QRCode()
with mock.patch("sys.stdout") as fake_stdout:
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_ascii, tty=True)
self.assertTrue(fake_stdout.isatty.called)
def test_print_ascii(self):
qr = qrcode.QRCode(border=0)
f = io.StringIO()
qr.print_ascii(out=f)
printed = f.getvalue()
f.close()
expected = "\u2588\u2580\u2580\u2580\u2580\u2580\u2588"
self.assertEqual(printed[: len(expected)], expected)
f = io.StringIO()
f.isatty = lambda: True
qr.print_ascii(out=f, tty=True)
printed = f.getvalue()
f.close()
expected = (
"\x1b[48;5;232m\x1b[38;5;255m" + "\xa0\u2584\u2584\u2584\u2584\u2584\xa0"
)
self.assertEqual(printed[: len(expected)], expected)
def test_print_tty_stdout(self):
qr = qrcode.QRCode()
with mock.patch("sys.stdout") as fake_stdout:
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_tty)
self.assertTrue(fake_stdout.isatty.called)
def test_print_tty(self):
qr = qrcode.QRCode()
f = io.StringIO()
f.isatty = lambda: True
qr.print_tty(out=f)
printed = f.getvalue()
f.close()
BOLD_WHITE_BG = "\x1b[1;47m"
BLACK_BG = "\x1b[40m"
WHITE_BLOCK = BOLD_WHITE_BG + " " + BLACK_BG
EOL = "\x1b[0m\n"
expected = (
BOLD_WHITE_BG + " " * 23 + EOL + WHITE_BLOCK + " " * 7 + WHITE_BLOCK
)
self.assertEqual(printed[: len(expected)], expected)
def test_get_matrix(self):
qr = qrcode.QRCode(border=0)
qr.add_data("1")
self.assertEqual(qr.get_matrix(), qr.modules)
def test_get_matrix_border(self):
qr = qrcode.QRCode(border=1)
qr.add_data("1")
matrix = [row[1:-1] for row in qr.get_matrix()[1:-1]]
self.assertEqual(matrix, qr.modules)
def test_negative_size_at_construction(self):
self.assertRaises(ValueError, qrcode.QRCode, box_size=-1)
def test_negative_size_at_usage(self):
qr = qrcode.QRCode()
qr.box_size = -1
self.assertRaises(ValueError, qr.make_image)
class ShortcutTest(unittest.TestCase):
@unittest.skipIf(not pil_Image, "Requires PIL")
def runTest(self):
qrcode.make("image")
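# A minimal standalone sketch of what the styled-image tests above
# exercise, assuming PIL is installed (kept as a comment so importing this
# test module stays side-effect free):
#
#   import qrcode
#   from qrcode.image.styledpil import StyledPilImage
#   from qrcode.image.styles import colormasks, moduledrawers
#
#   qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_L)
#   qr.add_data("hello world")
#   img = qr.make_image(
#       image_factory=StyledPilImage,
#       module_drawer=moduledrawers.RoundedModuleDrawer(),
#       color_mask=colormasks.RadialGradiantColorMask(),
#   )
#   img.save("styled.png")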
| 17,398 | Python | 34.653688 | 85 | 0.624727 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/qrcode/tests/test_example.py | import unittest
from unittest import mock
from qrcode import run_example
from qrcode.compat.pil import Image
class ExampleTest(unittest.TestCase):
@unittest.skipIf(not Image, "Requires PIL")
@mock.patch("PIL.Image.Image.show")
def runTest(self, mock_show):
run_example()
mock_show.assert_called_with()
| 333 | Python | 22.857141 | 47 | 0.717718 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pathtools/patterns.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# patterns.py: Common wildcard searching/filtering functionality for files.
#
# Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:module: pathtools.patterns
:synopsis: Wildcard pattern matching and filtering functions for paths.
:author: Yesudeep Mangalapilly <[email protected]>
Functions
---------
.. autofunction:: match_path
.. autofunction:: match_path_against
.. autofunction:: filter_paths
"""
from fnmatch import fnmatch, fnmatchcase
__all__ = ['match_path',
'match_path_against',
'match_any_paths',
'filter_paths']
def _string_lower(s):
"""
Convenience function to lowercase a string (the :mod:`string` module is
deprecated/removed in Python 3.0).
:param s:
The string which will be lowercased.
:returns:
Lowercased copy of string s.
"""
return s.lower()
def match_path_against(pathname, patterns, case_sensitive=True):
"""
Determines whether the pathname matches any of the given wildcard patterns,
optionally ignoring the case of the pathname and patterns.
:param pathname:
A path name that will be matched against a wildcard pattern.
:param patterns:
        A list of wildcard patterns to match the filename against.
:param case_sensitive:
``True`` if the matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if the pattern matches; ``False`` otherwise.
Doctests::
>>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False)
True
>>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True)
False
>>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False)
True
>>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True)
False
>>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False)
True
"""
if case_sensitive:
match_func = fnmatchcase
pattern_transform_func = (lambda w: w)
else:
match_func = fnmatch
pathname = pathname.lower()
pattern_transform_func = _string_lower
for pattern in set(patterns):
pattern = pattern_transform_func(pattern)
if match_func(pathname, pattern):
return True
return False
def _match_path(pathname,
included_patterns,
excluded_patterns,
case_sensitive=True):
"""Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
if not case_sensitive:
included_patterns = set(map(_string_lower, included_patterns))
excluded_patterns = set(map(_string_lower, excluded_patterns))
else:
included_patterns = set(included_patterns)
excluded_patterns = set(excluded_patterns)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
raise ValueError('conflicting patterns `%s` included and excluded'\
% common_patterns)
return (match_path_against(pathname, included_patterns, case_sensitive)\
and not match_path_against(pathname, excluded_patterns,
case_sensitive))
def match_path(pathname,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Matches a pathname against a set of acceptable and ignored patterns.
:param pathname:
A pathname which will be matched against a pattern.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern is specified, the function treats the pathname as
        a match.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern is specified, the function treats the pathname as
        a match.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if the pathname matches; ``False`` otherwise.
:raises:
ValueError if included patterns and excluded patterns contain the
same pattern.
Doctests::
>>> match_path("/Users/gorakhargosh/foobar.py")
True
>>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False)
True
>>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
return _match_path(pathname, included, excluded, case_sensitive)
def filter_paths(pathnames,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Filters from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
A list of pathnames that matched the allowable patterns and passed
through the ignored patterns.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> set(filter_paths(pathnames)) == pathnames
True
>>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames
True
>>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"])
True
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
yield pathname
def match_any_paths(pathnames,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Matches from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if any of the paths matches; ``False`` otherwise.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> match_any_paths(pathnames)
True
>>> match_any_paths(pathnames, case_sensitive=False)
True
>>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)
True
>>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False)
False
>>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True)
False
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
return True
return False
| 10,698 | Python | 39.221804 | 156 | 0.638157 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pathtools/__init__.py | # -*- coding: utf-8 -*-
# pathtools: File system path tools.
# Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| 1,182 | Python | 52.772725 | 79 | 0.773266 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pathtools/path.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# path.py: Path functions.
#
# Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:module: pathtools.path
:synopsis: Directory walking, listing, and path sanitizing functions.
:author: Yesudeep Mangalapilly <[email protected]>
Functions
---------
.. autofunction:: get_dir_walker
.. autofunction:: walk
.. autofunction:: listdir
.. autofunction:: list_directories
.. autofunction:: list_files
.. autofunction:: absolute_path
.. autofunction:: real_absolute_path
.. autofunction:: parent_dir_path
"""
import os.path
from functools import partial
__all__ = [
'get_dir_walker',
'walk',
'listdir',
'list_directories',
'list_files',
'absolute_path',
'real_absolute_path',
'parent_dir_path',
]
def get_dir_walker(recursive, topdown=True, followlinks=False):
"""
Returns a recursive or a non-recursive directory walker.
:param recursive:
``True`` produces a recursive walker; ``False`` produces a non-recursive
walker.
:returns:
A walker function.
"""
if recursive:
walk = partial(os.walk, topdown=topdown, followlinks=followlinks)
else:
def walk(path, topdown=topdown, followlinks=followlinks):
try:
yield next(os.walk(path, topdown=topdown, followlinks=followlinks))
except NameError:
yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101
return walk
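# Usage sketch ('/tmp' is an arbitrary example path):
#
#   walk_fn = get_dir_walker(recursive=False)
#   for root, dirs, files in walk_fn('/tmp'):
#       pass   # yields only the single top-level (root, dirs, files) triple
#
# whereas get_dir_walker(recursive=True) behaves exactly like os.walk().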
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
Walks a directory tree optionally recursively. Works exactly like
:func:`os.walk` only adding the `recursive` argument.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
walk_func = get_dir_walker(recursive, topdown, followlinks)
for root, dirnames, filenames in walk_func(dir_pathname):
yield (root, dirnames, filenames)
def listdir(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all items using their absolute paths in a directory, optionally
recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for dirname in dirnames:
yield absolute_path(os.path.join(root, dirname))
for filename in filenames:
yield absolute_path(os.path.join(root, filename))
def list_directories(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all the directories using their absolute paths within the specified
directory, optionally recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for dirname in dirnames:
yield absolute_path(os.path.join(root, dirname))
def list_files(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all the files using their absolute paths within the specified
directory, optionally recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for filename in filenames:
yield absolute_path(os.path.join(root, filename))
def absolute_path(path):
"""
Returns the absolute path for the given path and normalizes the path.
:param path:
Path for which the absolute normalized path will be found.
:returns:
Absolute normalized path.
"""
return os.path.abspath(os.path.normpath(path))
def real_absolute_path(path):
"""
Returns the real absolute normalized path for the given path.
:param path:
Path for which the real absolute normalized path will be found.
:returns:
Real absolute normalized path.
"""
return os.path.realpath(absolute_path(path))
def parent_dir_path(path):
"""
Returns the parent directory path.
:param path:
Path for which the parent directory will be obtained.
:returns:
Parent directory path.
"""
return absolute_path(os.path.dirname(path))
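# A minimal sketch of the functions above: lists the entries directly
# inside the current directory without recursing. The "." path is an
# arbitrary example.
if __name__ == "__main__":
    for entry in listdir(".", recursive=False):
        print(entry)
    print(parent_dir_path(absolute_path(".")))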
| 6,614 | Python | 30.802884 | 98 | 0.669791 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles/os.py | """Async executor versions of file functions from the os module."""
import asyncio
from functools import partial, wraps
import os
def wrap(func):
@asyncio.coroutine
@wraps(func)
def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
loop = asyncio.get_event_loop()
pfunc = partial(func, *args, **kwargs)
return loop.run_in_executor(executor, pfunc)
return run
stat = wrap(os.stat)
if hasattr(os, "sendfile"):
sendfile = wrap(os.sendfile)
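# A minimal sketch of the wrapped functions above: stat() returns an
# awaitable resolving to the same os.stat_result the blocking call would
# return. Assumes a Python version where asyncio.coroutine and
# get_event_loop() still exist, which is what this module targets.
if __name__ == "__main__":
    result = asyncio.get_event_loop().run_until_complete(stat(__file__))
    print(result.st_size)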
| 514 | Python | 21.391303 | 67 | 0.653696 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles/base.py | """Various base classes."""
import asyncio
from collections.abc import Coroutine
class AsyncBase:
def __init__(self, file, loop, executor):
self._file = file
self._loop = loop
self._executor = executor
def __aiter__(self):
"""We are our own iterator."""
return self
@asyncio.coroutine
def __anext__(self):
"""Simulate normal file iteration."""
line = yield from self.readline()
if line:
return line
else:
raise StopAsyncIteration
class _ContextManager(Coroutine):
__slots__ = ('_coro', '_obj')
def __init__(self, coro):
self._coro = coro
self._obj = None
def send(self, value):
return self._coro.send(value)
def throw(self, typ, val=None, tb=None):
if val is None:
return self._coro.throw(typ)
elif tb is None:
return self._coro.throw(typ, val)
else:
return self._coro.throw(typ, val, tb)
def close(self):
return self._coro.close()
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
resp = yield from self._coro
return resp
def __await__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __anext__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __aenter__(self):
self._obj = yield from self._coro
return self._obj
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
self._obj.close()
self._obj = None
class AiofilesContextManager(_ContextManager):
"""An adjusted async context manager for aiofiles."""
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
yield from self._obj.close()
self._obj = None
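# Usage sketch: aiofiles.open() wraps its opening coroutine in
# AiofilesContextManager, so on Python 3.5+ both forms below work:
#
#   f = await aiofiles.open('data.txt')          # plain await
#   data = await f.read()
#   await f.close()
#
#   async with aiofiles.open('data.txt') as f:   # async context manager
#       data = await f.read()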
| 2,121 | Python | 21.574468 | 57 | 0.564828 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles/_compat.py | import sys
try:
from functools import singledispatch
except ImportError: # pragma: nocover
from singledispatch import singledispatch
PY_35 = sys.version_info >= (3, 5)
| 205 | Python | 21.888886 | 64 | 0.64878 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles/__init__.py | """Utilities for asyncio-friendly file handling."""
from .threadpool import open
__version__ = "0.4.0"
__all__ = (open,)
| 123 | Python | 16.714283 | 51 | 0.650406 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt/importer.py | """This module implements a post import hook mechanism styled after what is
described in PEP-369. Note that it doesn't cope with modules being reloaded.
"""
import sys
import threading
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
import importlib
string_types = str,
else:
string_types = basestring,
from .decorators import synchronized
# The dictionary registering any post import hooks to be triggered once
# the target module has been imported. Once a module has been imported
# and the hooks fired, the list of hooks recorded against the target
# module will be truncated but the list left in the dictionary. This
# acts as a flag to indicate that the module had already been imported.
_post_import_hooks = {}
_post_import_hooks_init = False
_post_import_hooks_lock = threading.RLock()
# Register a new post import hook for the target module name. This
# differs from the PEP-369 implementation in that it also allows the
# hook function to be specified as a string consisting of the name of
# the callback in the form 'module:function'. This will result in a
# proxy callback being registered which will defer loading of the
# specified module containing the callback function until required.
def _create_import_hook_from_string(name):
def import_hook(module):
module_name, function = name.split(':')
attrs = function.split('.')
__import__(module_name)
callback = sys.modules[module_name]
for attr in attrs:
callback = getattr(callback, attr)
return callback(module)
return import_hook
@synchronized(_post_import_hooks_lock)
def register_post_import_hook(hook, name):
# Create a deferred import hook if hook is a string name rather than
# a callable function.
if isinstance(hook, string_types):
hook = _create_import_hook_from_string(hook)
# Automatically install the import hook finder if it has not already
# been installed.
global _post_import_hooks_init
if not _post_import_hooks_init:
_post_import_hooks_init = True
sys.meta_path.insert(0, ImportHookFinder())
# Determine if any prior registration of a post import hook for
    # the target module has occurred and act appropriately.
hooks = _post_import_hooks.get(name, None)
if hooks is None:
# No prior registration of post import hooks for the target
# module. We need to check whether the module has already been
# imported. If it has we fire the hook immediately and add an
# empty list to the registry to indicate that the module has
# already been imported and hooks have fired. Otherwise add
# the post import hook to the registry.
module = sys.modules.get(name, None)
if module is not None:
_post_import_hooks[name] = []
hook(module)
else:
_post_import_hooks[name] = [hook]
elif hooks == []:
        # A prior registration of post import hooks for the target
# module was done and the hooks already fired. Fire the hook
# immediately.
module = sys.modules[name]
hook(module)
else:
        # A prior registration of post import hooks for the target
# module was done but the module has not yet been imported.
_post_import_hooks[name].append(hook)
# Register post import hooks defined as package entry points.
def _create_import_hook_from_entrypoint(entrypoint):
def import_hook(module):
__import__(entrypoint.module_name)
callback = sys.modules[entrypoint.module_name]
for attr in entrypoint.attrs:
callback = getattr(callback, attr)
return callback(module)
return import_hook
def discover_post_import_hooks(group):
try:
import pkg_resources
except ImportError:
return
for entrypoint in pkg_resources.iter_entry_points(group=group):
callback = _create_import_hook_from_entrypoint(entrypoint)
register_post_import_hook(callback, entrypoint.name)
# Indicate that a module has been loaded. Any post import hooks which
# were registered against the target module will be invoked. If an
# exception is raised in any of the post import hooks, that will cause
# the import of the target module to fail.
@synchronized(_post_import_hooks_lock)
def notify_module_loaded(module):
name = getattr(module, '__name__', None)
hooks = _post_import_hooks.get(name, None)
if hooks:
_post_import_hooks[name] = []
for hook in hooks:
hook(module)
# A custom module import finder. This intercepts attempts to import
# modules and watches out for attempts to import target modules of
# interest. When a module of interest is imported, then any post import
# hooks which are registered will be invoked.
class _ImportHookLoader:
def load_module(self, fullname):
module = sys.modules[fullname]
notify_module_loaded(module)
return module
class _ImportHookChainedLoader:
def __init__(self, loader):
self.loader = loader
def load_module(self, fullname):
module = self.loader.load_module(fullname)
notify_module_loaded(module)
return module
class ImportHookFinder:
def __init__(self):
self.in_progress = {}
@synchronized(_post_import_hooks_lock)
def find_module(self, fullname, path=None):
# If the module being imported is not one we have registered
# post import hooks for, we can return immediately. We will
# take no further part in the importing of this module.
if not fullname in _post_import_hooks:
return None
# When we are interested in a specific module, we will call back
# into the import system a second time to defer to the import
# finder that is supposed to handle the importing of the module.
# We set an in progress flag for the target module so that on
# the second time through we don't trigger another call back
        # into the import system and cause an infinite loop.
if fullname in self.in_progress:
return None
self.in_progress[fullname] = True
# Now call back into the import system again.
try:
if PY3:
# For Python 3 we need to use find_loader() from
# the importlib module. It doesn't actually
# import the target module and only finds the
# loader. If a loader is found, we need to return
# our own loader which will then in turn call the
# real loader to import the module and invoke the
# post import hooks.
loader = importlib.find_loader(fullname, path)
if loader:
return _ImportHookChainedLoader(loader)
else:
# For Python 2 we don't have much choice but to
# call back in to __import__(). This will
# actually cause the module to be imported. If no
# module could be found then ImportError will be
# raised. Otherwise we return a loader which
# returns the already loaded module and invokes
# the post import hooks.
__import__(fullname)
return _ImportHookLoader()
finally:
del self.in_progress[fullname]
# Decorator for marking that a function should be called as a post
# import hook when the target module is imported.
def when_imported(name):
def register(hook):
register_post_import_hook(hook, name)
return hook
return register
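# Usage sketch for the hook mechanism above:
#
#   from wrapt.importer import when_imported
#
#   @when_imported('json')
#   def announce(module):
#       print('loaded:', module.__name__)
#
#   import json   # fires the hook now, or it fires at registration time
#                 # if 'json' had already been imported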
| 7,726 | Python | 32.742358 | 76 | 0.659591 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt/wrappers.py | import sys
import functools
import operator
import weakref
import inspect
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
class _ObjectProxyMethods(object):
# We use properties to override the values of __module__ and
# __doc__. If we add these in ObjectProxy, the derived class
# __dict__ will still be setup to have string variants of these
# attributes and the rules of descriptors means that they appear to
# take precedence over the properties in the base class. To avoid
# that, we copy the properties into the derived class type itself
# via a meta class. In that way the properties will always take
# precedence.
@property
def __module__(self):
return self.__wrapped__.__module__
@__module__.setter
def __module__(self, value):
self.__wrapped__.__module__ = value
@property
def __doc__(self):
return self.__wrapped__.__doc__
@__doc__.setter
def __doc__(self, value):
self.__wrapped__.__doc__ = value
    # We similarly use a property for __dict__. We need __dict__ to be
# explicit to ensure that vars() works as expected.
@property
def __dict__(self):
return self.__wrapped__.__dict__
    # We also need to propagate the special __weakref__ attribute for the
    # case where we are decorating classes which will define this. If we
    # do not define it and a function like inspect.getmembers() is used on
    # a decorator class it will fail. This can't be in the derived classes.
@property
def __weakref__(self):
return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
def __new__(cls, name, bases, dictionary):
# Copy our special properties into the class so that they
# always take precedence over attributes of the same name added
# during construction of a derived class. This is to save
# duplicating the implementation for them in all derived classes.
dictionary.update(vars(_ObjectProxyMethods))
return type.__new__(cls, name, bases, dictionary)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
__slots__ = '__wrapped__'
def __init__(self, wrapped):
object.__setattr__(self, '__wrapped__', wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object instead.
try:
object.__setattr__(self, '__qualname__', wrapped.__qualname__)
except AttributeError:
pass
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
@property
def __annotations__(self):
        return self.__wrapped__.__annotations__
@__annotations__.setter
def __annotations__(self, value):
self.__wrapped__.__annotations__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if PY3:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return '<%s at 0x%x for %s at 0x%x>' % (
type(self).__name__, id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__))
def __reversed__(self):
return reversed(self.__wrapped__)
if PY3:
def __round__(self):
return round(self.__wrapped__)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith('_self_'):
object.__setattr__(self, name, value)
elif name == '__wrapped__':
object.__setattr__(self, name, value)
try:
object.__delattr__(self, '__qualname__')
except AttributeError:
pass
try:
object.__setattr__(self, '__qualname__', value.__qualname__)
except AttributeError:
pass
elif name == '__qualname__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
        # If we are being asked to look up '__wrapped__' then the
# '__init__()' method cannot have been called.
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith('_self_'):
object.__delattr__(self, name)
elif name == '__wrapped__':
raise TypeError('__wrapped__ must be an object')
elif name == '__qualname__':
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
class CallableObjectProxy(ObjectProxy):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
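# Usage sketch: ObjectProxy is a transparent wrapper, so operators,
# attribute access and __class__ all defer to the wrapped object:
#
#   p = ObjectProxy([1, 2, 3])
#   len(p)                  # -> 3
#   p.append(4)             # mutates the wrapped list
#   isinstance(p, list)     # -> True, via the __class__ property
#
#   f = CallableObjectProxy(len)
#   f([1, 2])               # -> 2, calls through to the wrapped callable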
class _FunctionWrapperBase(ObjectProxy):
__slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
'_self_binding', '_self_parent')
def __init__(self, wrapped, instance, wrapper, enabled=None,
binding='function', parent=None):
super(_FunctionWrapperBase, self).__init__(wrapped)
object.__setattr__(self, '_self_instance', instance)
object.__setattr__(self, '_self_wrapper', wrapper)
object.__setattr__(self, '_self_enabled', enabled)
object.__setattr__(self, '_self_binding', binding)
object.__setattr__(self, '_self_parent', parent)
def __get__(self, instance, owner):
# This method is actually doing double duty for both unbound and
# bound derived wrapper classes. It should possibly be broken up
# and the distinct functionality moved into the derived classes.
# Can't do that straight away due to some legacy code which is
# relying on it being here in this base class.
#
# The distinguishing attribute which determines whether we are
# being called in an unbound or bound wrapper is the parent
# attribute. If binding has never occurred, then the parent will
# be None.
#
# First therefore, is if we are called in an unbound wrapper. In
# this case we perform the binding.
#
# We have one special case to worry about here. This is where we
# are decorating a nested class. In this case the wrapped class
# would not have a __get__() method to call. In that case we
# simply return self.
#
# Note that we otherwise still do binding even if instance is
# None and accessing an unbound instance method from a class.
# This is because we need to be able to later detect that
# specific case as we will need to extract the instance from the
# first argument of those passed in.
if self._self_parent is None:
if not inspect.isclass(self.__wrapped__):
descriptor = self.__wrapped__.__get__(instance, owner)
return self.__bound_function_wrapper__(descriptor, instance,
self._self_wrapper, self._self_enabled,
self._self_binding, self)
return self
# Now we have the case of binding occurring a second time on what
# was already a bound function. In this case we would usually
# return ourselves again. This mirrors what Python does.
#
# The special case this time is where we were originally bound
# with an instance of None and we were likely an instance
# method. In that case we rebind against the original wrapped
# function from the parent again.
if self._self_instance is None and self._self_binding == 'function':
descriptor = self._self_parent.__wrapped__.__get__(
instance, owner)
return self._self_parent.__bound_function_wrapper__(
descriptor, instance, self._self_wrapper,
self._self_enabled, self._self_binding,
self._self_parent)
return self
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
        # This can occur where the initial function wrapper was applied to
# a function that was already bound to an instance. In that case
# we want to extract the instance from the function and use it.
if self._self_binding == 'function':
if self._self_instance is None:
instance = getattr(self.__wrapped__, '__self__', None)
if instance is not None:
return self._self_wrapper(self.__wrapped__, instance,
args, kwargs)
# This is generally invoked when the wrapped function is being
# called as a normal function and is not bound to a class as an
# instance method. This is also invoked in the case where the
# wrapped function was a method, but this wrapper was in turn
# wrapped using the staticmethod decorator.
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
class BoundFunctionWrapper(_FunctionWrapperBase):
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
        # We need to do things differently depending on whether we are
# likely wrapping an instance method vs a static method or class
# method.
if self._self_binding == 'function':
if self._self_instance is None:
# This situation can occur where someone is calling the
# instancemethod via the class type and passing the instance
# as the first argument. We need to shift the args before
# making the call to the wrapper and effectively bind the
# instance to the wrapped function using a partial so the
# wrapper doesn't see anything as being different.
if not args:
raise TypeError('missing 1 required positional argument')
instance, args = args[0], args[1:]
wrapped = functools.partial(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, then _self_instance will only tell us whether
# when calling the classmethod or staticmethod they did it via an
# instance of the class it is bound to and not the case where
# done by the class type itself. We thus ignore _self_instance
# and use the __self__ attribute of the bound function instead.
# For a classmethod, this means instance will be the class type
# and for a staticmethod it will be None. This is probably the
            # more useful thing we can pass through even though we lose
            # knowledge of whether they were called on the instance vs the
            # class type, as it reflects what they have available in the
            # decorated function.
instance = getattr(self.__wrapped__, '__self__', None)
return self._self_wrapper(self.__wrapped__, instance, args,
kwargs)
class FunctionWrapper(_FunctionWrapperBase):
__bound_function_wrapper__ = BoundFunctionWrapper
def __init__(self, wrapped, wrapper, enabled=None):
# What it is we are wrapping here could be anything. We need to
# try and detect specific cases though. In particular, we need
# to detect when we are given something that is a method of a
# class. Further, we need to know when it is likely an instance
# method, as opposed to a class or static method. This can
        # become problematic though as there isn't strictly a foolproof
# method of knowing.
#
# The situations we could encounter when wrapping a method are:
#
# 1. The wrapper is being applied as part of a decorator which
# is a part of the class definition. In this case what we are
# given is the raw unbound function, classmethod or staticmethod
# wrapper objects.
#
# The problem here is that we will not know we are being applied
# in the context of the class being set up. This becomes
# important later for the case of an instance method, because in
# that case we just see it as a raw function and can't
# distinguish it from wrapping a normal function outside of
# a class context.
#
# 2. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved direct from the __dict__ of the class
# type. This is effectively the same as (1) above.
#
# 3. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved from the class type. In this case
# binding will have been performed where the instance against
# which the method is bound will be None at that point.
#
# This case is a problem because we can no longer tell if the
        # method was a static method, plus if using Python 3, we cannot
        # tell if it was an instance method as the concept of an
        # unbound method no longer exists.
#
# 4. The wrapper is being applied when performing monkey
# patching of an instance of a class. In this case binding will
        # have been performed where the instance was not None.
#
# This case is a problem because we can no longer tell if the
# method was a static method.
#
# Overall, the best we can do is look at the original type of the
# object which was wrapped prior to any binding being done and
# see if it is an instance of classmethod or staticmethod. In
# the case where other decorators are between us and them, if
# they do not propagate the __class__ attribute so that the
# isinstance() checks works, then likely this will do the wrong
# thing where classmethod and staticmethod are used.
#
# Since it is likely to be very rare that anyone even puts
# decorators around classmethod and staticmethod, likelihood of
# that being an issue is very small, so we accept it and suggest
# that those other decorators be fixed. It is also only an issue
# if a decorator wants to actually do things with the arguments.
#
# As to not being able to identify static methods properly, we
# just hope that that isn't something people are going to want
# to wrap, or if they do suggest they do it the correct way by
# ensuring that it is decorated in the class definition itself,
# or patch it in the __dict__ of the class type.
#
# So to get the best outcome we can, whenever we aren't sure what
# it is, we label it as a 'function'. If it was already bound and
# that is rebound later, we assume that it will be an instance
        # method and try and cope with the possibility that the 'self'
        # argument is being passed as an explicit argument and shuffle
# the arguments around to extract 'self' for use as the instance.
if isinstance(wrapped, classmethod):
binding = 'classmethod'
elif isinstance(wrapped, staticmethod):
binding = 'staticmethod'
elif hasattr(wrapped, '__self__'):
if inspect.isclass(wrapped.__self__):
binding = 'classmethod'
else:
binding = 'function'
else:
binding = 'function'
super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
enabled, binding)
try:
from ._wrappers import (ObjectProxy, CallableObjectProxy, FunctionWrapper,
BoundFunctionWrapper, _FunctionWrapperBase)
except ImportError:
pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
if isinstance(module, string_types):
__import__(module)
module = sys.modules[module]
parent = module
path = name.split('.')
attribute = path[0]
original = getattr(parent, attribute)
for attribute in path[1:]:
parent = original
# We can't just always use getattr() because in doing
# that on a class it will cause binding to occur which
# will complicate things later and cause some things not
# to work. For the case of a class we therefore access
# the __dict__ directly. To cope though with the wrong
# class being given to us, or a method being moved into
# a base class, we need to walk the class hierarchy to
# work out exactly which __dict__ the method was defined
# in, as accessing it from __dict__ will fail if it was
# not actually on the class given. Fallback to using
# getattr() if we can't find it. If it truly doesn't
# exist, then that will fail.
if inspect.isclass(original):
for cls in inspect.getmro(original):
                if attribute in vars(cls):
                    original = vars(cls)[attribute]
break
else:
original = getattr(original, attribute)
else:
original = getattr(original, attribute)
return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
(parent, attribute, original) = resolve_path(module, name)
wrapper = factory(original, *args, **kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
def __init__(self, attribute, factory, args, kwargs):
self.attribute = attribute
self.factory = factory
self.args = args
self.kwargs = kwargs
def __get__(self, instance, owner):
value = instance.__dict__[self.attribute]
return self.factory(value, *self.args, **self.kwargs)
def __set__(self, instance, value):
instance.__dict__[self.attribute] = value
def __delete__(self, instance):
del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
path, attribute = name.rsplit('.', 1)
parent = resolve_path(module, path)[2]
wrapper = AttributeWrapper(attribute, factory, args, kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
def function_wrapper(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
return FunctionWrapper(target_wrapped, target_wrapper)
return FunctionWrapper(wrapper, _wrapper)
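# Usage sketch for function_wrapper() as a decorator factory:
#
#   @function_wrapper
#   def trace(wrapped, instance, args, kwargs):
#       print('calling', wrapped.__name__)
#       return wrapped(*args, **kwargs)
#
#   @trace
#   def add(a, b):
#       return a + b
#
#   add(1, 2)   # prints "calling add" and returns 3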
def wrap_function_wrapper(module, name, wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
def _wrapper(wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
return _wrapper
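# Usage sketch; 'example' and 'function' are hypothetical module and
# attribute names standing in for the real target of the monkey patch:
#
#   @patch_function_wrapper('example', 'function')
#   def pass_through(wrapped, instance, args, kwargs):
#       return wrapped(*args, **kwargs)
#
# wrap_function_wrapper() applies the same patch without decorator syntax.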
def transient_function_wrapper(module, name):
def _decorator(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
def _execute(wrapped, instance, args, kwargs):
(parent, attribute, original) = resolve_path(module, name)
replacement = FunctionWrapper(original, target_wrapper)
setattr(parent, attribute, replacement)
try:
return wrapped(*args, **kwargs)
finally:
setattr(parent, attribute, original)
return FunctionWrapper(target_wrapped, _execute)
return FunctionWrapper(wrapper, _wrapper)
return _decorator
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
class WeakFunctionProxy(ObjectProxy):
__slots__ = ('_self_expired', '_self_instance')
def __init__(self, wrapped, callback=None):
# We need to determine if the wrapped function is actually a
# bound method. In the case of a bound method, we need to keep a
# reference to the original unbound function and the instance.
# This is necessary because if we hold a reference to the bound
# function, it will be the only reference and given it is a
# temporary object, it will almost immediately expire and
        # the weakref callback will be triggered. So what is done is that we
# hold a reference to the instance and unbound function and
# when called bind the function to the instance once again and
# then call it. Note that we avoid using a nested function for
# the callback here so as not to cause any odd reference cycles.
_callback = callback and functools.partial(
_weak_function_proxy_callback, proxy=self,
callback=callback)
self._self_expired = False
if isinstance(wrapped, _FunctionWrapperBase):
self._self_instance = weakref.ref(wrapped._self_instance,
_callback)
if wrapped._self_parent is not None:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped._self_parent, _callback))
else:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
return
try:
self._self_instance = weakref.ref(wrapped.__self__, _callback)
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped.__func__, _callback))
except AttributeError:
self._self_instance = None
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
def __call__(self, *args, **kwargs):
# We perform a boolean check here on the instance and wrapped
# function as that will trigger the reference error prior to
# calling if the reference had expired.
instance = self._self_instance and self._self_instance()
function = self.__wrapped__ and self.__wrapped__
# If the wrapped function was originally a bound function, for
# which we retained a reference to the instance and the unbound
        # function, we need to rebind the function and then call it. If
        # not, just call the wrapped function.
if instance is None:
return self.__wrapped__(*args, **kwargs)
return function.__get__(instance, type(instance))(*args, **kwargs)
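# Illustrative usage sketch, not part of the original module. The proxy
# only weakly references the wrapped function, so it does not keep the
# function (or, for bound methods, the instance) alive:
#
#   def callback():
#       return 'result'
#
#   proxy = WeakFunctionProxy(callback)
#   proxy()        # 'result' while 'callback' is alive
#   del callback
#   proxy()        # now raises ReferenceError (CPython collects promptly)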
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt/arguments.py
# This is a copy of the inspect.getcallargs() function from Python 2.7
# so we can provide it for use under Python 2.6. As the code in this
# file derives from the Python distribution, it falls under the version
# of the PSF license used for Python 2.7.
from inspect import getargspec, ismethod
import sys
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
args, varargs, varkw, defaults = getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg, subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg, str):
return arg in arg2value
return arg in assigned_tuple_params
if ismethod(func) and func.im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
if varkw:
if num_pos:
# XXX: We should use num_pos, but Python also uses num_total:
raise TypeError('%s() takes exactly 0 arguments '
'(%d given)' % (f_name, num_total))
else:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
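# Illustrative sketch, not part of the original module, assuming the
# Python 2 interpreter this shim targets:
#
#   def greet(name, greeting='hello', *rest, **extra):
#       pass
#
#   getcallargs(greet, 'world', 'hi', 1, 2, lang='en')
#   # {'name': 'world', 'greeting': 'hi',
#   #  'rest': (1, 2), 'extra': {'lang': 'en'}}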
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt/__init__.py
__version_info__ = ('1', '10', '10')
__version__ = '.'.join(__version_info__)
from .wrappers import (ObjectProxy, CallableObjectProxy, FunctionWrapper,
BoundFunctionWrapper, WeakFunctionProxy, resolve_path, apply_patch,
wrap_object, wrap_object_attribute, function_wrapper,
wrap_function_wrapper, patch_function_wrapper,
transient_function_wrapper)
from .decorators import (adapter_factory, AdapterFactory, decorator,
synchronized)
from .importer import (register_post_import_hook, when_imported,
notify_module_loaded, discover_post_import_hooks)
try:
from inspect import getcallargs
except ImportError:
from .arguments import getcallargs
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt/decorators.py
"""This module implements decorators for implementing other decorators
as well as some commonly used decorators.
"""
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
import builtins
exec_ = getattr(builtins, "exec")
del builtins
else:
string_types = basestring,
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
from functools import partial
from inspect import ismethod, isclass, formatargspec
from collections import namedtuple
from threading import Lock, RLock
try:
from inspect import signature
except ImportError:
pass
from .wrappers import (FunctionWrapper, BoundFunctionWrapper, ObjectProxy,
CallableObjectProxy)
# Adapter wrapper for the wrapped function which will overlay certain
# properties from the adapter function onto the wrapped function so that
# functions such as inspect.getargspec(), inspect.getfullargspec(),
# inspect.signature() and inspect.getsource() return the correct results
# one would expect.
class _AdapterFunctionCode(CallableObjectProxy):
def __init__(self, wrapped_code, adapter_code):
super(_AdapterFunctionCode, self).__init__(wrapped_code)
self._self_adapter_code = adapter_code
@property
def co_argcount(self):
return self._self_adapter_code.co_argcount
@property
def co_code(self):
return self._self_adapter_code.co_code
@property
def co_flags(self):
return self._self_adapter_code.co_flags
@property
def co_kwonlyargcount(self):
return self._self_adapter_code.co_kwonlyargcount
@property
def co_varnames(self):
return self._self_adapter_code.co_varnames
class _AdapterFunctionSurrogate(CallableObjectProxy):
def __init__(self, wrapped, adapter):
super(_AdapterFunctionSurrogate, self).__init__(wrapped)
self._self_adapter = adapter
@property
def __code__(self):
return _AdapterFunctionCode(self.__wrapped__.__code__,
self._self_adapter.__code__)
@property
def __defaults__(self):
return self._self_adapter.__defaults__
@property
def __kwdefaults__(self):
return self._self_adapter.__kwdefaults__
@property
def __signature__(self):
if 'signature' not in globals():
return self._self_adapter.__signature__
else:
# Can't allow this to fail on Python 3 else it falls
# through to using __wrapped__, but that will be the
# wrong function we want to derive the signature
# from. Thus generate the signature ourselves.
return signature(self._self_adapter)
if PY2:
func_code = __code__
func_defaults = __defaults__
class _BoundAdapterWrapper(BoundFunctionWrapper):
@property
def __func__(self):
return _AdapterFunctionSurrogate(self.__wrapped__.__func__,
self._self_parent._self_adapter)
if PY2:
im_func = __func__
class AdapterWrapper(FunctionWrapper):
__bound_function_wrapper__ = _BoundAdapterWrapper
def __init__(self, *args, **kwargs):
adapter = kwargs.pop('adapter')
super(AdapterWrapper, self).__init__(*args, **kwargs)
self._self_surrogate = _AdapterFunctionSurrogate(
self.__wrapped__, adapter)
self._self_adapter = adapter
@property
def __code__(self):
return self._self_surrogate.__code__
@property
def __defaults__(self):
return self._self_surrogate.__defaults__
@property
def __kwdefaults__(self):
return self._self_surrogate.__kwdefaults__
if PY2:
func_code = __code__
func_defaults = __defaults__
@property
def __signature__(self):
return self._self_surrogate.__signature__
class AdapterFactory(object):
def __call__(self, wrapped):
raise NotImplementedError()
class DelegatedAdapterFactory(AdapterFactory):
def __init__(self, factory):
super(DelegatedAdapterFactory, self).__init__()
self.factory = factory
def __call__(self, wrapped):
return self.factory(wrapped)
adapter_factory = DelegatedAdapterFactory
# Decorator for creating other decorators. This decorator and the
# wrappers which they use are designed to properly preserve any name
# attributes, function signatures etc, in addition to the wrappers
# themselves acting like a transparent proxy for the original wrapped
# function so the wrapper is effectively indistinguishable from the
# original wrapped function.
def decorator(wrapper=None, enabled=None, adapter=None):
# The decorator should be supplied with a single positional argument
# which is the wrapper function to be used to implement the
# decorator. This may be preceded by a step whereby the keyword
# arguments are supplied to customise the behaviour of the
# decorator. The 'adapter' argument is used to optionally denote a
# separate function which is notionally used by an adapter
# decorator. In that case parts of the function '__code__' and
# '__defaults__' attributes are used from the adapter function
# rather than those of the wrapped function. This allows for the
# argument specification from inspect.getargspec() and similar
# functions to be overridden with a prototype for a different
# function than what was wrapped. The 'enabled' argument provides a
# way to enable/disable the use of the decorator. If the type of
# 'enabled' is a boolean, then it is evaluated immediately and the
# wrapper not even applied if it is False. If not a boolean, it will
# be evaluated when the wrapper is called for an unbound wrapper,
# and when binding occurs for a bound wrapper. When being evaluated,
# if 'enabled' is callable it will be called to obtain the value to
    # be checked. If False, the wrapper will not be called; instead
    # the original wrapped function will be called directly.
if wrapper is not None:
        # Helper function for creating a wrapper of the appropriate
        # type when we need it down below.
def _build(wrapped, wrapper, enabled=None, adapter=None):
if adapter:
if isinstance(adapter, AdapterFactory):
adapter = adapter(wrapped)
if not callable(adapter):
ns = {}
if not isinstance(adapter, string_types):
adapter = formatargspec(*adapter)
exec_('def adapter{0}: pass'.format(adapter), ns, ns)
adapter = ns['adapter']
return AdapterWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled, adapter=adapter)
return FunctionWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled)
# The wrapper has been provided so return the final decorator.
# The decorator is itself one of our function wrappers so we
# can determine when it is applied to functions, instance methods
# or class methods. This allows us to bind the instance or class
# method so the appropriate self or cls attribute is supplied
# when it is finally called.
def _wrapper(wrapped, instance, args, kwargs):
# We first check for the case where the decorator was applied
# to a class type.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass(arg=1)
# def function():
# pass
#
# In this case an instance of the class is to be used as the
# decorator wrapper function. If args was empty at this point,
# then it means that there were optional keyword arguments
# supplied to be used when creating an instance of the class
# to be used as the wrapper function.
if instance is None and isclass(wrapped) and not args:
# We still need to be passed the target function to be
# wrapped as yet, so we need to return a further function
# to be able to capture it.
def _capture(target_wrapped):
# Now have the target function to be wrapped and need
# to create an instance of the class which is to act
# as the decorator wrapper function. Before we do that,
# we need to first check that use of the decorator
# hadn't been disabled by a simple boolean. If it was,
# the target function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# Now create an instance of the class which is to act
# as the decorator wrapper function. Any arguments had
# to be supplied as keyword only arguments so that is
# all we pass when creating it.
target_wrapper = wrapped(**kwargs)
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper,
_enabled, adapter)
return _capture
# We should always have the target function to be wrapped at
# this point as the first (and only) value in args.
target_wrapped = args[0]
# Need to now check that use of the decorator hadn't been
# disabled by a simple boolean. If it was, then target
# function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# We now need to build the wrapper, but there are a couple of
# different cases we need to consider.
if instance is None:
if isclass(wrapped):
# In this case the decorator was applied to a class
# type but optional keyword arguments were not supplied
# for initialising an instance of the class to be used
# as the decorator wrapper function.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass
# def function():
# pass
#
# We still need to create an instance of the class to
# be used as the decorator wrapper function, but no
                # arguments are passed.
target_wrapper = wrapped()
else:
# In this case the decorator was applied to a normal
# function, or possibly a static method of a class.
#
# @decorator
# def mydecoratorfuntion(wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorfunction
# def function():
# pass
#
# That normal function becomes the decorator wrapper
# function.
target_wrapper = wrapper
else:
if isclass(instance):
# In this case the decorator was applied to a class
# method.
#
# class myclass(object):
# @decorator
# @classmethod
# def decoratorclassmethod(cls, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
# @instance.decoratorclassmethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
                # wrapper function to which the decorator factory
                # was applied.
target_wrapper = wrapper.__get__(None, instance)
else:
# In this case the decorator was applied to an instance
# method.
#
# class myclass(object):
# @decorator
                #     def decoratorinstancemethod(self, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
                # @instance.decoratorinstancemethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
                # wrapper function to which the decorator factory
                # was applied.
target_wrapper = wrapper.__get__(instance, type(instance))
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper, _enabled, adapter)
# We first return our magic function wrapper here so we can
# determine in what context the decorator factory was used. In
# other words, it is itself a universal decorator.
return _build(wrapper, _wrapper)
else:
# The wrapper still has not been provided, so we are just
# collecting the optional keyword arguments. Return the
# decorator again wrapped in a partial using the collected
# arguments.
return partial(decorator, enabled=enabled, adapter=adapter)
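# Illustrative usage sketch, not part of the original module. A minimal
# pass-through decorator built with decorator(), using a callable
# 'enabled' check; DEBUG is a hypothetical flag. Because 'enabled' is
# callable rather than a plain boolean, it is re-evaluated at call time
# and the wrapper body is bypassed while it returns a falsy value:
#
#   DEBUG = False
#
#   @decorator(enabled=lambda: DEBUG)
#   def trace(wrapped, instance, args, kwargs):
#       print('calling %s' % wrapped.__name__)
#       return wrapped(*args, **kwargs)
#
#   @trace
#   def compute(x):
#       return x * 2
#
#   compute(3)  # trace body is bypassed while DEBUG is falsy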
# Decorator for implementing thread synchronization. It can be used as a
# decorator, in which case the synchronization context is determined by
# what type of function is wrapped, or it can also be used as a context
# manager, where the user needs to supply the correct synchronization
# context. It is also possible to supply an object which appears to be a
# synchronization primitive of some sort, by virtue of having release()
# and acquire() methods. In that case that will be used directly as the
# synchronization primitive without creating a separate lock against the
# derived or supplied context.
def synchronized(wrapped):
# Determine if being passed an object which is a synchronization
# primitive. We can't check by type for Lock, RLock, Semaphore etc,
# as the means of creating them isn't the type. Therefore use the
# existence of acquire() and release() methods. This is more
# extensible anyway as it allows custom synchronization mechanisms.
if hasattr(wrapped, 'acquire') and hasattr(wrapped, 'release'):
# We remember what the original lock is and then return a new
# decorator which accesses and locks it. When returning the new
# decorator we wrap it with an object proxy so we can override
# the context manager methods in case it is being used to wrap
# synchronized statements with a 'with' statement.
lock = wrapped
@decorator
def _synchronized(wrapped, instance, args, kwargs):
# Execute the wrapped function while the original supplied
# lock is held.
with lock:
return wrapped(*args, **kwargs)
class _PartialDecorator(CallableObjectProxy):
def __enter__(self):
lock.acquire()
return lock
def __exit__(self, *args):
lock.release()
return _PartialDecorator(wrapped=_synchronized)
    # The following applies only when the lock is being created automatically
# based on the context of what was supplied. In this case we supply
# a final decorator, but need to use FunctionWrapper directly as we
# want to derive from it to add context manager methods in case it is
# being used to wrap synchronized statements with a 'with' statement.
def _synchronized_lock(context):
# Attempt to retrieve the lock for the specific context.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
# There is no existing lock defined for the context we
# are dealing with so we need to create one. This needs
# to be done in a way to guarantee there is only one
# created, even if multiple threads try and create it at
# the same time. We can't always use the setdefault()
# method on the __dict__ for the context. This is the
# case where the context is a class, as __dict__ is
# actually a dictproxy. What we therefore do is use a
# meta lock on this wrapper itself, to control the
# creation and assignment of the lock attribute against
# the context.
meta_lock = vars(synchronized).setdefault(
'_synchronized_meta_lock', Lock())
with meta_lock:
# We need to check again for whether the lock we want
# exists in case two threads were trying to create it
# at the same time and were competing to create the
# meta lock.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
lock = RLock()
setattr(context, '_synchronized_lock', lock)
return lock
def _synchronized_wrapper(wrapped, instance, args, kwargs):
# Execute the wrapped function while the lock for the
# desired context is held. If instance is None then the
# wrapped function is used as the context.
with _synchronized_lock(instance or wrapped):
return wrapped(*args, **kwargs)
class _FinalDecorator(FunctionWrapper):
def __enter__(self):
self._self_lock = _synchronized_lock(self.__wrapped__)
self._self_lock.acquire()
return self._self_lock
def __exit__(self, *args):
self._self_lock.release()
return _FinalDecorator(wrapped=wrapped, wrapper=_synchronized_wrapper)
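# Illustrative usage sketch, not part of the original module.
# synchronized() can guard a method (the lock is derived from the
# instance) or wrap an explicit primitive as a context manager:
#
#   class Counter(object):
#       def __init__(self):
#           self.value = 0
#       @synchronized
#       def increment(self):
#           self.value += 1
#
#   import threading
#   lock = threading.Lock()
#   with synchronized(lock):
#       ...  # body runs while 'lock' is held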
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/exceptions.py
from jmespath.compat import with_str_method
class JMESPathError(ValueError):
pass
@with_str_method
class ParseError(JMESPathError):
_ERROR_MESSAGE = 'Invalid jmespath expression'
def __init__(self, lex_position, token_value, token_type,
msg=_ERROR_MESSAGE):
super(ParseError, self).__init__(lex_position, token_value, token_type)
self.lex_position = lex_position
self.token_value = token_value
self.token_type = token_type.upper()
self.msg = msg
# Whatever catches the ParseError can fill in the full expression
self.expression = None
def __str__(self):
# self.lex_position +1 to account for the starting double quote char.
underline = ' ' * (self.lex_position + 1) + '^'
return (
'%s: Parse error at column %s, '
'token "%s" (%s), for expression:\n"%s"\n%s' % (
self.msg, self.lex_position, self.token_value, self.token_type,
self.expression, underline))
@with_str_method
class IncompleteExpressionError(ParseError):
def set_expression(self, expression):
self.expression = expression
self.lex_position = len(expression)
self.token_type = None
self.token_value = None
def __str__(self):
# self.lex_position +1 to account for the starting double quote char.
underline = ' ' * (self.lex_position + 1) + '^'
return (
'Invalid jmespath expression: Incomplete expression:\n'
'"%s"\n%s' % (self.expression, underline))
@with_str_method
class LexerError(ParseError):
def __init__(self, lexer_position, lexer_value, message, expression=None):
self.lexer_position = lexer_position
self.lexer_value = lexer_value
self.message = message
super(LexerError, self).__init__(lexer_position,
lexer_value,
message)
# Whatever catches LexerError can set this.
self.expression = expression
def __str__(self):
underline = ' ' * self.lexer_position + '^'
return 'Bad jmespath expression: %s:\n%s\n%s' % (
self.message, self.expression, underline)
@with_str_method
class ArityError(ParseError):
def __init__(self, expected, actual, name):
self.expected_arity = expected
self.actual_arity = actual
self.function_name = name
self.expression = None
def __str__(self):
return ("Expected %s %s for function %s(), "
"received %s" % (
self.expected_arity,
self._pluralize('argument', self.expected_arity),
self.function_name,
self.actual_arity))
def _pluralize(self, word, count):
if count == 1:
return word
else:
return word + 's'
@with_str_method
class VariadictArityError(ArityError):
def __str__(self):
return ("Expected at least %s %s for function %s(), "
"received %s" % (
self.expected_arity,
self._pluralize('argument', self.expected_arity),
self.function_name,
self.actual_arity))
@with_str_method
class JMESPathTypeError(JMESPathError):
def __init__(self, function_name, current_value, actual_type,
expected_types):
self.function_name = function_name
self.current_value = current_value
self.actual_type = actual_type
self.expected_types = expected_types
def __str__(self):
return ('In function %s(), invalid type for value: %s, '
'expected one of: %s, received: "%s"' % (
self.function_name, self.current_value,
self.expected_types, self.actual_type))
class EmptyExpressionError(JMESPathError):
def __init__(self):
super(EmptyExpressionError, self).__init__(
"Invalid JMESPath expression: cannot be empty.")
class UnknownFunctionError(JMESPathError):
pass
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/parser.py
"""Top down operator precedence parser.
This is an implementation of Vaughan R. Pratt's
"Top Down Operator Precedence" parser.
(http://dl.acm.org/citation.cfm?doid=512927.512931).
These are some additional resources that help explain the
general idea behind a Pratt parser:
* http://effbot.org/zone/simple-top-down-parsing.htm
* http://javascript.crockford.com/tdop/tdop.html
A few notes on the implementation.
* All the nud/led tokens are on the Parser class itself, and are dispatched
using getattr(). This keeps all the parsing logic contained to a single
class.
* We use two passes through the data. One to create a list of tokens,
then one pass through the tokens to create the AST. While the lexer actually
yields tokens, we convert it to a list so we can easily implement two tokens
of lookahead. A previous implementation used a fixed circular buffer, but it
was significantly slower. Also, the average jmespath expression typically
  does not have a large number of tokens so this is not an issue. And
interestingly enough, creating a token list first is actually faster than
consuming from the token iterator one token at a time.
"""
import random
from jmespath import lexer
from jmespath.compat import with_repr_method
from jmespath import ast
from jmespath import exceptions
from jmespath import visitor
class Parser(object):
BINDING_POWER = {
'eof': 0,
'unquoted_identifier': 0,
'quoted_identifier': 0,
'literal': 0,
'rbracket': 0,
'rparen': 0,
'comma': 0,
'rbrace': 0,
'number': 0,
'current': 0,
'expref': 0,
'colon': 0,
'pipe': 1,
'or': 2,
'and': 3,
'eq': 5,
'gt': 5,
'lt': 5,
'gte': 5,
'lte': 5,
'ne': 5,
'flatten': 9,
# Everything above stops a projection.
'star': 20,
'filter': 21,
'dot': 40,
'not': 45,
'lbrace': 50,
'lbracket': 55,
'lparen': 60,
}
# The maximum binding power for a token that can stop
# a projection.
_PROJECTION_STOP = 10
# The _MAX_SIZE most recent expressions are cached in
# _CACHE dict.
_CACHE = {}
_MAX_SIZE = 128
def __init__(self, lookahead=2):
self.tokenizer = None
self._tokens = [None] * lookahead
self._buffer_size = lookahead
self._index = 0
def parse(self, expression):
cached = self._CACHE.get(expression)
if cached is not None:
return cached
parsed_result = self._do_parse(expression)
self._CACHE[expression] = parsed_result
if len(self._CACHE) > self._MAX_SIZE:
self._free_cache_entries()
return parsed_result
def _do_parse(self, expression):
try:
return self._parse(expression)
except exceptions.LexerError as e:
e.expression = expression
raise
except exceptions.IncompleteExpressionError as e:
e.set_expression(expression)
raise
except exceptions.ParseError as e:
e.expression = expression
raise
def _parse(self, expression):
self.tokenizer = lexer.Lexer().tokenize(expression)
self._tokens = list(self.tokenizer)
self._index = 0
parsed = self._expression(binding_power=0)
if not self._current_token() == 'eof':
t = self._lookahead_token(0)
raise exceptions.ParseError(t['start'], t['value'], t['type'],
"Unexpected token: %s" % t['value'])
return ParsedResult(expression, parsed)
def _expression(self, binding_power=0):
left_token = self._lookahead_token(0)
self._advance()
nud_function = getattr(
self, '_token_nud_%s' % left_token['type'],
self._error_nud_token)
left = nud_function(left_token)
current_token = self._current_token()
while binding_power < self.BINDING_POWER[current_token]:
led = getattr(self, '_token_led_%s' % current_token, None)
if led is None:
error_token = self._lookahead_token(0)
self._error_led_token(error_token)
else:
self._advance()
left = led(left)
current_token = self._current_token()
return left
def _token_nud_literal(self, token):
return ast.literal(token['value'])
def _token_nud_unquoted_identifier(self, token):
return ast.field(token['value'])
def _token_nud_quoted_identifier(self, token):
field = ast.field(token['value'])
# You can't have a quoted identifier as a function
# name.
if self._current_token() == 'lparen':
t = self._lookahead_token(0)
raise exceptions.ParseError(
0, t['value'], t['type'],
'Quoted identifier not allowed for function names.')
return field
def _token_nud_star(self, token):
left = ast.identity()
if self._current_token() == 'rbracket':
right = ast.identity()
else:
right = self._parse_projection_rhs(self.BINDING_POWER['star'])
return ast.value_projection(left, right)
def _token_nud_filter(self, token):
return self._token_led_filter(ast.identity())
def _token_nud_lbrace(self, token):
return self._parse_multi_select_hash()
def _token_nud_lparen(self, token):
expression = self._expression()
self._match('rparen')
return expression
def _token_nud_flatten(self, token):
left = ast.flatten(ast.identity())
right = self._parse_projection_rhs(
self.BINDING_POWER['flatten'])
return ast.projection(left, right)
def _token_nud_not(self, token):
expr = self._expression(self.BINDING_POWER['not'])
return ast.not_expression(expr)
def _token_nud_lbracket(self, token):
if self._current_token() in ['number', 'colon']:
right = self._parse_index_expression()
# We could optimize this and remove the identity() node.
# We don't really need an index_expression node, we can
            # just emit an index node here if we're not dealing
# with a slice.
return self._project_if_slice(ast.identity(), right)
elif self._current_token() == 'star' and \
self._lookahead(1) == 'rbracket':
self._advance()
self._advance()
right = self._parse_projection_rhs(self.BINDING_POWER['star'])
return ast.projection(ast.identity(), right)
else:
return self._parse_multi_select_list()
def _parse_index_expression(self):
# We're here:
# [<current>
# ^
# | current token
if (self._lookahead(0) == 'colon' or
self._lookahead(1) == 'colon'):
return self._parse_slice_expression()
else:
# Parse the syntax [number]
node = ast.index(self._lookahead_token(0)['value'])
self._advance()
self._match('rbracket')
return node
def _parse_slice_expression(self):
# [start:end:step]
# Where start, end, and step are optional.
# The last colon is optional as well.
parts = [None, None, None]
index = 0
current_token = self._current_token()
while not current_token == 'rbracket' and index < 3:
if current_token == 'colon':
index += 1
if index == 3:
self._raise_parse_error_for_token(
self._lookahead_token(0), 'syntax error')
self._advance()
elif current_token == 'number':
parts[index] = self._lookahead_token(0)['value']
self._advance()
else:
self._raise_parse_error_for_token(
self._lookahead_token(0), 'syntax error')
current_token = self._current_token()
self._match('rbracket')
return ast.slice(*parts)
def _token_nud_current(self, token):
return ast.current_node()
def _token_nud_expref(self, token):
expression = self._expression(self.BINDING_POWER['expref'])
return ast.expref(expression)
def _token_led_dot(self, left):
if not self._current_token() == 'star':
right = self._parse_dot_rhs(self.BINDING_POWER['dot'])
if left['type'] == 'subexpression':
left['children'].append(right)
return left
else:
return ast.subexpression([left, right])
else:
# We're creating a projection.
self._advance()
right = self._parse_projection_rhs(
self.BINDING_POWER['dot'])
return ast.value_projection(left, right)
def _token_led_pipe(self, left):
right = self._expression(self.BINDING_POWER['pipe'])
return ast.pipe(left, right)
def _token_led_or(self, left):
right = self._expression(self.BINDING_POWER['or'])
return ast.or_expression(left, right)
def _token_led_and(self, left):
right = self._expression(self.BINDING_POWER['and'])
return ast.and_expression(left, right)
def _token_led_lparen(self, left):
if left['type'] != 'field':
# 0 - first func arg or closing paren.
# -1 - '(' token
# -2 - invalid function "name".
prev_t = self._lookahead_token(-2)
raise exceptions.ParseError(
prev_t['start'], prev_t['value'], prev_t['type'],
"Invalid function name '%s'" % prev_t['value'])
name = left['value']
args = []
while not self._current_token() == 'rparen':
expression = self._expression()
if self._current_token() == 'comma':
self._match('comma')
args.append(expression)
self._match('rparen')
function_node = ast.function_expression(name, args)
return function_node
def _token_led_filter(self, left):
# Filters are projections.
condition = self._expression(0)
self._match('rbracket')
if self._current_token() == 'flatten':
right = ast.identity()
else:
right = self._parse_projection_rhs(self.BINDING_POWER['filter'])
return ast.filter_projection(left, right, condition)
def _token_led_eq(self, left):
return self._parse_comparator(left, 'eq')
def _token_led_ne(self, left):
return self._parse_comparator(left, 'ne')
def _token_led_gt(self, left):
return self._parse_comparator(left, 'gt')
def _token_led_gte(self, left):
return self._parse_comparator(left, 'gte')
def _token_led_lt(self, left):
return self._parse_comparator(left, 'lt')
def _token_led_lte(self, left):
return self._parse_comparator(left, 'lte')
def _token_led_flatten(self, left):
left = ast.flatten(left)
right = self._parse_projection_rhs(
self.BINDING_POWER['flatten'])
return ast.projection(left, right)
def _token_led_lbracket(self, left):
token = self._lookahead_token(0)
if token['type'] in ['number', 'colon']:
right = self._parse_index_expression()
if left['type'] == 'index_expression':
# Optimization: if the left node is an index expr,
# we can avoid creating another node and instead just add
# the right node as a child of the left.
left['children'].append(right)
return left
else:
return self._project_if_slice(left, right)
else:
# We have a projection
self._match('star')
self._match('rbracket')
right = self._parse_projection_rhs(self.BINDING_POWER['star'])
return ast.projection(left, right)
def _project_if_slice(self, left, right):
index_expr = ast.index_expression([left, right])
if right['type'] == 'slice':
return ast.projection(
index_expr,
self._parse_projection_rhs(self.BINDING_POWER['star']))
else:
return index_expr
def _parse_comparator(self, left, comparator):
right = self._expression(self.BINDING_POWER[comparator])
return ast.comparator(comparator, left, right)
def _parse_multi_select_list(self):
expressions = []
while True:
expression = self._expression()
expressions.append(expression)
if self._current_token() == 'rbracket':
break
else:
self._match('comma')
self._match('rbracket')
return ast.multi_select_list(expressions)
def _parse_multi_select_hash(self):
pairs = []
while True:
key_token = self._lookahead_token(0)
# Before getting the token value, verify it's
# an identifier.
self._match_multiple_tokens(
token_types=['quoted_identifier', 'unquoted_identifier'])
key_name = key_token['value']
self._match('colon')
value = self._expression(0)
node = ast.key_val_pair(key_name=key_name, node=value)
pairs.append(node)
if self._current_token() == 'comma':
self._match('comma')
elif self._current_token() == 'rbrace':
self._match('rbrace')
break
return ast.multi_select_dict(nodes=pairs)
def _parse_projection_rhs(self, binding_power):
# Parse the right hand side of the projection.
if self.BINDING_POWER[self._current_token()] < self._PROJECTION_STOP:
# BP of 10 are all the tokens that stop a projection.
right = ast.identity()
elif self._current_token() == 'lbracket':
right = self._expression(binding_power)
elif self._current_token() == 'filter':
right = self._expression(binding_power)
elif self._current_token() == 'dot':
self._match('dot')
right = self._parse_dot_rhs(binding_power)
else:
self._raise_parse_error_for_token(self._lookahead_token(0),
'syntax error')
return right
def _parse_dot_rhs(self, binding_power):
# From the grammar:
# expression '.' ( identifier /
# multi-select-list /
# multi-select-hash /
# function-expression /
# *
# In terms of tokens that means that after a '.',
# you can have:
lookahead = self._current_token()
# Common case "foo.bar", so first check for an identifier.
if lookahead in ['quoted_identifier', 'unquoted_identifier', 'star']:
return self._expression(binding_power)
elif lookahead == 'lbracket':
self._match('lbracket')
return self._parse_multi_select_list()
elif lookahead == 'lbrace':
self._match('lbrace')
return self._parse_multi_select_hash()
else:
t = self._lookahead_token(0)
allowed = ['quoted_identifier', 'unquoted_identifier',
'lbracket', 'lbrace']
msg = (
"Expecting: %s, got: %s" % (allowed, t['type'])
)
self._raise_parse_error_for_token(t, msg)
def _error_nud_token(self, token):
if token['type'] == 'eof':
raise exceptions.IncompleteExpressionError(
token['start'], token['value'], token['type'])
self._raise_parse_error_for_token(token, 'invalid token')
def _error_led_token(self, token):
self._raise_parse_error_for_token(token, 'invalid token')
def _match(self, token_type=None):
# inline'd self._current_token()
if self._current_token() == token_type:
# inline'd self._advance()
self._advance()
else:
self._raise_parse_error_maybe_eof(
token_type, self._lookahead_token(0))
def _match_multiple_tokens(self, token_types):
if self._current_token() not in token_types:
self._raise_parse_error_maybe_eof(
token_types, self._lookahead_token(0))
self._advance()
def _advance(self):
self._index += 1
def _current_token(self):
return self._tokens[self._index]['type']
def _lookahead(self, number):
return self._tokens[self._index + number]['type']
def _lookahead_token(self, number):
return self._tokens[self._index + number]
def _raise_parse_error_for_token(self, token, reason):
lex_position = token['start']
actual_value = token['value']
actual_type = token['type']
raise exceptions.ParseError(lex_position, actual_value,
actual_type, reason)
def _raise_parse_error_maybe_eof(self, expected_type, token):
lex_position = token['start']
actual_value = token['value']
actual_type = token['type']
if actual_type == 'eof':
raise exceptions.IncompleteExpressionError(
lex_position, actual_value, actual_type)
message = 'Expecting: %s, got: %s' % (expected_type,
actual_type)
raise exceptions.ParseError(
lex_position, actual_value, actual_type, message)
def _free_cache_entries(self):
for key in random.sample(self._CACHE.keys(), int(self._MAX_SIZE / 2)):
self._CACHE.pop(key, None)
@classmethod
def purge(cls):
"""Clear the expression compilation cache."""
cls._CACHE.clear()
@with_repr_method
class ParsedResult(object):
def __init__(self, expression, parsed):
self.expression = expression
self.parsed = parsed
def search(self, value, options=None):
interpreter = visitor.TreeInterpreter(options)
result = interpreter.visit(self.parsed, value)
return result
def _render_dot_file(self):
"""Render the parsed AST as a dot file.
Note that this is marked as an internal method because
the AST is an implementation detail and is subject
to change. This method can be used to help troubleshoot
or for development purposes, but is not considered part
of the public supported API. Use at your own risk.
"""
renderer = visitor.GraphvizVisitor()
contents = renderer.visit(self.parsed)
return contents
def __repr__(self):
return repr(self.parsed)
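# Illustrative usage sketch, not part of the original module: a parsed
# expression is cached and can be reused against multiple documents.
# Runs only when this file is executed directly, never on import.
if __name__ == '__main__':
    parsed = Parser().parse('foo[*].bar')
    print(parsed.search({'foo': [{'bar': 1}, {'bar': 2}]}))  # [1, 2]
    print(parsed.search({'foo': []}))                        # []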
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/ast.py
# AST nodes have this structure:
# {"type": <node type>", children: [], "value": ""}
def comparator(name, first, second):
return {'type': 'comparator', 'children': [first, second], 'value': name}
def current_node():
return {'type': 'current', 'children': []}
def expref(expression):
return {'type': 'expref', 'children': [expression]}
def function_expression(name, args):
return {'type': 'function_expression', 'children': args, 'value': name}
def field(name):
return {"type": "field", "children": [], "value": name}
def filter_projection(left, right, comparator):
return {'type': 'filter_projection', 'children': [left, right, comparator]}
def flatten(node):
return {'type': 'flatten', 'children': [node]}
def identity():
return {"type": "identity", 'children': []}
def index(index):
return {"type": "index", "value": index, "children": []}
def index_expression(children):
return {"type": "index_expression", 'children': children}
def key_val_pair(key_name, node):
return {"type": "key_val_pair", 'children': [node], "value": key_name}
def literal(literal_value):
return {'type': 'literal', 'value': literal_value, 'children': []}
def multi_select_dict(nodes):
return {"type": "multi_select_dict", "children": nodes}
def multi_select_list(nodes):
return {"type": "multi_select_list", "children": nodes}
def or_expression(left, right):
return {"type": "or_expression", "children": [left, right]}
def and_expression(left, right):
return {"type": "and_expression", "children": [left, right]}
def not_expression(expr):
return {"type": "not_expression", "children": [expr]}
def pipe(left, right):
return {'type': 'pipe', 'children': [left, right]}
def projection(left, right):
return {'type': 'projection', 'children': [left, right]}
def subexpression(children):
return {"type": "subexpression", 'children': children}
def slice(start, end, step):
return {"type": "slice", "children": [start, end, step]}
def value_projection(left, right):
return {'type': 'value_projection', 'children': [left, right]}
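# Illustrative sketch, not part of the original module: the expression
# "foo.bar" corresponds to a subexpression over two field nodes. Runs
# only when this file is executed directly, never on import.
if __name__ == '__main__':
    print(subexpression([field('foo'), field('bar')]))
    # {'type': 'subexpression', 'children': [
    #     {'type': 'field', 'children': [], 'value': 'foo'},
    #     {'type': 'field', 'children': [], 'value': 'bar'}]}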
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/lexer.py
import string
import warnings
from json import loads
from jmespath.exceptions import LexerError, EmptyExpressionError
class Lexer(object):
START_IDENTIFIER = set(string.ascii_letters + '_')
VALID_IDENTIFIER = set(string.ascii_letters + string.digits + '_')
VALID_NUMBER = set(string.digits)
WHITESPACE = set(" \t\n\r")
SIMPLE_TOKENS = {
'.': 'dot',
'*': 'star',
']': 'rbracket',
',': 'comma',
':': 'colon',
'@': 'current',
'(': 'lparen',
')': 'rparen',
'{': 'lbrace',
'}': 'rbrace',
}
def tokenize(self, expression):
self._initialize_for_expression(expression)
while self._current is not None:
if self._current in self.SIMPLE_TOKENS:
yield {'type': self.SIMPLE_TOKENS[self._current],
'value': self._current,
'start': self._position, 'end': self._position + 1}
self._next()
elif self._current in self.START_IDENTIFIER:
start = self._position
buff = self._current
while self._next() in self.VALID_IDENTIFIER:
buff += self._current
yield {'type': 'unquoted_identifier', 'value': buff,
'start': start, 'end': start + len(buff)}
elif self._current in self.WHITESPACE:
self._next()
elif self._current == '[':
start = self._position
next_char = self._next()
if next_char == ']':
self._next()
yield {'type': 'flatten', 'value': '[]',
'start': start, 'end': start + 2}
elif next_char == '?':
self._next()
yield {'type': 'filter', 'value': '[?',
'start': start, 'end': start + 2}
else:
yield {'type': 'lbracket', 'value': '[',
'start': start, 'end': start + 1}
elif self._current == "'":
yield self._consume_raw_string_literal()
elif self._current == '|':
yield self._match_or_else('|', 'or', 'pipe')
elif self._current == '&':
yield self._match_or_else('&', 'and', 'expref')
elif self._current == '`':
yield self._consume_literal()
elif self._current in self.VALID_NUMBER:
start = self._position
buff = self._consume_number()
yield {'type': 'number', 'value': int(buff),
'start': start, 'end': start + len(buff)}
elif self._current == '-':
# Negative number.
start = self._position
buff = self._consume_number()
if len(buff) > 1:
yield {'type': 'number', 'value': int(buff),
'start': start, 'end': start + len(buff)}
else:
raise LexerError(lexer_position=start,
lexer_value=buff,
message="Unknown token '%s'" % buff)
elif self._current == '"':
yield self._consume_quoted_identifier()
elif self._current == '<':
yield self._match_or_else('=', 'lte', 'lt')
elif self._current == '>':
yield self._match_or_else('=', 'gte', 'gt')
elif self._current == '!':
yield self._match_or_else('=', 'ne', 'not')
elif self._current == '=':
if self._next() == '=':
yield {'type': 'eq', 'value': '==',
'start': self._position - 1, 'end': self._position}
self._next()
else:
if self._current is None:
# If we're at the EOF, we never advanced
# the position so we don't need to rewind
# it back one location.
position = self._position
else:
position = self._position - 1
raise LexerError(
lexer_position=position,
lexer_value='=',
message="Unknown token '='")
else:
raise LexerError(lexer_position=self._position,
lexer_value=self._current,
message="Unknown token %s" % self._current)
yield {'type': 'eof', 'value': '',
'start': self._length, 'end': self._length}
def _consume_number(self):
start = self._position
buff = self._current
while self._next() in self.VALID_NUMBER:
buff += self._current
return buff
def _initialize_for_expression(self, expression):
if not expression:
raise EmptyExpressionError()
self._position = 0
self._expression = expression
self._chars = list(self._expression)
self._current = self._chars[self._position]
self._length = len(self._expression)
def _next(self):
if self._position == self._length - 1:
self._current = None
else:
self._position += 1
self._current = self._chars[self._position]
return self._current
def _consume_until(self, delimiter):
# Consume until the delimiter is reached,
# allowing for the delimiter to be escaped with "\".
start = self._position
buff = ''
self._next()
while self._current != delimiter:
if self._current == '\\':
buff += '\\'
self._next()
if self._current is None:
# We're at the EOF.
raise LexerError(lexer_position=start,
lexer_value=self._expression[start:],
message="Unclosed %s delimiter" % delimiter)
buff += self._current
self._next()
# Skip the closing delimiter.
self._next()
return buff
def _consume_literal(self):
start = self._position
lexeme = self._consume_until('`').replace('\\`', '`')
try:
# Assume it is valid JSON and attempt to parse.
parsed_json = loads(lexeme)
except ValueError:
try:
# Invalid JSON values should be converted to quoted
# JSON strings during the JEP-12 deprecation period.
parsed_json = loads('"%s"' % lexeme.lstrip())
warnings.warn("deprecated string literal syntax",
PendingDeprecationWarning)
except ValueError:
raise LexerError(lexer_position=start,
lexer_value=self._expression[start:],
message="Bad token %s" % lexeme)
token_len = self._position - start
return {'type': 'literal', 'value': parsed_json,
'start': start, 'end': token_len}
def _consume_quoted_identifier(self):
start = self._position
lexeme = '"' + self._consume_until('"') + '"'
try:
token_len = self._position - start
return {'type': 'quoted_identifier', 'value': loads(lexeme),
'start': start, 'end': token_len}
except ValueError as e:
error_message = str(e).split(':')[0]
raise LexerError(lexer_position=start,
lexer_value=lexeme,
message=error_message)
def _consume_raw_string_literal(self):
start = self._position
lexeme = self._consume_until("'").replace("\\'", "'")
token_len = self._position - start
return {'type': 'literal', 'value': lexeme,
'start': start, 'end': token_len}
def _match_or_else(self, expected, match_type, else_type):
start = self._position
current = self._current
next_char = self._next()
if next_char == expected:
self._next()
return {'type': match_type, 'value': current + next_char,
'start': start, 'end': start + 1}
return {'type': else_type, 'value': current,
'start': start, 'end': start}
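# Illustrative sketch, not part of the original module, of the token
# stream produced for a simple expression. Runs only when this file is
# executed directly, never on import.
if __name__ == '__main__':
    for token in Lexer().tokenize('foo[0]'):
        print(token['type'], repr(token['value']))
    # unquoted_identifier 'foo'
    # lbracket '['
    # number 0
    # rbracket ']'
    # eof ''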
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/__init__.py
import warnings
import sys
from jmespath import parser
from jmespath.visitor import Options
__version__ = '0.10.0'
if sys.version_info[:2] <= (2, 6) or ((3, 0) <= sys.version_info[:2] <= (3, 3)):
python_ver = '.'.join(str(x) for x in sys.version_info[:3])
warnings.warn(
'You are using Python {0}, which will no longer be supported in '
'version 0.11.0'.format(python_ver),
DeprecationWarning)
def compile(expression):
return parser.Parser().parse(expression)
def search(expression, data, options=None):
return parser.Parser().parse(expression).search(data, options=options)
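# Illustrative usage sketch, not part of the original module; runs only
# when this file is executed directly, never on import.
if __name__ == '__main__':
    print(search('foo.bar', {'foo': {'bar': 'baz'}}))         # baz
    expr = compile('foo[?age > `30`].name')
    print(expr.search({'foo': [{'age': 40, 'name': 'a'}]}))   # ['a']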
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/visitor.py
import operator
from jmespath import functions
from jmespath.compat import string_type
from numbers import Number
def _equals(x, y):
if _is_special_integer_case(x, y):
return False
else:
return x == y
def _is_special_integer_case(x, y):
    # We need to special-case comparing 0 or 1 to
    # True/False. Comparing any integer other than 0/1 to
    # True/False always returns False, but 0 and 1
    # behave like this:
# >>> 0 == True
# False
# >>> 0 == False
# True
# >>> 1 == True
# True
# >>> 1 == False
# False
#
# Also need to consider that:
# >>> 0 in [True, False]
# True
if type(x) is int and (x == 0 or x == 1):
return y is True or y is False
elif type(y) is int and (y == 0 or y == 1):
return x is True or x is False
def _is_comparable(x):
# The spec doesn't officially support string types yet,
# but enough people are relying on this behavior that
# it's been added back. This should eventually become
# part of the official spec.
return _is_actual_number(x) or isinstance(x, string_type)
def _is_actual_number(x):
# We need to handle python's quirkiness with booleans,
# specifically:
#
# >>> isinstance(False, int)
# True
# >>> isinstance(True, int)
# True
if x is True or x is False:
return False
return isinstance(x, Number)
class Options(object):
"""Options to control how a JMESPath function is evaluated."""
def __init__(self, dict_cls=None, custom_functions=None):
#: The class to use when creating a dict. The interpreter
# may create dictionaries during the evaluation of a JMESPath
# expression. For example, a multi-select hash will
# create a dictionary. By default we use a dict() type.
# You can set this value to change what dict type is used.
# The most common reason you would change this is if you
# want to set a collections.OrderedDict so that you can
# have predictable key ordering.
self.dict_cls = dict_cls
self.custom_functions = custom_functions
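# Illustrative sketch, not part of the original module: passing a
# dict_cls makes multi-select hashes preserve key order when evaluated
# by the TreeInterpreter defined later in this module, e.g.
#
#   import collections
#   options = Options(dict_cls=collections.OrderedDict)
#   TreeInterpreter(options).visit(parsed_ast, data)
#
# 'parsed_ast' and 'data' are hypothetical placeholders here.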
class _Expression(object):
def __init__(self, expression, interpreter):
self.expression = expression
self.interpreter = interpreter
def visit(self, node, *args, **kwargs):
return self.interpreter.visit(node, *args, **kwargs)
class Visitor(object):
def __init__(self):
self._method_cache = {}
def visit(self, node, *args, **kwargs):
node_type = node['type']
method = self._method_cache.get(node_type)
if method is None:
method = getattr(
self, 'visit_%s' % node['type'], self.default_visit)
self._method_cache[node_type] = method
return method(node, *args, **kwargs)
def default_visit(self, node, *args, **kwargs):
raise NotImplementedError("default_visit")
class TreeInterpreter(Visitor):
COMPARATOR_FUNC = {
'eq': _equals,
'ne': lambda x, y: not _equals(x, y),
'lt': operator.lt,
'gt': operator.gt,
'lte': operator.le,
'gte': operator.ge
}
_EQUALITY_OPS = ['eq', 'ne']
MAP_TYPE = dict
def __init__(self, options=None):
super(TreeInterpreter, self).__init__()
self._dict_cls = self.MAP_TYPE
if options is None:
options = Options()
self._options = options
if options.dict_cls is not None:
self._dict_cls = self._options.dict_cls
if options.custom_functions is not None:
self._functions = self._options.custom_functions
else:
self._functions = functions.Functions()
def default_visit(self, node, *args, **kwargs):
raise NotImplementedError(node['type'])
def visit_subexpression(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_field(self, node, value):
try:
return value.get(node['value'])
except AttributeError:
return None
def visit_comparator(self, node, value):
# Common case: comparator is == or !=
comparator_func = self.COMPARATOR_FUNC[node['value']]
if node['value'] in self._EQUALITY_OPS:
return comparator_func(
self.visit(node['children'][0], value),
self.visit(node['children'][1], value)
)
else:
# Ordering operators are only valid for numbers.
# Evaluating any other type with a comparison operator
# will yield a None value.
left = self.visit(node['children'][0], value)
right = self.visit(node['children'][1], value)
num_types = (int, float)
if not (_is_comparable(left) and
_is_comparable(right)):
return None
return comparator_func(left, right)
def visit_current(self, node, value):
return value
def visit_expref(self, node, value):
return _Expression(node['children'][0], self)
def visit_function_expression(self, node, value):
resolved_args = []
for child in node['children']:
current = self.visit(child, value)
resolved_args.append(current)
return self._functions.call_function(node['value'], resolved_args)
def visit_filter_projection(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
return None
comparator_node = node['children'][2]
collected = []
for element in base:
if self._is_true(self.visit(comparator_node, element)):
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def visit_flatten(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
# Can't flatten the object if it's not a list.
return None
merged_list = []
for element in base:
if isinstance(element, list):
merged_list.extend(element)
else:
merged_list.append(element)
return merged_list
def visit_identity(self, node, value):
return value
def visit_index(self, node, value):
# Even though we can index strings, we don't
# want to support that.
if not isinstance(value, list):
return None
try:
return value[node['value']]
except IndexError:
return None
def visit_index_expression(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_slice(self, node, value):
if not isinstance(value, list):
return None
s = slice(*node['children'])
return value[s]
def visit_key_val_pair(self, node, value):
return self.visit(node['children'][0], value)
def visit_literal(self, node, value):
return node['value']
def visit_multi_select_dict(self, node, value):
if value is None:
return None
collected = self._dict_cls()
for child in node['children']:
collected[child['value']] = self.visit(child, value)
return collected
def visit_multi_select_list(self, node, value):
if value is None:
return None
collected = []
for child in node['children']:
collected.append(self.visit(child, value))
return collected
def visit_or_expression(self, node, value):
matched = self.visit(node['children'][0], value)
if self._is_false(matched):
matched = self.visit(node['children'][1], value)
return matched
def visit_and_expression(self, node, value):
matched = self.visit(node['children'][0], value)
if self._is_false(matched):
return matched
return self.visit(node['children'][1], value)
def visit_not_expression(self, node, value):
original_result = self.visit(node['children'][0], value)
if type(original_result) is int and original_result == 0:
# Special case for 0, !0 should be false, not true.
# 0 is not a special cased integer in jmespath.
return False
return not original_result
def visit_pipe(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_projection(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
return None
collected = []
for element in base:
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def visit_value_projection(self, node, value):
base = self.visit(node['children'][0], value)
try:
base = base.values()
except AttributeError:
return None
collected = []
for element in base:
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def _is_false(self, value):
# This looks weird, but we're explicitly using equality checks
# because the truth/false values are different between
# python and jmespath.
return (value == '' or value == [] or value == {} or value is None or
value is False)
def _is_true(self, value):
return not self._is_false(value)
class GraphvizVisitor(Visitor):
def __init__(self):
super(GraphvizVisitor, self).__init__()
self._lines = []
self._count = 1
def visit(self, node, *args, **kwargs):
self._lines.append('digraph AST {')
current = '%s%s' % (node['type'], self._count)
self._count += 1
self._visit(node, current)
self._lines.append('}')
return '\n'.join(self._lines)
def _visit(self, node, current):
self._lines.append('%s [label="%s(%s)"]' % (
current, node['type'], node.get('value', '')))
for child in node.get('children', []):
child_name = '%s%s' % (child['type'], self._count)
self._count += 1
self._lines.append(' %s -> %s' % (current, child_name))
self._visit(child, child_name)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/compat.py
import sys
import inspect
PY2 = sys.version_info[0] == 2
def with_metaclass(meta, *bases):
# Taken from flask/six.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
if PY2:
text_type = unicode
string_type = basestring
from itertools import izip_longest as zip_longest
def with_str_method(cls):
"""Class decorator that handles __str__ compat between py2 and py3."""
# In python2, the __str__ should be __unicode__
# and __str__ should return bytes.
cls.__unicode__ = cls.__str__
def __str__(self):
return self.__unicode__().encode('utf-8')
cls.__str__ = __str__
return cls
def with_repr_method(cls):
"""Class decorator that handle __repr__ with py2 and py3."""
# This is almost the same thing as with_str_method *except*
# it uses the unicode_escape encoding. This also means we need to be
# careful encoding the input multiple times, so we only encode
# if we get a unicode type.
original_repr_method = cls.__repr__
def __repr__(self):
original_repr = original_repr_method(self)
if isinstance(original_repr, text_type):
original_repr = original_repr.encode('unicode_escape')
return original_repr
cls.__repr__ = __repr__
return cls
def get_methods(cls):
for name, method in inspect.getmembers(cls,
predicate=inspect.ismethod):
yield name, method
else:
text_type = str
string_type = str
from itertools import zip_longest
def with_str_method(cls):
# In python3, we don't need to do anything, we return a str type.
return cls
def with_repr_method(cls):
return cls
def get_methods(cls):
for name, method in inspect.getmembers(cls,
predicate=inspect.isfunction):
yield name, method
| 2,117 | Python | 31.090909 | 78 | 0.571091 |
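A minimal sketch of with_metaclass in use (Meta and Tagged are hypothetical names, not part of this module): the temporary-class trick above lets the same class statement work on both Python 2 and 3 by deferring the real metaclass call until the subclass is built.

class Meta(type):
    def __new__(mcs, name, this_bases, d):
        # Runs when the real class is created, on either Python version.
        d.setdefault('tag', name.lower())
        return super(Meta, mcs).__new__(mcs, name, this_bases, d)

class Tagged(with_metaclass(Meta, object)):
    pass

print(Tagged.tag)  # -> 'tagged'; Meta ran when Tagged was built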
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jmespath/functions.py | import math
import json
from jmespath import exceptions
from jmespath.compat import string_type as STRING_TYPE
from jmespath.compat import get_methods, with_metaclass
# python types -> jmespath types
TYPES_MAP = {
'bool': 'boolean',
'list': 'array',
'dict': 'object',
'NoneType': 'null',
'unicode': 'string',
'str': 'string',
'float': 'number',
'int': 'number',
'long': 'number',
'OrderedDict': 'object',
'_Projection': 'array',
'_Expression': 'expref',
}
# jmespath types -> python types
REVERSE_TYPES_MAP = {
'boolean': ('bool',),
'array': ('list', '_Projection'),
'object': ('dict', 'OrderedDict',),
'null': ('NoneType',),
'string': ('unicode', 'str'),
'number': ('float', 'int', 'long'),
'expref': ('_Expression',),
}
def signature(*arguments):
def _record_signature(func):
func.signature = arguments
return func
return _record_signature
class FunctionRegistry(type):
def __init__(cls, name, bases, attrs):
cls._populate_function_table()
super(FunctionRegistry, cls).__init__(name, bases, attrs)
def _populate_function_table(cls):
function_table = {}
# Any method with a @signature decorator that also
# starts with "_func_" is registered as a function.
# _func_max_by -> max_by function.
for name, method in get_methods(cls):
if not name.startswith('_func_'):
continue
signature = getattr(method, 'signature', None)
if signature is not None:
function_table[name[6:]] = {
'function': method,
'signature': signature,
}
cls.FUNCTION_TABLE = function_table
class Functions(with_metaclass(FunctionRegistry, object)):
FUNCTION_TABLE = {
}
def call_function(self, function_name, resolved_args):
try:
spec = self.FUNCTION_TABLE[function_name]
except KeyError:
raise exceptions.UnknownFunctionError(
"Unknown function: %s()" % function_name)
function = spec['function']
signature = spec['signature']
self._validate_arguments(resolved_args, signature, function_name)
return function(self, *resolved_args)
def _validate_arguments(self, args, signature, function_name):
if signature and signature[-1].get('variadic'):
if len(args) < len(signature):
raise exceptions.VariadictArityError(
len(signature), len(args), function_name)
elif len(args) != len(signature):
raise exceptions.ArityError(
len(signature), len(args), function_name)
return self._type_check(args, signature, function_name)
def _type_check(self, actual, signature, function_name):
for i in range(len(signature)):
allowed_types = signature[i]['types']
if allowed_types:
self._type_check_single(actual[i], allowed_types,
function_name)
def _type_check_single(self, current, types, function_name):
# Type checking involves checking the top level type,
# and in the case of arrays, potentially checking the types
# of each element.
allowed_types, allowed_subtypes = self._get_allowed_pytypes(types)
# We're not using isinstance() on purpose.
# The type model for jmespath does not map
# 1-1 with python types (booleans are considered
# integers in python for example).
actual_typename = type(current).__name__
if actual_typename not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, current,
self._convert_to_jmespath_type(actual_typename), types)
# If we're dealing with a list type, we can have
# additional restrictions on the type of the list
# elements (for example a function can require a
# list of numbers or a list of strings).
# Arrays are the only types that can have subtypes.
if allowed_subtypes:
self._subtype_check(current, allowed_subtypes,
types, function_name)
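    # Concretely (illustrative aside, not part of the library source):
    # isinstance(True, int) is True in Python, so an isinstance() check
    # against the 'number' type would wrongly accept booleans, while
    # comparing type(True).__name__ ('bool') against the allowed
    # typenames correctly rejects them.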
def _get_allowed_pytypes(self, types):
allowed_types = []
allowed_subtypes = []
for t in types:
type_ = t.split('-', 1)
if len(type_) == 2:
type_, subtype = type_
allowed_subtypes.append(REVERSE_TYPES_MAP[subtype])
else:
type_ = type_[0]
allowed_types.extend(REVERSE_TYPES_MAP[type_])
return allowed_types, allowed_subtypes
def _subtype_check(self, current, allowed_subtypes, types, function_name):
if len(allowed_subtypes) == 1:
# The easy case, we know up front what type
# we need to validate.
allowed_subtypes = allowed_subtypes[0]
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed_subtypes:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
elif len(allowed_subtypes) > 1 and current:
# Dynamic type validation. Based on the first
# type we see, we validate that the remaining types
# match.
first = type(current[0]).__name__
for subtypes in allowed_subtypes:
if first in subtypes:
allowed = subtypes
break
else:
raise exceptions.JMESPathTypeError(
function_name, current[0], first, types)
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
@signature({'types': ['number']})
def _func_abs(self, arg):
return abs(arg)
@signature({'types': ['array-number']})
def _func_avg(self, arg):
if arg:
return sum(arg) / float(len(arg))
else:
return None
@signature({'types': [], 'variadic': True})
def _func_not_null(self, *arguments):
for argument in arguments:
if argument is not None:
return argument
@signature({'types': []})
def _func_to_array(self, arg):
if isinstance(arg, list):
return arg
else:
return [arg]
@signature({'types': []})
def _func_to_string(self, arg):
if isinstance(arg, STRING_TYPE):
return arg
else:
return json.dumps(arg, separators=(',', ':'),
default=str)
@signature({'types': []})
def _func_to_number(self, arg):
if isinstance(arg, (list, dict, bool)):
return None
elif arg is None:
return None
elif isinstance(arg, (int, float)):
return arg
else:
try:
return int(arg)
except ValueError:
try:
return float(arg)
except ValueError:
return None
@signature({'types': ['array', 'string']}, {'types': []})
def _func_contains(self, subject, search):
return search in subject
@signature({'types': ['string', 'array', 'object']})
def _func_length(self, arg):
return len(arg)
@signature({'types': ['string']}, {'types': ['string']})
def _func_ends_with(self, search, suffix):
return search.endswith(suffix)
@signature({'types': ['string']}, {'types': ['string']})
def _func_starts_with(self, search, suffix):
return search.startswith(suffix)
@signature({'types': ['array', 'string']})
def _func_reverse(self, arg):
if isinstance(arg, STRING_TYPE):
return arg[::-1]
else:
return list(reversed(arg))
@signature({"types": ['number']})
def _func_ceil(self, arg):
return math.ceil(arg)
@signature({"types": ['number']})
def _func_floor(self, arg):
return math.floor(arg)
@signature({"types": ['string']}, {"types": ['array-string']})
def _func_join(self, separator, array):
return separator.join(array)
@signature({'types': ['expref']}, {'types': ['array']})
def _func_map(self, expref, arg):
result = []
for element in arg:
result.append(expref.visit(expref.expression, element))
return result
@signature({"types": ['array-number', 'array-string']})
def _func_max(self, arg):
if arg:
return max(arg)
else:
return None
@signature({"types": ["object"], "variadic": True})
def _func_merge(self, *arguments):
merged = {}
for arg in arguments:
merged.update(arg)
return merged
@signature({"types": ['array-number', 'array-string']})
def _func_min(self, arg):
if arg:
return min(arg)
else:
return None
@signature({"types": ['array-string', 'array-number']})
def _func_sort(self, arg):
return list(sorted(arg))
@signature({"types": ['array-number']})
def _func_sum(self, arg):
return sum(arg)
@signature({"types": ['object']})
def _func_keys(self, arg):
# To be consistent with .values()
# should we also return the indices of a list?
return list(arg.keys())
@signature({"types": ['object']})
def _func_values(self, arg):
return list(arg.values())
@signature({'types': []})
def _func_type(self, arg):
if isinstance(arg, STRING_TYPE):
return "string"
elif isinstance(arg, bool):
return "boolean"
elif isinstance(arg, list):
return "array"
elif isinstance(arg, dict):
return "object"
elif isinstance(arg, (float, int)):
return "number"
elif arg is None:
return "null"
@signature({'types': ['array']}, {'types': ['expref']})
def _func_sort_by(self, array, expref):
if not array:
return array
        # sort_by allows for the expref to be either a number or
        # a string, so we have some special logic to handle this.
        # We evaluate the first array element and verify that it's
        # either a string or a number. We then create a key function
        # that validates that type, which requires that remaining array
        # elements resolve to the same type as the first element.
required_type = self._convert_to_jmespath_type(
type(expref.visit(expref.expression, array[0])).__name__)
if required_type not in ['number', 'string']:
raise exceptions.JMESPathTypeError(
'sort_by', array[0], required_type, ['string', 'number'])
keyfunc = self._create_key_func(expref,
[required_type],
'sort_by')
return list(sorted(array, key=keyfunc))
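    # For illustration (a hedged example against made-up data, not part
    # of the library source): sort_by(@, &age) evaluates the expref
    # 'age' against each element and sorts by the result:
    #   jmespath.search('sort_by(@, &age)', [{'age': 30}, {'age': 25}])
    #   -> [{'age': 25}, {'age': 30}]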
@signature({'types': ['array']}, {'types': ['expref']})
def _func_min_by(self, array, expref):
keyfunc = self._create_key_func(expref,
['number', 'string'],
'min_by')
if array:
return min(array, key=keyfunc)
else:
return None
@signature({'types': ['array']}, {'types': ['expref']})
def _func_max_by(self, array, expref):
keyfunc = self._create_key_func(expref,
['number', 'string'],
'max_by')
if array:
return max(array, key=keyfunc)
else:
return None
def _create_key_func(self, expref, allowed_types, function_name):
def keyfunc(x):
result = expref.visit(expref.expression, x)
actual_typename = type(result).__name__
jmespath_type = self._convert_to_jmespath_type(actual_typename)
# allowed_types is in term of jmespath types, not python types.
if jmespath_type not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, result, jmespath_type, allowed_types)
return result
return keyfunc
def _convert_to_jmespath_type(self, pyobject):
return TYPES_MAP.get(pyobject, 'unknown')
| 12,766 | Python | 34.170799 | 78 | 0.550838 |
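Because FunctionRegistry scans for @signature-decorated `_func_*` methods, callers can register their own functions by subclassing Functions and passing the subclass through jmespath's public custom_functions hook. A short sketch, where the unique_letters function is a made-up example:

import jmespath
from jmespath import functions

class CustomFunctions(functions.Functions):
    @functions.signature({'types': ['string']})
    def _func_unique_letters(self, s):
        # Registered as unique_letters() via the _func_ prefix.
        return ''.join(sorted(set(s)))

options = jmespath.Options(custom_functions=CustomFunctions())
print(jmespath.search('unique_letters(foo)', {'foo': 'letter'},
                      options=options))
# -> 'elrt'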
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/attr/_funcs.py | from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import NOTHING, _obj_setattr, fields
from .exceptions import AttrsAttributeNotFoundError
def asdict(
inst,
recurse=True,
filter=None,
dict_factory=dict,
retain_collection_types=False,
):
"""
Return the ``attrs`` attribute values of *inst* as a dict.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
    :param callable filter: A callable whose return value determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attr.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
:param bool retain_collection_types: Do not convert to ``list`` when
encountering an attribute whose type is ``tuple`` or ``set``. Only
meaningful if ``recurse`` is ``True``.
:rtype: return type of *dict_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.0.0 *dict_factory*
.. versionadded:: 16.1.0 *retain_collection_types*
"""
attrs = fields(inst.__class__)
rv = dict_factory()
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(
v, True, filter, dict_factory, retain_collection_types
)
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain_collection_types is True else list
rv[a.name] = cf(
[
_asdict_anything(
i, filter, dict_factory, retain_collection_types
)
for i in v
]
)
elif isinstance(v, dict):
df = dict_factory
rv[a.name] = df(
(
_asdict_anything(
kk, filter, df, retain_collection_types
),
_asdict_anything(
vv, filter, df, retain_collection_types
),
)
for kk, vv in iteritems(v)
)
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
def _asdict_anything(val, filter, dict_factory, retain_collection_types):
"""
    ``asdict`` only works on attrs instances; this works on anything.
"""
if getattr(val.__class__, "__attrs_attrs__", None) is not None:
# Attrs class.
rv = asdict(val, True, filter, dict_factory, retain_collection_types)
elif isinstance(val, (tuple, list, set)):
cf = val.__class__ if retain_collection_types is True else list
rv = cf(
[
_asdict_anything(
i, filter, dict_factory, retain_collection_types
)
for i in val
]
)
elif isinstance(val, dict):
df = dict_factory
rv = df(
(
_asdict_anything(kk, filter, df, retain_collection_types),
_asdict_anything(vv, filter, df, retain_collection_types),
)
for kk, vv in iteritems(val)
)
else:
rv = val
return rv
def astuple(
inst,
recurse=True,
filter=None,
tuple_factory=tuple,
retain_collection_types=False,
):
"""
Return the ``attrs`` attribute values of *inst* as a tuple.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
    :param callable filter: A callable whose return value determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attr.Attribute` as the first argument and the
value as the second argument.
:param callable tuple_factory: A callable to produce tuples from. For
example, to produce lists instead of tuples.
:param bool retain_collection_types: Do not convert to ``list``
        or ``dict`` when encountering an attribute whose type is
``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
``True``.
:rtype: return type of *tuple_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.2.0
"""
attrs = fields(inst.__class__)
rv = []
retain = retain_collection_types # Very long. :/
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv.append(
astuple(
v,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
)
elif isinstance(v, (tuple, list, set)):
cf = v.__class__ if retain is True else list
rv.append(
cf(
[
astuple(
j,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(j.__class__)
else j
for j in v
]
)
)
elif isinstance(v, dict):
df = v.__class__ if retain is True else dict
rv.append(
df(
(
astuple(
kk,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(kk.__class__)
else kk,
astuple(
vv,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(vv.__class__)
else vv,
)
for kk, vv in iteritems(v)
)
)
else:
rv.append(v)
else:
rv.append(v)
return rv if tuple_factory is list else tuple_factory(rv)
def has(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: bool
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def assoc(inst, **changes):
"""
Copy *inst* and apply *changes*.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
be found on *cls*.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. deprecated:: 17.1.0
Use `evolve` instead.
"""
import warnings
warnings.warn(
"assoc is deprecated and will be removed after 2018/01.",
DeprecationWarning,
stacklevel=2,
)
new = copy.copy(inst)
attrs = fields(inst.__class__)
for k, v in iteritems(changes):
a = getattr(attrs, k, NOTHING)
if a is NOTHING:
raise AttrsAttributeNotFoundError(
"{k} is not an attrs attribute on {cl}.".format(
k=k, cl=new.__class__
)
)
_obj_setattr(new, k, v)
return new
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
if init_name not in changes:
changes[init_name] = getattr(inst, attr_name)
return cls(**changes)
def resolve_types(cls, globalns=None, localns=None):
"""
Resolve any strings and forward annotations in type annotations.
This is only required if you need concrete types in `Attribute`'s *type*
field. In other words, you don't need to resolve your types if you only
use them for static type checking.
With no arguments, names will be looked up in the module in which the class
was created. If this is not what you want, e.g. if the name only exists
inside a method, you may pass *globalns* or *localns* to specify other
dictionaries in which to look up these names. See the docs of
`typing.get_type_hints` for more details.
:param type cls: Class to resolve.
:param Optional[dict] globalns: Dictionary containing global variables.
:param Optional[dict] localns: Dictionary containing local variables.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:raise NameError: If types cannot be resolved because of missing variables.
:returns: *cls* so you can use this function also as a class decorator.
Please note that you have to apply it **after** `attr.s`. That means
the decorator has to come in the line **before** `attr.s`.
.. versionadded:: 20.1.0
"""
try:
# Since calling get_type_hints is expensive we cache whether we've
# done it already.
cls.__attrs_types_resolved__
except AttributeError:
import typing
hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
for field in fields(cls):
if field.name in hints:
# Since fields have been frozen we must work around it.
_obj_setattr(field, "type", hints[field.name])
cls.__attrs_types_resolved__ = True
# Return the class so you can use it as a decorator too.
return cls
| 11,640 | Python | 33.339233 | 79 | 0.531615 |
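A quick sketch of these helpers in action (Point is a made-up class used only for illustration):

import attr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

p = Point(1, 2)
print(attr.asdict(p))       # -> {'x': 1, 'y': 2}
print(attr.astuple(p))      # -> (1, 2)
print(attr.evolve(p, y=5))  # -> Point(x=1, y=5); p itself is unchanged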
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/attr/validators.py | """
Commonly useful validators.
"""
from __future__ import absolute_import, division, print_function
import re
from ._make import _AndValidator, and_, attrib, attrs
from .exceptions import NotCallableError
__all__ = [
"and_",
"deep_iterable",
"deep_mapping",
"in_",
"instance_of",
"is_callable",
"matches_re",
"optional",
"provides",
]
@attrs(repr=False, slots=True, hash=True)
class _InstanceOfValidator(object):
type = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not isinstance(value, self.type):
raise TypeError(
"'{name}' must be {type!r} (got {value!r} that is a "
"{actual!r}).".format(
name=attr.name,
type=self.type,
actual=value.__class__,
value=value,
),
attr,
self.type,
value,
)
def __repr__(self):
return "<instance_of validator for type {type!r}>".format(
type=self.type
)
def instance_of(type):
"""
A validator that raises a `TypeError` if the initializer is called
with a wrong type for this particular attribute (checks are performed using
    `isinstance`, so it's also valid to pass a tuple of types).
:param type: The type to check for.
:type type: type or tuple of types
:raises TypeError: With a human readable error message, the attribute
(of type `attr.Attribute`), the expected type, and the value it
got.
"""
return _InstanceOfValidator(type)
@attrs(repr=False, frozen=True, slots=True)
class _MatchesReValidator(object):
regex = attrib()
flags = attrib()
match_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.match_func(value):
raise ValueError(
"'{name}' must match regex {regex!r}"
" ({value!r} doesn't)".format(
name=attr.name, regex=self.regex.pattern, value=value
),
attr,
self.regex,
value,
)
def __repr__(self):
return "<matches_re validator for pattern {regex!r}>".format(
regex=self.regex
)
def matches_re(regex, flags=0, func=None):
r"""
A validator that raises `ValueError` if the initializer is called
with a string that doesn't match *regex*.
:param str regex: a regex string to match against
:param int flags: flags that will be passed to the underlying re function
(default 0)
:param callable func: which underlying `re` function to call (options
are `re.fullmatch`, `re.search`, `re.match`, default
is ``None`` which means either `re.fullmatch` or an emulation of
it on Python 2). For performance reasons, they won't be used directly
but on a pre-`re.compile`\ ed pattern.
.. versionadded:: 19.2.0
"""
fullmatch = getattr(re, "fullmatch", None)
valid_funcs = (fullmatch, None, re.search, re.match)
if func not in valid_funcs:
raise ValueError(
"'func' must be one of %s."
% (
", ".join(
sorted(
e and e.__name__ or "None" for e in set(valid_funcs)
)
),
)
)
pattern = re.compile(regex, flags)
if func is re.match:
match_func = pattern.match
elif func is re.search:
match_func = pattern.search
else:
if fullmatch:
match_func = pattern.fullmatch
else:
pattern = re.compile(r"(?:{})\Z".format(regex), flags)
match_func = pattern.match
return _MatchesReValidator(pattern, flags, match_func)
@attrs(repr=False, slots=True, hash=True)
class _ProvidesValidator(object):
interface = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.interface.providedBy(value):
raise TypeError(
"'{name}' must provide {interface!r} which {value!r} "
"doesn't.".format(
name=attr.name, interface=self.interface, value=value
),
attr,
self.interface,
value,
)
def __repr__(self):
return "<provides validator for interface {interface!r}>".format(
interface=self.interface
)
def provides(interface):
"""
A validator that raises a `TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
    performed using ``interface.providedBy(value)``; see `zope.interface
    <https://zopeinterface.readthedocs.io/en/latest/>`_).
:param interface: The interface to check for.
:type interface: ``zope.interface.Interface``
:raises TypeError: With a human readable error message, the attribute
(of type `attr.Attribute`), the expected interface, and the
value it got.
"""
return _ProvidesValidator(interface)
@attrs(repr=False, slots=True, hash=True)
class _OptionalValidator(object):
validator = attrib()
def __call__(self, inst, attr, value):
if value is None:
return
self.validator(inst, attr, value)
def __repr__(self):
return "<optional validator for {what} or None>".format(
what=repr(self.validator)
)
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or `list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
"""
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator)
@attrs(repr=False, slots=True, hash=True)
class _InValidator(object):
options = attrib()
def __call__(self, inst, attr, value):
try:
in_options = value in self.options
except TypeError: # e.g. `1 in "abc"`
in_options = False
if not in_options:
raise ValueError(
"'{name}' must be in {options!r} (got {value!r})".format(
name=attr.name, options=self.options, value=value
)
)
def __repr__(self):
return "<in_ validator with options {options!r}>".format(
options=self.options
)
def in_(options):
"""
A validator that raises a `ValueError` if the initializer is called
with a value that does not belong in the options provided. The check is
performed using ``value in options``.
:param options: Allowed options.
:type options: list, tuple, `enum.Enum`, ...
:raises ValueError: With a human readable error message, the attribute (of
type `attr.Attribute`), the expected options, and the value it
got.
.. versionadded:: 17.1.0
"""
return _InValidator(options)
@attrs(repr=False, slots=False, hash=True)
class _IsCallableValidator(object):
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not callable(value):
message = (
"'{name}' must be callable "
"(got {value!r} that is a {actual!r})."
)
raise NotCallableError(
msg=message.format(
name=attr.name, value=value, actual=value.__class__
),
value=value,
)
def __repr__(self):
return "<is_callable validator>"
def is_callable():
"""
A validator that raises a `attr.exceptions.NotCallableError` if the
initializer is called with a value for this particular attribute
that is not callable.
.. versionadded:: 19.1.0
:raises `attr.exceptions.NotCallableError`: With a human readable error
message containing the attribute (`attr.Attribute`) name,
and the value it got.
"""
return _IsCallableValidator()
@attrs(repr=False, slots=True, hash=True)
class _DeepIterable(object):
member_validator = attrib(validator=is_callable())
iterable_validator = attrib(
default=None, validator=optional(is_callable())
)
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.iterable_validator is not None:
self.iterable_validator(inst, attr, value)
for member in value:
self.member_validator(inst, attr, member)
def __repr__(self):
iterable_identifier = (
""
if self.iterable_validator is None
else " {iterable!r}".format(iterable=self.iterable_validator)
)
return (
"<deep_iterable validator for{iterable_identifier}"
" iterables of {member!r}>"
).format(
iterable_identifier=iterable_identifier,
member=self.member_validator,
)
def deep_iterable(member_validator, iterable_validator=None):
"""
A validator that performs deep validation of an iterable.
:param member_validator: Validator to apply to iterable members
:param iterable_validator: Validator to apply to iterable itself
(optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
class _DeepMapping(object):
key_validator = attrib(validator=is_callable())
value_validator = attrib(validator=is_callable())
mapping_validator = attrib(default=None, validator=optional(is_callable()))
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.mapping_validator is not None:
self.mapping_validator(inst, attr, value)
for key in value:
self.key_validator(inst, attr, key)
self.value_validator(inst, attr, value[key])
def __repr__(self):
return (
"<deep_mapping validator for objects mapping {key!r} to {value!r}>"
).format(key=self.key_validator, value=self.value_validator)
def deep_mapping(key_validator, value_validator, mapping_validator=None):
"""
A validator that performs deep validation of a dictionary.
:param key_validator: Validator to apply to dictionary keys
:param value_validator: Validator to apply to dictionary values
:param mapping_validator: Validator to apply to top-level mapping
attribute (optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepMapping(key_validator, value_validator, mapping_validator)
| 11,497 | Python | 29.257895 | 79 | 0.589893 |
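A brief sketch of how these validators compose on an attrs class (Server and its fields are made up for illustration):

import attr
from attr import validators

@attr.s
class Server(object):
    host = attr.ib(validator=validators.instance_of(str))
    port = attr.ib(validator=validators.in_(range(1, 65536)))
    tags = attr.ib(
        factory=list,
        validator=validators.deep_iterable(
            member_validator=validators.instance_of(str),
            iterable_validator=validators.instance_of(list),
        ),
    )

Server("localhost", 8080, ["web"])  # passes all validators
Server("localhost", 0)              # raises ValueError from in_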