# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_signals.py
from __future__ import annotations
from typing import AsyncIterator
from ._compat import DeprecatedAsyncContextManager
from ._eventloop import get_asynclib
def open_signal_receiver(
*signals: int,
) -> DeprecatedAsyncContextManager[AsyncIterator[int]]:
"""
Start receiving operating system signals.
:param signals: signals to receive (e.g. ``signal.SIGINT``)
:return: an asynchronous context manager for an asynchronous iterator which yields signal
numbers
.. warning:: Windows does not support signals natively so it is best to avoid relying on this
in cross-platform applications.
.. warning:: On asyncio, this permanently replaces any previous signal handler for the given
signals, as set via :meth:`~asyncio.loop.add_signal_handler`.
"""
return get_asynclib().open_signal_receiver(*signals)
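
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of consuming signals with open_signal_receiver(), assuming a
# POSIX platform (see the warning above) and an anyio event loop; the function
# name is hypothetical.
async def _example_signal_loop() -> None:
    import signal  # local import; illustrative only

    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:
            print(f"received signal {signum}")
            return
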

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_streams.py
from __future__ import annotations
import math
from typing import Any, TypeVar, overload
from ..streams.memory import (
MemoryObjectReceiveStream,
MemoryObjectSendStream,
MemoryObjectStreamState,
)
T_Item = TypeVar("T_Item")
@overload
def create_memory_object_stream(
max_buffer_size: float = ...,
) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
...
@overload
def create_memory_object_stream(
max_buffer_size: float = ..., item_type: type[T_Item] = ...
) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
...
def create_memory_object_stream(
max_buffer_size: float = 0, item_type: type[T_Item] | None = None
) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
"""
Create a memory object stream.
:param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking
:param item_type: type of item, for marking the streams with the right generic type for
static typing (not used at run time)
:return: a tuple of (send stream, receive stream)
"""
if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
raise ValueError("max_buffer_size must be either an integer or math.inf")
if max_buffer_size < 0:
raise ValueError("max_buffer_size cannot be negative")
state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size)
return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)
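
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal producer/consumer sketch for create_memory_object_stream(), assuming
# it runs inside an anyio event loop; the function names are hypothetical.
async def _example_memory_stream() -> None:
    import anyio  # deferred: a top-level import here would be circular

    send, receive = create_memory_object_stream(max_buffer_size=5, item_type=int)

    async def produce() -> None:
        async with send:  # closing the send stream ends the receive loop below
            for i in range(3):
                await send.send(i)

    async with anyio.create_task_group() as tg:
        tg.start_soon(produce)
        async with receive:
            async for item in receive:
                print(item)
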

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_synchronization.py
from __future__ import annotations
from collections import deque
from dataclasses import dataclass
from types import TracebackType
from warnings import warn
from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._compat import DeprecatedAwaitable
from ._eventloop import get_asynclib
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task
@dataclass(frozen=True)
class EventStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
"""
tasks_waiting: int
@dataclass(frozen=True)
class CapacityLimiterStatistics:
"""
:ivar int borrowed_tokens: number of tokens currently borrowed by tasks
:ivar float total_tokens: total number of available tokens
:ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
limiter
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
:meth:`~.CapacityLimiter.acquire_on_behalf_of`
"""
borrowed_tokens: int
total_tokens: float
borrowers: tuple[object, ...]
tasks_waiting: int
@dataclass(frozen=True)
class LockStatistics:
"""
:ivar bool locked: flag indicating if this lock is locked or not
:ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
held by any task)
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
"""
locked: bool
owner: TaskInfo | None
tasks_waiting: int
@dataclass(frozen=True)
class ConditionStatistics:
"""
:ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
:ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
"""
tasks_waiting: int
lock_statistics: LockStatistics
@dataclass(frozen=True)
class SemaphoreStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
"""
tasks_waiting: int
class Event:
def __new__(cls) -> Event:
return get_asynclib().Event()
def set(self) -> DeprecatedAwaitable:
"""Set the flag, notifying all listeners."""
raise NotImplementedError
def is_set(self) -> bool:
"""Return ``True`` if the flag is set, ``False`` if not."""
raise NotImplementedError
async def wait(self) -> None:
"""
Wait until the flag has been set.
If the flag has already been set when this method is called, it returns immediately.
"""
raise NotImplementedError
def statistics(self) -> EventStatistics:
"""Return statistics about the current state of this event."""
raise NotImplementedError
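
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of Event for one-shot signalling between tasks, assuming an
# anyio event loop; the function names are hypothetical.
async def _example_event() -> None:
    import anyio  # deferred: a top-level import here would be circular

    done = anyio.Event()

    async def setter() -> None:
        await anyio.sleep(0.1)
        done.set()  # wakes every task blocked in done.wait()

    async with anyio.create_task_group() as tg:
        tg.start_soon(setter)
        await done.wait()
        print("event was set")
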
class Lock:
_owner_task: TaskInfo | None = None
def __init__(self) -> None:
self._waiters: deque[tuple[TaskInfo, Event]] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
async def acquire(self) -> None:
"""Acquire the lock."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
task = get_current_task()
event = Event()
token = task, event
self._waiters.append(token)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(token)
elif self._owner_task == task:
self.release()
raise
assert self._owner_task == task
else:
try:
await cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
task = get_current_task()
if self._owner_task == task:
raise RuntimeError("Attempted to acquire an already held Lock")
if self._owner_task is not None:
raise WouldBlock
self._owner_task = task
def release(self) -> DeprecatedAwaitable:
"""Release the lock."""
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding this lock")
if self._waiters:
self._owner_task, event = self._waiters.popleft()
event.set()
else:
del self._owner_task
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._owner_task is not None
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
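
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of Lock guarding a critical section, assuming an anyio event
# loop; the function names are hypothetical.
async def _example_lock() -> None:
    import anyio  # deferred: a top-level import here would be circular

    lock = anyio.Lock()

    async def worker(n: int) -> None:
        async with lock:  # only one task at a time runs this block
            print(f"task {n} holds the lock")

    async with anyio.create_task_group() as tg:
        for n in range(3):
            tg.start_soon(worker, n)
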
class Condition:
_owner_task: TaskInfo | None = None
def __init__(self, lock: Lock | None = None):
self._lock = lock or Lock()
self._waiters: deque[Event] = deque()
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError("The current task is not holding the underlying lock")
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> DeprecatedAwaitable:
"""Release the underlying lock."""
self._lock.release()
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification."""
await checkpoint()
event = Event()
self._waiters.append(event)
self.release()
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
finally:
with CancelScope(shield=True):
await self.acquire()
def statistics(self) -> ConditionStatistics:
"""
Return statistics about the current state of this condition.
.. versionadded:: 3.0
"""
return ConditionStatistics(len(self._waiters), self._lock.statistics())
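
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of Condition: one task waits for a notification while another
# delivers it. The sleep() is only a crude handoff so the waiter blocks first;
# the function names are hypothetical.
async def _example_condition() -> None:
    import anyio  # deferred: a top-level import here would be circular

    condition = anyio.Condition()

    async def waiter() -> None:
        async with condition:
            await condition.wait()  # releases the lock while waiting
            print("waiter was notified")

    async with anyio.create_task_group() as tg:
        tg.start_soon(waiter)
        await anyio.sleep(0.1)  # let the waiter block in wait() first
        async with condition:
            condition.notify()
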
class Semaphore:
def __init__(self, initial_value: int, *, max_value: int | None = None):
if not isinstance(initial_value, int):
raise TypeError("initial_value must be an integer")
if initial_value < 0:
raise ValueError("initial_value must be >= 0")
if max_value is not None:
if not isinstance(max_value, int):
raise TypeError("max_value must be an integer or None")
if max_value < initial_value:
raise ValueError(
"max_value must be equal to or higher than initial_value"
)
self._value = initial_value
self._max_value = max_value
self._waiters: deque[Event] = deque()
async def __aenter__(self) -> Semaphore:
await self.acquire()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
async def acquire(self) -> None:
"""Decrement the semaphore value, blocking if necessary."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
event = Event()
self._waiters.append(event)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
else:
self.release()
raise
else:
try:
await cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def acquire_nowait(self) -> None:
"""
        Acquire the semaphore, without blocking.
:raises ~anyio.WouldBlock: if the operation would block
"""
if self._value == 0:
raise WouldBlock
self._value -= 1
def release(self) -> DeprecatedAwaitable:
"""Increment the semaphore value."""
if self._max_value is not None and self._value == self._max_value:
raise ValueError("semaphore released too many times")
if self._waiters:
self._waiters.popleft().set()
else:
self._value += 1
return DeprecatedAwaitable(self.release)
@property
def value(self) -> int:
"""The current value of the semaphore."""
return self._value
@property
def max_value(self) -> int | None:
"""The maximum value of the semaphore."""
return self._max_value
def statistics(self) -> SemaphoreStatistics:
"""
Return statistics about the current state of this semaphore.
.. versionadded:: 3.0
"""
return SemaphoreStatistics(len(self._waiters))
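
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of Semaphore limiting concurrency to two tasks at a time,
# assuming an anyio event loop; the function names are hypothetical.
async def _example_semaphore() -> None:
    import anyio  # deferred: a top-level import here would be circular

    semaphore = anyio.Semaphore(2)  # at most two tasks inside the block at once

    async def worker(n: int) -> None:
        async with semaphore:
            await anyio.sleep(0.1)
            print(f"task {n} done")

    async with anyio.create_task_group() as tg:
        for n in range(4):
            tg.start_soon(worker, n)
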
class CapacityLimiter:
def __new__(cls, total_tokens: float) -> CapacityLimiter:
return get_asynclib().CapacityLimiter(total_tokens)
async def __aenter__(self) -> None:
raise NotImplementedError
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
raise NotImplementedError
@property
def total_tokens(self) -> float:
"""
The total number of tokens available for borrowing.
This is a read-write property. If the total number of tokens is increased, the
proportionate number of tasks waiting on this limiter will be granted their tokens.
.. versionchanged:: 3.0
The property is now writable.
"""
raise NotImplementedError
@total_tokens.setter
def total_tokens(self, value: float) -> None:
raise NotImplementedError
async def set_total_tokens(self, value: float) -> None:
warn(
"CapacityLimiter.set_total_tokens has been deprecated. Set the value of the"
'"total_tokens" attribute directly.',
DeprecationWarning,
)
self.total_tokens = value
@property
def borrowed_tokens(self) -> int:
"""The number of tokens that have currently been borrowed."""
raise NotImplementedError
@property
def available_tokens(self) -> float:
"""The number of tokens currently available to be borrowed"""
raise NotImplementedError
def acquire_nowait(self) -> DeprecatedAwaitable:
"""
Acquire a token for the current task without waiting for one to become available.
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
"""
Acquire a token without waiting for one to become available.
:param borrower: the entity borrowing a token
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
async def acquire(self) -> None:
"""
Acquire a token for the current task, waiting if necessary for one to become available.
"""
raise NotImplementedError
async def acquire_on_behalf_of(self, borrower: object) -> None:
"""
Acquire a token, waiting if necessary for one to become available.
:param borrower: the entity borrowing a token
"""
raise NotImplementedError
def release(self) -> None:
"""
Release the token held by the current task.
:raises RuntimeError: if the current task has not borrowed a token from this limiter.
"""
raise NotImplementedError
def release_on_behalf_of(self, borrower: object) -> None:
"""
Release the token held by the given borrower.
:raises RuntimeError: if the borrower has not borrowed a token from this limiter.
"""
raise NotImplementedError
def statistics(self) -> CapacityLimiterStatistics:
"""
Return statistics about the current state of this limiter.
.. versionadded:: 3.0
"""
raise NotImplementedError
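
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of CapacityLimiter: each task borrows one token for the
# duration of the block, waiting when none are available. Assumes an anyio
# event loop; the function names are hypothetical.
async def _example_capacity_limiter() -> None:
    import anyio  # deferred: a top-level import here would be circular

    limiter = anyio.CapacityLimiter(2)

    async def worker(n: int) -> None:
        async with limiter:  # borrows a token; released on exit
            await anyio.sleep(0.1)
            print(f"task {n} finished")

    async with anyio.create_task_group() as tg:
        for n in range(5):
            tg.start_soon(worker, n)
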
def create_lock() -> Lock:
"""
Create an asynchronous lock.
:return: a lock object
.. deprecated:: 3.0
Use :class:`~Lock` directly.
"""
warn("create_lock() is deprecated -- use Lock() directly", DeprecationWarning)
return Lock()
def create_condition(lock: Lock | None = None) -> Condition:
"""
Create an asynchronous condition.
:param lock: the lock to base the condition object on
:return: a condition object
.. deprecated:: 3.0
Use :class:`~Condition` directly.
"""
warn(
"create_condition() is deprecated -- use Condition() directly",
DeprecationWarning,
)
return Condition(lock=lock)
def create_event() -> Event:
"""
Create an asynchronous event object.
:return: an event object
.. deprecated:: 3.0
Use :class:`~Event` directly.
"""
warn("create_event() is deprecated -- use Event() directly", DeprecationWarning)
return get_asynclib().Event()
def create_semaphore(value: int, *, max_value: int | None = None) -> Semaphore:
"""
Create an asynchronous semaphore.
:param value: the semaphore's initial value
:param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the
semaphore's value would exceed this number
:return: a semaphore object
.. deprecated:: 3.0
Use :class:`~Semaphore` directly.
"""
warn(
"create_semaphore() is deprecated -- use Semaphore() directly",
DeprecationWarning,
)
return Semaphore(value, max_value=max_value)
def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
"""
Create a capacity limiter.
:param total_tokens: the total number of tokens available for borrowing (can be an integer or
:data:`math.inf`)
:return: a capacity limiter object
.. deprecated:: 3.0
Use :class:`~CapacityLimiter` directly.
"""
warn(
"create_capacity_limiter() is deprecated -- use CapacityLimiter() directly",
DeprecationWarning,
)
return get_asynclib().CapacityLimiter(total_tokens)
class ResourceGuard:
__slots__ = "action", "_guarded"
def __init__(self, action: str):
self.action = action
self._guarded = False
def __enter__(self) -> None:
if self._guarded:
raise BusyResourceError(self.action)
self._guarded = True
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
self._guarded = False
return None

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_typedattr.py
from __future__ import annotations
import sys
from typing import Any, Callable, Mapping, TypeVar, overload
from ._exceptions import TypedAttributeLookupError
if sys.version_info >= (3, 8):
from typing import final
else:
from typing_extensions import final
T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
undefined = object()
def typed_attribute() -> Any:
"""Return a unique object, used to mark typed attributes."""
return object()
class TypedAttributeSet:
"""
Superclass for typed attribute collections.
Checks that every public attribute of every subclass has a type annotation.
"""
def __init_subclass__(cls) -> None:
annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
for attrname in dir(cls):
if not attrname.startswith("_") and attrname not in annotations:
raise TypeError(
f"Attribute {attrname!r} is missing its type annotation"
)
super().__init_subclass__()
class TypedAttributeProvider:
"""Base class for classes that wish to provide typed extra attributes."""
@property
def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
"""
A mapping of the extra attributes to callables that return the corresponding values.
If the provider wraps another provider, the attributes from that wrapper should also be
included in the returned mapping (but the wrapper may override the callables from the
wrapped instance).
"""
return {}
@overload
def extra(self, attribute: T_Attr) -> T_Attr:
...
@overload
def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default:
...
@final
def extra(self, attribute: Any, default: object = undefined) -> object:
"""
extra(attribute, default=undefined)
Return the value of the given typed extra attribute.
:param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for
:param default: the value that should be returned if no value is found for the attribute
:raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was
given
"""
try:
return self.extra_attributes[attribute]()
except KeyError:
if default is undefined:
raise TypedAttributeLookupError("Attribute not found") from None
else:
return default
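
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of declaring a typed attribute set and a provider for it; the
# class names and the attribute value are hypothetical.
class _ExamplePeerAttributes(TypedAttributeSet):
    peer_name: str = typed_attribute()


class _ExamplePeerProvider(TypedAttributeProvider):
    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {_ExamplePeerAttributes.peer_name: lambda: "peer-1"}


# _ExamplePeerProvider().extra(_ExamplePeerAttributes.peer_name) == "peer-1"
# _ExamplePeerProvider().extra(object(), default="unknown") == "unknown"
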

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_fileio.py
from __future__ import annotations
import os
import pathlib
import sys
from dataclasses import dataclass
from functools import partial
from os import PathLike
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
AsyncIterator,
Callable,
Generic,
Iterable,
Iterator,
Sequence,
cast,
overload,
)
from .. import to_thread
from ..abc import AsyncResource
if sys.version_info >= (3, 8):
from typing import Final
else:
from typing_extensions import Final
if TYPE_CHECKING:
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
else:
ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
class AsyncFile(AsyncResource, Generic[AnyStr]):
"""
An asynchronous file object.
This class wraps a standard file object and provides async friendly versions of the following
blocking methods (where available on the original file object):
* read
* read1
* readline
* readlines
* readinto
* readinto1
* write
* writelines
* truncate
* seek
* tell
* flush
All other methods are directly passed through.
This class supports the asynchronous context manager protocol which closes the underlying file
at the end of the context block.
This class also supports asynchronous iteration::
async with await open_file(...) as f:
async for line in f:
print(line)
"""
def __init__(self, fp: IO[AnyStr]) -> None:
self._fp: Any = fp
def __getattr__(self, name: str) -> object:
return getattr(self._fp, name)
@property
def wrapped(self) -> IO[AnyStr]:
"""The wrapped file object."""
return self._fp
async def __aiter__(self) -> AsyncIterator[AnyStr]:
while True:
line = await self.readline()
if line:
yield line
else:
break
async def aclose(self) -> None:
return await to_thread.run_sync(self._fp.close)
async def read(self, size: int = -1) -> AnyStr:
return await to_thread.run_sync(self._fp.read, size)
async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
return await to_thread.run_sync(self._fp.read1, size)
async def readline(self) -> AnyStr:
return await to_thread.run_sync(self._fp.readline)
async def readlines(self) -> list[AnyStr]:
return await to_thread.run_sync(self._fp.readlines)
async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
return await to_thread.run_sync(self._fp.readinto, b)
async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
return await to_thread.run_sync(self._fp.readinto1, b)
@overload
async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int:
...
@overload
async def write(self: AsyncFile[str], b: str) -> int:
...
async def write(self, b: ReadableBuffer | str) -> int:
return await to_thread.run_sync(self._fp.write, b)
@overload
async def writelines(
self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
) -> None:
...
@overload
async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None:
...
async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
return await to_thread.run_sync(self._fp.writelines, lines)
async def truncate(self, size: int | None = None) -> int:
return await to_thread.run_sync(self._fp.truncate, size)
async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
return await to_thread.run_sync(self._fp.seek, offset, whence)
async def tell(self) -> int:
return await to_thread.run_sync(self._fp.tell)
async def flush(self) -> None:
return await to_thread.run_sync(self._fp.flush)
@overload
async def open_file(
file: str | PathLike[str] | int,
mode: OpenBinaryMode,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
closefd: bool = ...,
opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]:
...
@overload
async def open_file(
file: str | PathLike[str] | int,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
closefd: bool = ...,
opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]:
...
async def open_file(
file: str | PathLike[str] | int,
mode: str = "r",
buffering: int = -1,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
closefd: bool = True,
opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
"""
Open a file asynchronously.
The arguments are exactly the same as for the builtin :func:`open`.
:return: an asynchronous file object
"""
fp = await to_thread.run_sync(
open, file, mode, buffering, encoding, errors, newline, closefd, opener
)
return AsyncFile(fp)
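
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of open_file() for writing and then reading a text file,
# assuming an anyio event loop; the path is hypothetical.
async def _example_open_file() -> None:
    async with await open_file("/tmp/example.txt", "w") as f:
        await f.write("hello\n")

    async with await open_file("/tmp/example.txt") as f:
        async for line in f:
            print(line, end="")
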
def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
"""
Wrap an existing file as an asynchronous file.
:param file: an existing file-like object
:return: an asynchronous file object
"""
return AsyncFile(file)
@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
iterator: Iterator[PathLike[str]]
async def __anext__(self) -> Path:
nextval = await to_thread.run_sync(next, self.iterator, None, cancellable=True)
if nextval is None:
raise StopAsyncIteration from None
return Path(cast("PathLike[str]", nextval))
class Path:
"""
An asynchronous version of :class:`pathlib.Path`.
This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but
it is compatible with the :class:`os.PathLike` interface.
It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the
deprecated :meth:`~pathlib.Path.link_to` method.
Any methods that do disk I/O need to be awaited on. These methods are:
* :meth:`~pathlib.Path.absolute`
* :meth:`~pathlib.Path.chmod`
* :meth:`~pathlib.Path.cwd`
* :meth:`~pathlib.Path.exists`
* :meth:`~pathlib.Path.expanduser`
* :meth:`~pathlib.Path.group`
* :meth:`~pathlib.Path.hardlink_to`
* :meth:`~pathlib.Path.home`
* :meth:`~pathlib.Path.is_block_device`
* :meth:`~pathlib.Path.is_char_device`
* :meth:`~pathlib.Path.is_dir`
* :meth:`~pathlib.Path.is_fifo`
* :meth:`~pathlib.Path.is_file`
* :meth:`~pathlib.Path.is_mount`
* :meth:`~pathlib.Path.lchmod`
* :meth:`~pathlib.Path.lstat`
* :meth:`~pathlib.Path.mkdir`
* :meth:`~pathlib.Path.open`
* :meth:`~pathlib.Path.owner`
* :meth:`~pathlib.Path.read_bytes`
* :meth:`~pathlib.Path.read_text`
* :meth:`~pathlib.Path.readlink`
* :meth:`~pathlib.Path.rename`
* :meth:`~pathlib.Path.replace`
* :meth:`~pathlib.Path.rmdir`
* :meth:`~pathlib.Path.samefile`
* :meth:`~pathlib.Path.stat`
* :meth:`~pathlib.Path.touch`
* :meth:`~pathlib.Path.unlink`
* :meth:`~pathlib.Path.write_bytes`
* :meth:`~pathlib.Path.write_text`
Additionally, the following methods return an async iterator yielding :class:`~.Path` objects:
* :meth:`~pathlib.Path.glob`
* :meth:`~pathlib.Path.iterdir`
* :meth:`~pathlib.Path.rglob`
"""
__slots__ = "_path", "__weakref__"
__weakref__: Any
def __init__(self, *args: str | PathLike[str]) -> None:
self._path: Final[pathlib.Path] = pathlib.Path(*args)
def __fspath__(self) -> str:
return self._path.__fspath__()
def __str__(self) -> str:
return self._path.__str__()
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.as_posix()!r})"
def __bytes__(self) -> bytes:
return self._path.__bytes__()
def __hash__(self) -> int:
return self._path.__hash__()
def __eq__(self, other: object) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__eq__(target)
def __lt__(self, other: Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__lt__(target)
def __le__(self, other: Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__le__(target)
def __gt__(self, other: Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__gt__(target)
def __ge__(self, other: Path) -> bool:
target = other._path if isinstance(other, Path) else other
return self._path.__ge__(target)
def __truediv__(self, other: Any) -> Path:
return Path(self._path / other)
def __rtruediv__(self, other: Any) -> Path:
return Path(other) / self
@property
def parts(self) -> tuple[str, ...]:
return self._path.parts
@property
def drive(self) -> str:
return self._path.drive
@property
def root(self) -> str:
return self._path.root
@property
def anchor(self) -> str:
return self._path.anchor
@property
def parents(self) -> Sequence[Path]:
return tuple(Path(p) for p in self._path.parents)
@property
def parent(self) -> Path:
return Path(self._path.parent)
@property
def name(self) -> str:
return self._path.name
@property
def suffix(self) -> str:
return self._path.suffix
@property
def suffixes(self) -> list[str]:
return self._path.suffixes
@property
def stem(self) -> str:
return self._path.stem
async def absolute(self) -> Path:
path = await to_thread.run_sync(self._path.absolute)
return Path(path)
def as_posix(self) -> str:
return self._path.as_posix()
def as_uri(self) -> str:
return self._path.as_uri()
def match(self, path_pattern: str) -> bool:
return self._path.match(path_pattern)
def is_relative_to(self, *other: str | PathLike[str]) -> bool:
try:
self.relative_to(*other)
return True
except ValueError:
return False
async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
func = partial(os.chmod, follow_symlinks=follow_symlinks)
return await to_thread.run_sync(func, self._path, mode)
@classmethod
async def cwd(cls) -> Path:
path = await to_thread.run_sync(pathlib.Path.cwd)
return cls(path)
async def exists(self) -> bool:
return await to_thread.run_sync(self._path.exists, cancellable=True)
async def expanduser(self) -> Path:
return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True))
def glob(self, pattern: str) -> AsyncIterator[Path]:
gen = self._path.glob(pattern)
return _PathIterator(gen)
async def group(self) -> str:
return await to_thread.run_sync(self._path.group, cancellable=True)
async def hardlink_to(self, target: str | pathlib.Path | Path) -> None:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(os.link, target, self)
@classmethod
async def home(cls) -> Path:
home_path = await to_thread.run_sync(pathlib.Path.home)
return cls(home_path)
def is_absolute(self) -> bool:
return self._path.is_absolute()
async def is_block_device(self) -> bool:
return await to_thread.run_sync(self._path.is_block_device, cancellable=True)
async def is_char_device(self) -> bool:
return await to_thread.run_sync(self._path.is_char_device, cancellable=True)
async def is_dir(self) -> bool:
return await to_thread.run_sync(self._path.is_dir, cancellable=True)
async def is_fifo(self) -> bool:
return await to_thread.run_sync(self._path.is_fifo, cancellable=True)
async def is_file(self) -> bool:
return await to_thread.run_sync(self._path.is_file, cancellable=True)
async def is_mount(self) -> bool:
return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True)
def is_reserved(self) -> bool:
return self._path.is_reserved()
async def is_socket(self) -> bool:
return await to_thread.run_sync(self._path.is_socket, cancellable=True)
async def is_symlink(self) -> bool:
return await to_thread.run_sync(self._path.is_symlink, cancellable=True)
def iterdir(self) -> AsyncIterator[Path]:
gen = self._path.iterdir()
return _PathIterator(gen)
def joinpath(self, *args: str | PathLike[str]) -> Path:
return Path(self._path.joinpath(*args))
async def lchmod(self, mode: int) -> None:
await to_thread.run_sync(self._path.lchmod, mode)
async def lstat(self) -> os.stat_result:
return await to_thread.run_sync(self._path.lstat, cancellable=True)
async def mkdir(
self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
) -> None:
await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)
@overload
async def open(
self,
mode: OpenBinaryMode,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> AsyncFile[bytes]:
...
@overload
async def open(
self,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
) -> AsyncFile[str]:
...
async def open(
self,
mode: str = "r",
buffering: int = -1,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
) -> AsyncFile[Any]:
fp = await to_thread.run_sync(
self._path.open, mode, buffering, encoding, errors, newline
)
return AsyncFile(fp)
async def owner(self) -> str:
return await to_thread.run_sync(self._path.owner, cancellable=True)
async def read_bytes(self) -> bytes:
return await to_thread.run_sync(self._path.read_bytes)
async def read_text(
self, encoding: str | None = None, errors: str | None = None
) -> str:
return await to_thread.run_sync(self._path.read_text, encoding, errors)
def relative_to(self, *other: str | PathLike[str]) -> Path:
return Path(self._path.relative_to(*other))
async def readlink(self) -> Path:
target = await to_thread.run_sync(os.readlink, self._path)
return Path(cast(str, target))
async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.rename, target)
return Path(target)
async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.replace, target)
return Path(target)
async def resolve(self, strict: bool = False) -> Path:
func = partial(self._path.resolve, strict=strict)
return Path(await to_thread.run_sync(func, cancellable=True))
def rglob(self, pattern: str) -> AsyncIterator[Path]:
gen = self._path.rglob(pattern)
return _PathIterator(gen)
async def rmdir(self) -> None:
await to_thread.run_sync(self._path.rmdir)
async def samefile(
self, other_path: str | bytes | int | pathlib.Path | Path
) -> bool:
if isinstance(other_path, Path):
other_path = other_path._path
return await to_thread.run_sync(
self._path.samefile, other_path, cancellable=True
)
async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
func = partial(os.stat, follow_symlinks=follow_symlinks)
return await to_thread.run_sync(func, self._path, cancellable=True)
async def symlink_to(
self,
target: str | pathlib.Path | Path,
target_is_directory: bool = False,
) -> None:
if isinstance(target, Path):
target = target._path
await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)
async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
await to_thread.run_sync(self._path.touch, mode, exist_ok)
async def unlink(self, missing_ok: bool = False) -> None:
try:
await to_thread.run_sync(self._path.unlink)
except FileNotFoundError:
if not missing_ok:
raise
def with_name(self, name: str) -> Path:
return Path(self._path.with_name(name))
def with_stem(self, stem: str) -> Path:
return Path(self._path.with_name(stem + self._path.suffix))
def with_suffix(self, suffix: str) -> Path:
return Path(self._path.with_suffix(suffix))
async def write_bytes(self, data: bytes) -> int:
return await to_thread.run_sync(self._path.write_bytes, data)
async def write_text(
self,
data: str,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
) -> int:
# Path.write_text() does not support the "newline" parameter before Python 3.10
def sync_write_text() -> int:
with self._path.open(
"w", encoding=encoding, errors=errors, newline=newline
) as fp:
return fp.write(data)
return await to_thread.run_sync(sync_write_text)
PathLike.register(Path)
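
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of the async Path wrapper, assuming an anyio event loop; the
# path is hypothetical.
async def _example_async_path() -> None:
    path = Path("/tmp/example.txt")
    await path.write_text("hello\n")
    if await path.exists():
        print(await path.read_text())
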

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_core/_sockets.py
from __future__ import annotations
import socket
import ssl
import sys
from ipaddress import IPv6Address, ip_address
from os import PathLike, chmod
from pathlib import Path
from socket import AddressFamily, SocketKind
from typing import Awaitable, List, Tuple, cast, overload
from .. import to_thread
from ..abc import (
ConnectedUDPSocket,
IPAddressType,
IPSockAddrType,
SocketListener,
SocketStream,
UDPSocket,
UNIXSocketStream,
)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSStream
from ._eventloop import get_asynclib
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41) # https://bugs.python.org/issue29515
GetAddrInfoReturnType = List[
Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]]
]
AnyIPAddressFamily = Literal[
AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
# tls_hostname given
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str,
happy_eyeballs_delay: float = ...,
) -> TLSStream:
...
# ssl_context given
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
ssl_context: ssl.SSLContext,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> TLSStream:
...
# tls=True
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
tls: Literal[True],
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> TLSStream:
...
# tls=False
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
tls: Literal[False],
ssl_context: ssl.SSLContext | None = ...,
tls_standard_compatible: bool = ...,
tls_hostname: str | None = ...,
happy_eyeballs_delay: float = ...,
) -> SocketStream:
...
# No TLS arguments
@overload
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = ...,
happy_eyeballs_delay: float = ...,
) -> SocketStream:
...
async def connect_tcp(
remote_host: IPAddressType,
remote_port: int,
*,
local_host: IPAddressType | None = None,
tls: bool = False,
ssl_context: ssl.SSLContext | None = None,
tls_standard_compatible: bool = True,
tls_hostname: str | None = None,
happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
"""
Connect to a host using the TCP protocol.
This function implements the stateless version of the Happy Eyeballs algorithm (RFC
6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
each one is tried until one connection attempt succeeds. If the first attempt does
    not connect within 250 milliseconds, a second attempt is started using the next
address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
available) is tried first.
When the connection has been established, a TLS handshake will be done if either
``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.
:param remote_host: the IP address or host name to connect to
:param remote_port: port on the target host to connect to
:param local_host: the interface address or name to bind the socket to before connecting
:param tls: ``True`` to do a TLS handshake with the connected stream and return a
:class:`~anyio.streams.tls.TLSStream` instead
:param ssl_context: the SSL context object to use (if omitted, a default context is created)
:param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing
the stream and requires that the server does this as well. Otherwise,
:exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
Some protocols, such as HTTP, require this option to be ``False``.
See :meth:`~ssl.SSLContext.wrap_socket` for details.
:param tls_hostname: host name to check the server certificate against (defaults to the value
of ``remote_host``)
:param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt
:return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
:raises OSError: if the connection attempt fails
"""
# Placed here due to https://github.com/python/mypy/issues/7057
connected_stream: SocketStream | None = None
async def try_connect(remote_host: str, event: Event) -> None:
nonlocal connected_stream
try:
stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
except OSError as exc:
oserrors.append(exc)
return
else:
if connected_stream is None:
connected_stream = stream
tg.cancel_scope.cancel()
else:
await stream.aclose()
finally:
event.set()
asynclib = get_asynclib()
local_address: IPSockAddrType | None = None
family = socket.AF_UNSPEC
if local_host:
gai_res = await getaddrinfo(str(local_host), None)
family, *_, local_address = gai_res[0]
target_host = str(remote_host)
try:
addr_obj = ip_address(remote_host)
except ValueError:
# getaddrinfo() will raise an exception if name resolution fails
gai_res = await getaddrinfo(
target_host, remote_port, family=family, type=socket.SOCK_STREAM
)
# Organize the list so that the first address is an IPv6 address (if available) and the
        # second one is an IPv4 address. The rest can be in whatever order.
v6_found = v4_found = False
target_addrs: list[tuple[socket.AddressFamily, str]] = []
for af, *rest, sa in gai_res:
if af == socket.AF_INET6 and not v6_found:
v6_found = True
target_addrs.insert(0, (af, sa[0]))
elif af == socket.AF_INET and not v4_found and v6_found:
v4_found = True
target_addrs.insert(1, (af, sa[0]))
else:
target_addrs.append((af, sa[0]))
else:
if isinstance(addr_obj, IPv6Address):
target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
else:
target_addrs = [(socket.AF_INET, addr_obj.compressed)]
oserrors: list[OSError] = []
async with create_task_group() as tg:
for i, (af, addr) in enumerate(target_addrs):
event = Event()
tg.start_soon(try_connect, addr, event)
with move_on_after(happy_eyeballs_delay):
await event.wait()
if connected_stream is None:
cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(oserrors)
raise OSError("All connection attempts failed") from cause
if tls or tls_hostname or ssl_context:
try:
return await TLSStream.wrap(
connected_stream,
server_side=False,
hostname=tls_hostname or str(remote_host),
ssl_context=ssl_context,
standard_compatible=tls_standard_compatible,
)
except BaseException:
await aclose_forcefully(connected_stream)
raise
return connected_stream
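
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of connect_tcp() issuing a plain HTTP request, assuming a
# reachable server and an anyio event loop; the host and function name are
# hypothetical.
async def _example_connect_tcp() -> None:
    async with await connect_tcp("example.com", 80) as stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await stream.receive())
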
async def connect_unix(path: str | PathLike[str]) -> UNIXSocketStream:
"""
Connect to the given UNIX socket.
Not available on Windows.
:param path: path to the socket
:return: a socket stream object
"""
path = str(Path(path))
return await get_asynclib().connect_unix(path)
async def create_tcp_listener(
*,
local_host: IPAddressType | None = None,
local_port: int = 0,
family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
backlog: int = 65536,
reuse_port: bool = False,
) -> MultiListener[SocketStream]:
"""
Create a TCP socket listener.
:param local_port: port number to listen on
:param local_host: IP address of the interface to listen on. If omitted, listen on
all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
:param family: address family (used if ``local_host`` was omitted)
:param backlog: maximum number of queued incoming connections (up to a maximum of
2**16, or 65536)
:param reuse_port: ``True`` to allow multiple sockets to bind to the same
address/port (not supported on Windows)
:return: a list of listener objects
"""
asynclib = get_asynclib()
backlog = min(backlog, 65536)
local_host = str(local_host) if local_host is not None else None
gai_res = await getaddrinfo(
local_host, # type: ignore[arg-type]
local_port,
family=family,
type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
listeners: list[SocketListener] = []
try:
# The set() is here to work around a glibc bug:
# https://sourceware.org/bugzilla/show_bug.cgi?id=14969
sockaddr: tuple[str, int] | tuple[str, int, int, int]
for fam, kind, *_, sockaddr in sorted(set(gai_res)):
# Workaround for an uvloop bug where we don't get the correct scope ID for
# IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
# getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
continue
raw_socket = socket.socket(fam)
raw_socket.setblocking(False)
# For Windows, enable exclusive address use. For others, enable address reuse.
if sys.platform == "win32":
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
else:
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# If only IPv6 was requested, disable dual stack operation
if fam == socket.AF_INET6:
raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# Workaround for #554
if "%" in sockaddr[0]:
addr, scope_id = sockaddr[0].split("%", 1)
sockaddr = (addr, sockaddr[1], 0, int(scope_id))
raw_socket.bind(sockaddr)
raw_socket.listen(backlog)
listener = asynclib.TCPSocketListener(raw_socket)
listeners.append(listener)
except BaseException:
for listener in listeners:
await listener.aclose()
raise
return MultiListener(listeners)
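
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal echo server built on create_tcp_listener(); serve() runs until
# cancelled. The port and function names are hypothetical.
async def _example_echo_server() -> None:
    async def handle(stream: SocketStream) -> None:
        async with stream:
            data = await stream.receive()
            await stream.send(data)  # echo a single chunk back

    listener = await create_tcp_listener(local_port=25000)
    async with listener:
        await listener.serve(handle)
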
async def create_unix_listener(
path: str | PathLike[str],
*,
mode: int | None = None,
backlog: int = 65536,
) -> SocketListener:
"""
Create a UNIX socket listener.
Not available on Windows.
:param path: path of the socket
:param mode: permissions to set on the socket
:param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or
65536)
:return: a listener object
.. versionchanged:: 3.0
If a socket already exists on the file system in the given path, it will be removed first.
"""
path_str = str(path)
path = Path(path)
if path.is_socket():
path.unlink()
backlog = min(backlog, 65536)
raw_socket = socket.socket(socket.AF_UNIX)
raw_socket.setblocking(False)
try:
await to_thread.run_sync(raw_socket.bind, path_str, cancellable=True)
if mode is not None:
await to_thread.run_sync(chmod, path_str, mode, cancellable=True)
raw_socket.listen(backlog)
return get_asynclib().UNIXSocketListener(raw_socket)
except BaseException:
raw_socket.close()
raise
async def create_udp_socket(
family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
*,
local_host: IPAddressType | None = None,
local_port: int = 0,
reuse_port: bool = False,
) -> UDPSocket:
"""
Create a UDP socket.
If ``local_port`` has been given, the socket will be bound to this port on the local
machine, making this socket suitable for providing UDP based services.
:param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
``local_host`` if omitted
:param local_host: IP address or host name of the local interface to bind to
:param local_port: local port to bind to
:param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
(not supported on Windows)
:return: a UDP socket
"""
if family is AddressFamily.AF_UNSPEC and not local_host:
raise ValueError('Either "family" or "local_host" must be given')
if local_host:
gai_res = await getaddrinfo(
str(local_host),
local_port,
family=family,
type=socket.SOCK_DGRAM,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
local_address = gai_res[0][-1]
elif family is AddressFamily.AF_INET6:
local_address = ("::", 0)
else:
local_address = ("0.0.0.0", 0)
return await get_asynclib().create_udp_socket(
family, local_address, None, reuse_port
)
async def create_connected_udp_socket(
remote_host: IPAddressType,
remote_port: int,
*,
family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
local_host: IPAddressType | None = None,
local_port: int = 0,
reuse_port: bool = False,
) -> ConnectedUDPSocket:
"""
Create a connected UDP socket.
Connected UDP sockets can only communicate with the specified remote host/port, and any packets
sent from other sources are dropped.
:param remote_host: remote host to set as the default target
:param remote_port: port on the remote host to set as the default target
:param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
``local_host`` or ``remote_host`` if omitted
:param local_host: IP address or host name of the local interface to bind to
:param local_port: local port to bind to
:param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
(not supported on Windows)
:return: a connected UDP socket
"""
local_address = None
if local_host:
gai_res = await getaddrinfo(
str(local_host),
local_port,
family=family,
type=socket.SOCK_DGRAM,
flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
local_address = gai_res[0][-1]
gai_res = await getaddrinfo(
str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
)
family = cast(AnyIPAddressFamily, gai_res[0][0])
remote_address = gai_res[0][-1]
return await get_asynclib().create_udp_socket(
family, local_address, remote_address, reuse_port
)
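
# --- usage sketch (editorial addition; not part of the original anyio source) ---
# A minimal sketch of a connected UDP socket exchanging one datagram with a
# peer; the address and function name are hypothetical.
async def _example_udp_ping() -> None:
    async with await create_connected_udp_socket("127.0.0.1", 9999) as udp:
        await udp.send(b"ping")
        print(await udp.receive())
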
async def getaddrinfo(
host: bytearray | bytes | str,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> GetAddrInfoReturnType:
"""
Look up a numeric IP address given a host name.
Internationalized domain names are translated according to the (non-transitional) IDNA 2008
standard.
.. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
(host, port), unlike what :func:`socket.getaddrinfo` does.
:param host: host name
:param port: port number
    :param family: socket family (``AF_INET``, ...)
:param type: socket type (``SOCK_STREAM``, ...)
:param proto: protocol number
:param flags: flags to pass to upstream ``getaddrinfo()``
:return: list of tuples containing (family, type, proto, canonname, sockaddr)
.. seealso:: :func:`socket.getaddrinfo`
"""
# Handle unicode hostnames
if isinstance(host, str):
try:
encoded_host = host.encode("ascii")
except UnicodeEncodeError:
import idna
encoded_host = idna.encode(host, uts46=True)
else:
encoded_host = host
gai_res = await get_asynclib().getaddrinfo(
encoded_host, port, family=family, type=type, proto=proto, flags=flags
)
return [
(family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
for family, type, proto, canonname, sockaddr in gai_res
]
def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
"""
Look up the host name of an IP address.
:param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
:param flags: flags to pass to upstream ``getnameinfo()``
:return: a tuple of (host name, service name)
.. seealso:: :func:`socket.getnameinfo`
"""
return get_asynclib().getnameinfo(sockaddr, flags)
def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
"""
Wait until the given socket has data to be read.
This does **NOT** work on Windows when using the asyncio backend with a proactor event loop
(default on py3.8+).
.. warning:: Only use this on raw sockets that have not been wrapped by any higher level
constructs like socket streams!
:param sock: a socket object
:raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
socket to become readable
:raises ~anyio.BusyResourceError: if another task is already waiting for the socket
to become readable
"""
return get_asynclib().wait_socket_readable(sock)
def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
"""
Wait until the given socket can be written to.
This does **NOT** work on Windows when using the asyncio backend with a proactor event loop
(default on py3.8+).
.. warning:: Only use this on raw sockets that have not been wrapped by any higher level
constructs like socket streams!
:param sock: a socket object
:raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
socket to become writable
:raises ~anyio.BusyResourceError: if another task is already waiting for the socket
to become writable
"""
return get_asynclib().wait_socket_writable(sock)
#
# Private API
#
def convert_ipv6_sockaddr(
sockaddr: tuple[str, int, int, int] | tuple[str, int]
) -> tuple[str, int]:
"""
Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
If the scope ID is nonzero, it is added to the address, separated with ``%``.
Otherwise the flow id and scope id are simply cut off from the tuple.
Any other kinds of socket addresses are returned as-is.
:param sockaddr: the result of :meth:`~socket.socket.getsockname`
:return: the converted socket address
"""
# This is more complicated than it should be because of MyPy
if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr)
if scope_id:
# PyPy (as of v7.3.11) leaves the interface name in the result, so
# we discard it and only get the scope ID from the end
# (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
host = host.split("%")[0]
# Add scope_id to the address
return f"{host}%{scope_id}", port
else:
return host, port
else:
return cast(Tuple[str, int], sockaddr)

# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_backends/_trio.py
from __future__ import annotations
import array
import math
import socket
from concurrent.futures import Future
from contextvars import copy_context
from dataclasses import dataclass
from functools import partial
from io import IOBase
from os import PathLike
from signal import Signals
from types import TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Awaitable,
Callable,
Collection,
Coroutine,
Generic,
Iterable,
Mapping,
NoReturn,
Sequence,
TypeVar,
cast,
)
import sniffio
import trio.from_thread
from outcome import Error, Outcome, Value
from trio.socket import SocketType as TrioSocketType
from trio.to_thread import run_sync
from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
from .._core._eventloop import claim_worker_thread
from .._core._exceptions import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
EndOfStream,
)
from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
from .._core._sockets import convert_ipv6_sockaddr
from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import ResourceGuard
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import IPSockAddrType, UDPPacketType
if TYPE_CHECKING:
from trio_typing import TaskStatus
try:
from trio import lowlevel as trio_lowlevel
except ImportError:
from trio import hazmat as trio_lowlevel # type: ignore[no-redef]
from trio.hazmat import wait_readable, wait_writable
else:
from trio.lowlevel import wait_readable, wait_writable
try:
trio_open_process = trio_lowlevel.open_process
except AttributeError:
# isort: off
from trio import ( # type: ignore[attr-defined, no-redef]
open_process as trio_open_process,
)
T_Retval = TypeVar("T_Retval")
T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
#
# Event loop
#
run = trio.run
current_token = trio.lowlevel.current_trio_token
RunVar = trio.lowlevel.RunVar
#
# Miscellaneous
#
sleep = trio.sleep
#
# Timeouts and cancellation
#
class CancelScope(BaseCancelScope):
def __new__(
cls, original: trio.CancelScope | None = None, **kwargs: object
) -> CancelScope:
return object.__new__(cls)
def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
self.__original = original or trio.CancelScope(**kwargs)
def __enter__(self) -> CancelScope:
self.__original.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
# https://github.com/python-trio/trio-typing/pull/79
return self.__original.__exit__( # type: ignore[func-returns-value]
exc_type, exc_val, exc_tb
)
def cancel(self) -> DeprecatedAwaitable:
self.__original.cancel()
return DeprecatedAwaitable(self.cancel)
@property
def deadline(self) -> float:
return self.__original.deadline
@deadline.setter
def deadline(self, value: float) -> None:
self.__original.deadline = value
@property
def cancel_called(self) -> bool:
return self.__original.cancel_called
@property
def shield(self) -> bool:
return self.__original.shield
@shield.setter
def shield(self, value: bool) -> None:
self.__original.shield = value
CancelledError = trio.Cancelled
checkpoint = trio.lowlevel.checkpoint
checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled
cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint
current_effective_deadline = trio.current_effective_deadline
current_time = trio.current_time
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup, trio.MultiError):
pass
class TaskGroup(abc.TaskGroup):
def __init__(self) -> None:
self._active = False
self._nursery_manager = trio.open_nursery()
self.cancel_scope = None # type: ignore[assignment]
async def __aenter__(self) -> TaskGroup:
self._active = True
self._nursery = await self._nursery_manager.__aenter__()
self.cancel_scope = CancelScope(self._nursery.cancel_scope)
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
try:
return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
except trio.MultiError as exc:
raise ExceptionGroup(exc.exceptions) from None
finally:
self._active = False
def start_soon(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> None:
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
self._nursery.start_soon(func, *args, name=name)
async def start(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> object:
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
return await self._nursery.start(func, *args, name=name)
#
# Threads
#
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval],
*args: object,
cancellable: bool = False,
limiter: trio.CapacityLimiter | None = None,
) -> T_Retval:
def wrapper() -> T_Retval:
with claim_worker_thread("trio"):
return func(*args)
# TODO: remove explicit context copying when trio 0.20 is the minimum requirement
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, None)
return await run_sync(
context.run, wrapper, cancellable=cancellable, limiter=limiter
)
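# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# Offloading a blocking callable to a worker thread via the function above;
# sum() stands in for any blocking function and the input size is arbitrary.
async def _example_worker_thread_usage() -> None:
    total = await run_sync_in_worker_thread(sum, range(1_000_000))
    assert total == 499_999_500_000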
# TODO: remove this workaround when trio 0.20 is the minimum requirement
def run_async_from_thread(
fn: Callable[..., Awaitable[T_Retval]], *args: Any
) -> T_Retval:
async def wrapper() -> T_Retval:
retval: T_Retval
async def inner() -> None:
nonlocal retval
__tracebackhide__ = True
retval = await fn(*args)
async with trio.open_nursery() as n:
context.run(n.start_soon, inner)
__tracebackhide__ = True
return retval # noqa: F821
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, "trio")
return trio.from_thread.run(wrapper)
def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval:
# TODO: remove explicit context copying when trio 0.20 is the minimum requirement
retval = trio.from_thread.run_sync(copy_context().run, fn, *args)
return cast(T_Retval, retval)
class BlockingPortal(abc.BlockingPortal):
def __new__(cls) -> BlockingPortal:
return object.__new__(cls)
def __init__(self) -> None:
super().__init__()
self._token = trio.lowlevel.current_trio_token()
def _spawn_task_from_thread(
self,
func: Callable,
args: tuple,
kwargs: dict[str, Any],
name: object,
future: Future,
) -> None:
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, "trio")
trio.from_thread.run_sync(
context.run,
partial(self._task_group.start_soon, name=name),
self._call_func,
func,
args,
kwargs,
future,
trio_token=self._token,
)
#
# Subprocesses
#
@dataclass(eq=False)
class ReceiveStreamWrapper(abc.ByteReceiveStream):
_stream: trio.abc.ReceiveStream
async def receive(self, max_bytes: int | None = None) -> bytes:
try:
data = await self._stream.receive_some(max_bytes)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class SendStreamWrapper(abc.ByteSendStream):
_stream: trio.abc.SendStream
async def send(self, item: bytes) -> None:
try:
await self._stream.send_all(item)
except trio.ClosedResourceError as exc:
raise ClosedResourceError from exc.__cause__
except trio.BrokenResourceError as exc:
raise BrokenResourceError from exc.__cause__
async def aclose(self) -> None:
await self._stream.aclose()
@dataclass(eq=False)
class Process(abc.Process):
_process: trio.Process
_stdin: abc.ByteSendStream | None
_stdout: abc.ByteReceiveStream | None
_stderr: abc.ByteReceiveStream | None
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: Signals) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> int | None:
return self._process.returncode
@property
def stdin(self) -> abc.ByteSendStream | None:
return self._stdin
@property
def stdout(self) -> abc.ByteReceiveStream | None:
return self._stdout
@property
def stderr(self) -> abc.ByteReceiveStream | None:
return self._stderr
async def open_process(
command: str | bytes | Sequence[str | bytes],
*,
shell: bool,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
cwd: str | bytes | PathLike | None = None,
env: Mapping[str, str] | None = None,
start_new_session: bool = False,
) -> Process:
process = await trio_open_process( # type: ignore[misc]
command, # type: ignore[arg-type]
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
cwd=cwd,
env=env,
start_new_session=start_new_session,
)
stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
def after_run(self) -> None:
super().after_run()
current_default_worker_process_limiter: RunVar = RunVar(
"current_default_worker_process_limiter"
)
async def _shutdown_process_pool(workers: set[Process]) -> None:
process: Process
try:
await sleep(math.inf)
except trio.Cancelled:
for process in workers:
if process.returncode is None:
process.kill()
with CancelScope(shield=True):
for process in workers:
await process.aclose()
def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
#
# Sockets and networking
#
class _TrioSocketMixin(Generic[T_SockAddr]):
def __init__(self, trio_socket: TrioSocketType) -> None:
self._trio_socket = trio_socket
self._closed = False
def _check_closed(self) -> None:
if self._closed:
raise ClosedResourceError
if self._trio_socket.fileno() < 0:
raise BrokenResourceError
@property
def _raw_socket(self) -> socket.socket:
return self._trio_socket._sock # type: ignore[attr-defined]
async def aclose(self) -> None:
if self._trio_socket.fileno() >= 0:
self._closed = True
self._trio_socket.close()
def _convert_socket_error(self, exc: BaseException) -> NoReturn:
if isinstance(exc, trio.ClosedResourceError):
raise ClosedResourceError from exc
elif self._trio_socket.fileno() < 0 and self._closed:
raise ClosedResourceError from None
elif isinstance(exc, OSError):
raise BrokenResourceError from exc
else:
raise exc
class SocketStream(_TrioSocketMixin, abc.SocketStream):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
try:
data = await self._trio_socket.recv(max_bytes)
except BaseException as exc:
self._convert_socket_error(exc)
if data:
return data
else:
raise EndOfStream
async def send(self, item: bytes) -> None:
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = await self._trio_socket.send(view)
except BaseException as exc:
self._convert_socket_error(exc)
view = view[bytes_sent:]
async def send_eof(self) -> None:
self._trio_socket.shutdown(socket.SHUT_WR)
class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError("msglen must be a non-negative integer")
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError("maxfds must be a positive integer")
fds = array.array("i")
await checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = await self._trio_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
)
except BaseException as exc:
self._convert_socket_error(exc)
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(
f"Received unexpected ancillary data; message = {message!r}, "
f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
)
fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
if not message:
raise ValueError("message must not be empty")
if not fds:
raise ValueError("fds must not be empty")
filenos: list[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await checkpoint()
with self._send_guard:
while True:
try:
await self._trio_socket.sendmsg(
[message],
[
(
socket.SOL_SOCKET,
socket.SCM_RIGHTS, # type: ignore[list-item]
fdarray,
)
],
)
break
except BaseException as exc:
self._convert_socket_error(exc)
class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard("accepting connections from")
async def accept(self) -> SocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return SocketStream(trio_socket)
class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
self._accept_guard = ResourceGuard("accepting connections from")
async def accept(self) -> UNIXSocketStream:
with self._accept_guard:
try:
trio_socket, _addr = await self._trio_socket.accept()
except BaseException as exc:
self._convert_socket_error(exc)
return UNIXSocketStream(trio_socket)
class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> tuple[bytes, IPSockAddrType]:
with self._receive_guard:
try:
data, addr = await self._trio_socket.recvfrom(65536)
return data, convert_ipv6_sockaddr(addr)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
try:
await self._trio_socket.sendto(*item)
except BaseException as exc:
self._convert_socket_error(exc)
class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
def __init__(self, trio_socket: TrioSocketType) -> None:
super().__init__(trio_socket)
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
async def receive(self) -> bytes:
with self._receive_guard:
try:
return await self._trio_socket.recv(65536)
except BaseException as exc:
self._convert_socket_error(exc)
async def send(self, item: bytes) -> None:
with self._send_guard:
try:
await self._trio_socket.send(item)
except BaseException as exc:
self._convert_socket_error(exc)
async def connect_tcp(
host: str, port: int, local_address: IPSockAddrType | None = None
) -> SocketStream:
family = socket.AF_INET6 if ":" in host else socket.AF_INET
trio_socket = trio.socket.socket(family)
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if local_address:
await trio_socket.bind(local_address)
try:
await trio_socket.connect((host, port))
except BaseException:
trio_socket.close()
raise
return SocketStream(trio_socket)
async def connect_unix(path: str) -> UNIXSocketStream:
trio_socket = trio.socket.socket(socket.AF_UNIX)
try:
await trio_socket.connect(path)
except BaseException:
trio_socket.close()
raise
return UNIXSocketStream(trio_socket)
async def create_udp_socket(
family: socket.AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
if reuse_port:
trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if local_address:
await trio_socket.bind(local_address)
if remote_address:
await trio_socket.connect(remote_address)
return ConnectedUDPSocket(trio_socket)
else:
return UDPSocket(trio_socket)
getaddrinfo = trio.socket.getaddrinfo
getnameinfo = trio.socket.getnameinfo
async def wait_socket_readable(sock: socket.socket) -> None:
try:
await wait_readable(sock)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError("reading from") from None
async def wait_socket_writable(sock: socket.socket) -> None:
try:
await wait_writable(sock)
except trio.ClosedResourceError as exc:
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
except trio.BusyResourceError:
raise BusyResourceError("writing to") from None
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls) -> Event:
return object.__new__(cls)
def __init__(self) -> None:
self.__original = trio.Event()
def is_set(self) -> bool:
return self.__original.is_set()
async def wait(self) -> None:
return await self.__original.wait()
def statistics(self) -> EventStatistics:
orig_statistics = self.__original.statistics()
return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
def set(self) -> DeprecatedAwaitable:
self.__original.set()
return DeprecatedAwaitable(self.set)
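# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# One task waits on the wrapped Event while another sets it; set() returns a
# DeprecatedAwaitable so legacy ``await event.set()`` call sites keep working.
async def _example_event_usage() -> None:
    event = Event()

    async def setter() -> None:
        event.set()

    async with TaskGroup() as tg:
        tg.start_soon(setter)
        await event.wait()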
class CapacityLimiter(BaseCapacityLimiter):
def __new__(cls, *args: object, **kwargs: object) -> CapacityLimiter:
return object.__new__(cls)
def __init__(
self, *args: Any, original: trio.CapacityLimiter | None = None
) -> None:
self.__original = original or trio.CapacityLimiter(*args)
async def __aenter__(self) -> None:
return await self.__original.__aenter__()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.__original.__aexit__(exc_type, exc_val, exc_tb)
@property
def total_tokens(self) -> float:
return self.__original.total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
self.__original.total_tokens = value
@property
def borrowed_tokens(self) -> int:
return self.__original.borrowed_tokens
@property
def available_tokens(self) -> float:
return self.__original.available_tokens
def acquire_nowait(self) -> DeprecatedAwaitable:
self.__original.acquire_nowait()
return DeprecatedAwaitable(self.acquire_nowait)
def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
self.__original.acquire_on_behalf_of_nowait(borrower)
return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)
async def acquire(self) -> None:
await self.__original.acquire()
async def acquire_on_behalf_of(self, borrower: object) -> None:
await self.__original.acquire_on_behalf_of(borrower)
def release(self) -> None:
return self.__original.release()
def release_on_behalf_of(self, borrower: object) -> None:
return self.__original.release_on_behalf_of(borrower)
def statistics(self) -> CapacityLimiterStatistics:
orig = self.__original.statistics()
return CapacityLimiterStatistics(
borrowed_tokens=orig.borrowed_tokens,
total_tokens=orig.total_tokens,
borrowers=orig.borrowers,
tasks_waiting=orig.tasks_waiting,
)
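# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# Bounding concurrency with the wrapped limiter: at most two of these hypothetical
# workers hold a token at any moment; the rest wait in acquire().
async def _example_capacity_limiter_usage() -> None:
    limiter = CapacityLimiter(2)

    async def worker() -> None:
        async with limiter:
            await trio.sleep(0.1)

    async with TaskGroup() as tg:
        for _ in range(5):
            tg.start_soon(worker)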
_capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper")
def current_default_thread_limiter() -> CapacityLimiter:
try:
return _capacity_limiter_wrapper.get()
except LookupError:
limiter = CapacityLimiter(
original=trio.to_thread.current_default_thread_limiter()
)
_capacity_limiter_wrapper.set(limiter)
return limiter
#
# Signal handling
#
class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
_iterator: AsyncIterator[int]
def __init__(self, signals: tuple[Signals, ...]):
self._signals = signals
def __enter__(self) -> _SignalReceiver:
self._cm = trio.open_signal_receiver(*self._signals)
self._iterator = self._cm.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
return self._cm.__exit__(exc_type, exc_val, exc_tb)
def __aiter__(self) -> _SignalReceiver:
return self
async def __anext__(self) -> Signals:
signum = await self._iterator.__anext__()
return Signals(signum)
def open_signal_receiver(*signals: Signals) -> _SignalReceiver:
return _SignalReceiver(signals)
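# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# Iterating over delivered signals; SIGINT/SIGTERM are example choices and the loop
# body is hypothetical. Signal delivery is not supported on Windows.
async def _example_signal_receiver_usage() -> None:
    with open_signal_receiver(Signals.SIGINT, Signals.SIGTERM) as signals:
        async for signum in signals:
            print(f"Received signal {signum.name}")
            break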
#
# Testing and debugging
#
def get_current_task() -> TaskInfo:
task = trio_lowlevel.current_task()
parent_id = None
if task.parent_nursery and task.parent_nursery.parent_task:
parent_id = id(task.parent_nursery.parent_task)
return TaskInfo(id(task), parent_id, task.name, task.coro)
def get_running_tasks() -> list[TaskInfo]:
root_task = trio_lowlevel.current_root_task()
task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)]
nurseries = root_task.child_nurseries
while nurseries:
new_nurseries: list[trio.Nursery] = []
for nursery in nurseries:
for task in nursery.child_tasks:
task_infos.append(
TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro)
)
new_nurseries.extend(task.child_nurseries)
nurseries = new_nurseries
return task_infos
def wait_all_tasks_blocked() -> Awaitable[None]:
import trio.testing
return trio.testing.wait_all_tasks_blocked()
class TestRunner(abc.TestRunner):
def __init__(self, **options: Any) -> None:
from collections import deque
from queue import Queue
self._call_queue: Queue[Callable[..., object]] = Queue()
self._result_queue: deque[Outcome] = deque()
self._stop_event: trio.Event | None = None
self._nursery: trio.Nursery | None = None
self._options = options
async def _trio_main(self) -> None:
self._stop_event = trio.Event()
async with trio.open_nursery() as self._nursery:
await self._stop_event.wait()
async def _call_func(
self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict
) -> None:
try:
retval = await func(*args, **kwargs)
except BaseException as exc:
self._result_queue.append(Error(exc))
else:
self._result_queue.append(Value(retval))
def _main_task_finished(self, outcome: object) -> None:
self._nursery = None
def _get_nursery(self) -> trio.Nursery:
if self._nursery is None:
trio.lowlevel.start_guest_run(
self._trio_main,
run_sync_soon_threadsafe=self._call_queue.put,
done_callback=self._main_task_finished,
**self._options,
)
while self._nursery is None:
self._call_queue.get()()
return self._nursery
def _call(
self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object
) -> T_Retval:
self._get_nursery().start_soon(self._call_func, func, args, kwargs)
while not self._result_queue:
self._call_queue.get()()
outcome = self._result_queue.pop()
return outcome.unwrap()
def close(self) -> None:
if self._stop_event:
self._stop_event.set()
while self._nursery is not None:
self._call_queue.get()()
def run_asyncgen_fixture(
self,
fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
kwargs: dict[str, Any],
) -> Iterable[T_Retval]:
async def fixture_runner(*, task_status: TaskStatus[T_Retval]) -> None:
agen = fixture_func(**kwargs)
retval = await agen.asend(None)
task_status.started(retval)
await teardown_event.wait()
try:
await agen.asend(None)
except StopAsyncIteration:
pass
else:
await agen.aclose()
raise RuntimeError("Async generator fixture did not stop")
teardown_event = trio.Event()
fixture_value = self._call(lambda: self._get_nursery().start(fixture_runner))
yield fixture_value
teardown_event.set()
def run_fixture(
self,
fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
kwargs: dict[str, Any],
) -> T_Retval:
return self._call(fixture_func, **kwargs)
def run_test(
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
) -> None:
self._call(test_func, **kwargs)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/anyio/_backends/_asyncio.py
from __future__ import annotations
import array
import asyncio
import concurrent.futures
import math
import socket
import sys
from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
from collections import OrderedDict, deque
from concurrent.futures import Future
from contextvars import Context, copy_context
from dataclasses import dataclass
from functools import partial, wraps
from inspect import (
CORO_RUNNING,
CORO_SUSPENDED,
GEN_RUNNING,
GEN_SUSPENDED,
getcoroutinestate,
getgeneratorstate,
)
from io import IOBase
from os import PathLike
from queue import Queue
from socket import AddressFamily, SocketKind
from threading import Thread
from types import TracebackType
from typing import (
IO,
Any,
AsyncGenerator,
Awaitable,
Callable,
Collection,
Coroutine,
Generator,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from weakref import WeakKeyDictionary
import sniffio
from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
from .._core._eventloop import claim_worker_thread, threadlocals
from .._core._exceptions import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
EndOfStream,
WouldBlock,
)
from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
from .._core._synchronization import Event as BaseEvent
from .._core._synchronization import ResourceGuard
from .._core._tasks import CancelScope as BaseCancelScope
from ..abc import IPSockAddrType, UDPPacketType
from ..lowlevel import RunVar
if sys.version_info >= (3, 8):
def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]:
return task.get_coro()
else:
def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]:
return task._coro
from asyncio import all_tasks, create_task, current_task, get_running_loop
from asyncio import run as native_run
def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]:
return [cb for cb, context in task._callbacks]
T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True)
# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, "get_name")
_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
def find_root_task() -> asyncio.Task:
root_task = _root_task.get(None)
if root_task is not None and not root_task.done():
return root_task
# Look for a task that has been started via run_until_complete()
for task in all_tasks():
if task._callbacks and not task.done():
for cb in _get_task_callbacks(task):
if (
cb is _run_until_complete_cb
or getattr(cb, "__module__", None) == "uvloop.loop"
):
_root_task.set(task)
return task
# Look up the topmost task in the AnyIO task tree, if possible
task = cast(asyncio.Task, current_task())
state = _task_states.get(task)
if state:
cancel_scope = state.cancel_scope
while cancel_scope and cancel_scope._parent_scope is not None:
cancel_scope = cancel_scope._parent_scope
if cancel_scope is not None:
return cast(asyncio.Task, cancel_scope._host_task)
return task
def get_callable_name(func: Callable) -> str:
module = getattr(func, "__module__", None)
qualname = getattr(func, "__qualname__", None)
return ".".join([x for x in (module, qualname) if x])
#
# Event loop
#
_run_vars = (
WeakKeyDictionary()
) # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any]
current_token = get_running_loop
def _task_started(task: asyncio.Task) -> bool:
"""Return ``True`` if the task has been started and has not finished."""
coro = cast(Coroutine[Any, Any, Any], get_coro(task))
try:
return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
except AttributeError:
try:
return getgeneratorstate(cast(Generator, coro)) in (
GEN_RUNNING,
GEN_SUSPENDED,
)
except AttributeError:
            # task coro is async_generator_asend (https://bugs.python.org/issue37771)
raise Exception(f"Cannot determine if task {task} has started or not")
def _maybe_set_event_loop_policy(
policy: asyncio.AbstractEventLoopPolicy | None, use_uvloop: bool
) -> None:
# On CPython, use uvloop when possible if no other policy has been given and if not
# explicitly disabled
if policy is None and use_uvloop and sys.implementation.name == "cpython":
try:
import uvloop
except ImportError:
pass
else:
# Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
if not hasattr(
asyncio.AbstractEventLoop, "shutdown_default_executor"
) or hasattr(uvloop.loop.Loop, "shutdown_default_executor"):
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def run(
func: Callable[..., Awaitable[T_Retval]],
*args: object,
debug: bool = False,
use_uvloop: bool = False,
policy: asyncio.AbstractEventLoopPolicy | None = None,
) -> T_Retval:
@wraps(func)
async def wrapper() -> T_Retval:
task = cast(asyncio.Task, current_task())
task_state = TaskState(None, get_callable_name(func), None)
_task_states[task] = task_state
if _native_task_names:
task.set_name(task_state.name)
try:
return await func(*args)
finally:
del _task_states[task]
_maybe_set_event_loop_policy(policy, use_uvloop)
return native_run(wrapper(), debug=debug)
#
# Miscellaneous
#
sleep = asyncio.sleep
#
# Timeouts and cancellation
#
CancelledError = asyncio.CancelledError
class CancelScope(BaseCancelScope):
def __new__(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
return object.__new__(cls)
def __init__(self, deadline: float = math.inf, shield: bool = False):
self._deadline = deadline
self._shield = shield
self._parent_scope: CancelScope | None = None
self._cancel_called = False
self._active = False
self._timeout_handle: asyncio.TimerHandle | None = None
self._cancel_handle: asyncio.Handle | None = None
self._tasks: set[asyncio.Task] = set()
self._host_task: asyncio.Task | None = None
self._timeout_expired = False
self._cancel_calls: int = 0
def __enter__(self) -> CancelScope:
if self._active:
raise RuntimeError(
"Each CancelScope may only be used for a single 'with' block"
)
self._host_task = host_task = cast(asyncio.Task, current_task())
self._tasks.add(host_task)
try:
task_state = _task_states[host_task]
except KeyError:
task_name = host_task.get_name() if _native_task_names else None
task_state = TaskState(None, task_name, self)
_task_states[host_task] = task_state
else:
self._parent_scope = task_state.cancel_scope
task_state.cancel_scope = self
self._timeout()
self._active = True
# Start cancelling the host task if the scope was cancelled before entering
if self._cancel_called:
self._deliver_cancellation()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
if not self._active:
raise RuntimeError("This cancel scope is not active")
if current_task() is not self._host_task:
raise RuntimeError(
"Attempted to exit cancel scope in a different task than it was "
"entered in"
)
assert self._host_task is not None
host_task_state = _task_states.get(self._host_task)
if host_task_state is None or host_task_state.cancel_scope is not self:
raise RuntimeError(
"Attempted to exit a cancel scope that isn't the current tasks's "
"current cancel scope"
)
self._active = False
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._tasks.remove(self._host_task)
host_task_state.cancel_scope = self._parent_scope
# Restart the cancellation effort in the farthest directly cancelled parent scope if this
# one was shielded
if self._shield:
self._deliver_cancellation_to_parent()
if exc_val is not None:
exceptions = (
exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
)
if all(isinstance(exc, CancelledError) for exc in exceptions):
if self._timeout_expired:
return self._uncancel()
elif not self._cancel_called:
# Task was cancelled natively
return None
elif not self._parent_cancelled():
# This scope was directly cancelled
return self._uncancel()
return None
def _uncancel(self) -> bool:
if sys.version_info < (3, 11) or self._host_task is None:
self._cancel_calls = 0
return True
# Uncancel all AnyIO cancellations
for i in range(self._cancel_calls):
self._host_task.uncancel()
self._cancel_calls = 0
return not self._host_task.cancelling()
def _timeout(self) -> None:
if self._deadline != math.inf:
loop = get_running_loop()
if loop.time() >= self._deadline:
self._timeout_expired = True
self.cancel()
else:
self._timeout_handle = loop.call_at(self._deadline, self._timeout)
def _deliver_cancellation(self) -> None:
"""
Deliver cancellation to directly contained tasks and nested cancel scopes.
Schedule another run at the end if we still have tasks eligible for cancellation.
"""
should_retry = False
current = current_task()
for task in self._tasks:
if task._must_cancel: # type: ignore[attr-defined]
continue
# The task is eligible for cancellation if it has started and is not in a cancel
# scope shielded from this one
cancel_scope = _task_states[task].cancel_scope
while cancel_scope is not self:
if cancel_scope is None or cancel_scope._shield:
break
else:
cancel_scope = cancel_scope._parent_scope
else:
should_retry = True
if task is not current and (
task is self._host_task or _task_started(task)
):
self._cancel_calls += 1
task.cancel()
# Schedule another callback if there are still tasks left
if should_retry:
self._cancel_handle = get_running_loop().call_soon(
self._deliver_cancellation
)
else:
self._cancel_handle = None
def _deliver_cancellation_to_parent(self) -> None:
"""Start cancellation effort in the farthest directly cancelled parent scope"""
scope = self._parent_scope
scope_to_cancel: CancelScope | None = None
while scope is not None:
if scope._cancel_called and scope._cancel_handle is None:
scope_to_cancel = scope
# No point in looking beyond any shielded scope
if scope._shield:
break
scope = scope._parent_scope
if scope_to_cancel is not None:
scope_to_cancel._deliver_cancellation()
def _parent_cancelled(self) -> bool:
# Check whether any parent has been cancelled
cancel_scope = self._parent_scope
while cancel_scope is not None and not cancel_scope._shield:
if cancel_scope._cancel_called:
return True
else:
cancel_scope = cancel_scope._parent_scope
return False
def cancel(self) -> DeprecatedAwaitable:
if not self._cancel_called:
if self._timeout_handle:
self._timeout_handle.cancel()
self._timeout_handle = None
self._cancel_called = True
if self._host_task is not None:
self._deliver_cancellation()
return DeprecatedAwaitable(self.cancel)
@property
def deadline(self) -> float:
return self._deadline
@deadline.setter
def deadline(self, value: float) -> None:
self._deadline = float(value)
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = None
if self._active and not self._cancel_called:
self._timeout()
@property
def cancel_called(self) -> bool:
return self._cancel_called
@property
def shield(self) -> bool:
return self._shield
@shield.setter
def shield(self, value: bool) -> None:
if self._shield != value:
self._shield = value
if not value:
self._deliver_cancellation_to_parent()
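# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# Shielding in action: the inner scope keeps running even after the outer scope is
# cancelled, because _deliver_cancellation() stops at shielded scopes. The timing
# value is arbitrary.
async def _example_shielded_scope() -> None:
    with CancelScope() as outer:
        outer.cancel()
        with CancelScope(shield=True):
            await sleep(0.1)  # completes despite the pending outer cancellation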
async def checkpoint() -> None:
await sleep(0)
async def checkpoint_if_cancelled() -> None:
task = current_task()
if task is None:
return
try:
cancel_scope = _task_states[task].cancel_scope
except KeyError:
return
while cancel_scope:
if cancel_scope.cancel_called:
await sleep(0)
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
async def cancel_shielded_checkpoint() -> None:
with CancelScope(shield=True):
await sleep(0)
def current_effective_deadline() -> float:
try:
cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index]
except KeyError:
return math.inf
deadline = math.inf
while cancel_scope:
deadline = min(deadline, cancel_scope.deadline)
if cancel_scope._cancel_called:
deadline = -math.inf
break
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
return deadline
def current_time() -> float:
return get_running_loop().time()
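# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# The effective deadline is the minimum over all enclosing unshielded scopes, or
# -inf once one of them has been cancelled. The offsets below are arbitrary.
async def _example_effective_deadline() -> None:
    now = current_time()
    with CancelScope(deadline=now + 10):
        with CancelScope(deadline=now + 5):
            assert current_effective_deadline() == now + 5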
#
# Task states
#
class TaskState:
"""
Encapsulates auxiliary task information that cannot be added to the Task instance itself
because there are no guarantees about its implementation.
"""
__slots__ = "parent_id", "name", "cancel_scope"
def __init__(
self,
parent_id: int | None,
name: str | None,
cancel_scope: CancelScope | None,
):
self.parent_id = parent_id
self.name = name
self.cancel_scope = cancel_scope
_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
def __init__(self, exceptions: list[BaseException]):
super().__init__()
self.exceptions = exceptions
class _AsyncioTaskStatus(abc.TaskStatus):
def __init__(self, future: asyncio.Future, parent_id: int):
self._future = future
self._parent_id = parent_id
def started(self, value: T_contra | None = None) -> None:
try:
self._future.set_result(value)
except asyncio.InvalidStateError:
raise RuntimeError(
"called 'started' twice on the same task status"
) from None
task = cast(asyncio.Task, current_task())
_task_states[task].parent_id = self._parent_id
class TaskGroup(abc.TaskGroup):
def __init__(self) -> None:
self.cancel_scope: CancelScope = CancelScope()
self._active = False
self._exceptions: list[BaseException] = []
async def __aenter__(self) -> TaskGroup:
self.cancel_scope.__enter__()
self._active = True
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
if exc_val is not None:
self.cancel_scope.cancel()
self._exceptions.append(exc_val)
while self.cancel_scope._tasks:
try:
await asyncio.wait(self.cancel_scope._tasks)
except asyncio.CancelledError:
self.cancel_scope.cancel()
self._active = False
if not self.cancel_scope._parent_cancelled():
exceptions = self._filter_cancellation_errors(self._exceptions)
else:
exceptions = self._exceptions
try:
if len(exceptions) > 1:
if all(
isinstance(e, CancelledError) and not e.args for e in exceptions
):
# Tasks were cancelled natively, without a cancellation message
raise CancelledError
else:
raise ExceptionGroup(exceptions)
elif exceptions and exceptions[0] is not exc_val:
raise exceptions[0]
except BaseException as exc:
# Clear the context here, as it can only be done in-flight.
# If the context is not cleared, it can result in recursive tracebacks (see #145).
exc.__context__ = None
raise
return ignore_exception
@staticmethod
def _filter_cancellation_errors(
exceptions: Sequence[BaseException],
) -> list[BaseException]:
filtered_exceptions: list[BaseException] = []
for exc in exceptions:
if isinstance(exc, ExceptionGroup):
new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
if len(new_exceptions) > 1:
filtered_exceptions.append(exc)
elif len(new_exceptions) == 1:
filtered_exceptions.append(new_exceptions[0])
elif new_exceptions:
new_exc = ExceptionGroup(new_exceptions)
new_exc.__cause__ = exc.__cause__
new_exc.__context__ = exc.__context__
new_exc.__traceback__ = exc.__traceback__
filtered_exceptions.append(new_exc)
elif not isinstance(exc, CancelledError) or exc.args:
filtered_exceptions.append(exc)
return filtered_exceptions
async def _run_wrapped_task(
self, coro: Coroutine, task_status_future: asyncio.Future | None
) -> None:
        # This is the code path for Python 3.7, where asyncio misbehaves if a task
        # raises a BaseException.
__traceback_hide__ = __tracebackhide__ = True # noqa: F841
task = cast(asyncio.Task, current_task())
try:
await coro
except BaseException as exc:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
else:
if task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError("Child exited without calling task_status.started()")
)
finally:
if task in self.cancel_scope._tasks:
self.cancel_scope._tasks.remove(task)
del _task_states[task]
def _spawn(
self,
func: Callable[..., Awaitable[Any]],
args: tuple,
name: object,
task_status_future: asyncio.Future | None = None,
) -> asyncio.Task:
def task_done(_task: asyncio.Task) -> None:
# This is the code path for Python 3.8+
assert _task in self.cancel_scope._tasks
self.cancel_scope._tasks.remove(_task)
del _task_states[_task]
try:
exc = _task.exception()
except CancelledError as e:
while isinstance(e.__context__, CancelledError):
e = e.__context__
exc = e
if exc is not None:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
elif task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError("Child exited without calling task_status.started()")
)
if not self._active:
raise RuntimeError(
"This task group is not active; no new tasks can be started."
)
options: dict[str, Any] = {}
name = get_callable_name(func) if name is None else str(name)
if _native_task_names:
options["name"] = name
kwargs = {}
if task_status_future:
parent_id = id(current_task())
kwargs["task_status"] = _AsyncioTaskStatus(
task_status_future, id(self.cancel_scope._host_task)
)
else:
parent_id = id(self.cancel_scope._host_task)
coro = func(*args, **kwargs)
if not asyncio.iscoroutine(coro):
raise TypeError(
f"Expected an async function, but {func} appears to be synchronous"
)
foreign_coro = not hasattr(coro, "cr_frame") and not hasattr(coro, "gi_frame")
if foreign_coro or sys.version_info < (3, 8):
coro = self._run_wrapped_task(coro, task_status_future)
task = create_task(coro, **options)
if not foreign_coro and sys.version_info >= (3, 8):
task.add_done_callback(task_done)
# Make the spawned task inherit the task group's cancel scope
_task_states[task] = TaskState(
parent_id=parent_id, name=name, cancel_scope=self.cancel_scope
)
self.cancel_scope._tasks.add(task)
return task
def start_soon(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> None:
self._spawn(func, args, name)
async def start(
self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> None:
future: asyncio.Future = asyncio.Future()
task = self._spawn(func, args, name, future)
        # If the task raises an exception after sending a start value, with no
        # checkpoint in between, the task group gets cancelled and this method never
        # proceeds to process the completed future. That's why a shielded cancel
        # scope is needed here.
with CancelScope(shield=True):
try:
return await future
except CancelledError:
task.cancel()
raise
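# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# The task_status handshake: start() resolves with whatever value the child passes
# to task_status.started(). The serve() function and port number are hypothetical.
async def _example_task_status_usage() -> None:
    async def serve(*, task_status: abc.TaskStatus) -> None:
        port = 8000  # e.g. bind a listener here, then report readiness
        task_status.started(port)
        await sleep(0.1)  # stand-in for the actual serving loop

    async with TaskGroup() as tg:
        port = await tg.start(serve)
        assert port == 8000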
#
# Threads
#
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
class WorkerThread(Thread):
MAX_IDLE_TIME = 10 # seconds
def __init__(
self,
root_task: asyncio.Task,
workers: set[WorkerThread],
idle_workers: deque[WorkerThread],
):
super().__init__(name="AnyIO worker thread")
self.root_task = root_task
self.workers = workers
self.idle_workers = idle_workers
self.loop = root_task._loop
self.queue: Queue[
tuple[Context, Callable, tuple, asyncio.Future] | None
] = Queue(2)
self.idle_since = current_time()
self.stopping = False
def _report_result(
self, future: asyncio.Future, result: Any, exc: BaseException | None
) -> None:
self.idle_since = current_time()
if not self.stopping:
self.idle_workers.append(self)
if not future.cancelled():
if exc is not None:
if isinstance(exc, StopIteration):
new_exc = RuntimeError("coroutine raised StopIteration")
new_exc.__cause__ = exc
exc = new_exc
future.set_exception(exc)
else:
future.set_result(result)
def run(self) -> None:
with claim_worker_thread("asyncio"):
threadlocals.loop = self.loop
while True:
item = self.queue.get()
if item is None:
# Shutdown command received
return
context, func, args, future = item
if not future.cancelled():
result = None
exception: BaseException | None = None
try:
result = context.run(func, *args)
except BaseException as exc:
exception = exc
if not self.loop.is_closed():
self.loop.call_soon_threadsafe(
self._report_result, future, result, exception
)
self.queue.task_done()
def stop(self, f: asyncio.Task | None = None) -> None:
self.stopping = True
self.queue.put_nowait(None)
self.workers.discard(self)
try:
self.idle_workers.remove(self)
except ValueError:
pass
_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
"_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval],
*args: object,
cancellable: bool = False,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
await checkpoint()
# If this is the first run in this event loop thread, set up the necessary variables
try:
idle_workers = _threadpool_idle_workers.get()
workers = _threadpool_workers.get()
except LookupError:
idle_workers = deque()
workers = set()
_threadpool_idle_workers.set(idle_workers)
_threadpool_workers.set(workers)
async with (limiter or current_default_thread_limiter()):
with CancelScope(shield=not cancellable):
future: asyncio.Future = asyncio.Future()
root_task = find_root_task()
if not idle_workers:
worker = WorkerThread(root_task, workers, idle_workers)
worker.start()
workers.add(worker)
root_task.add_done_callback(worker.stop)
else:
worker = idle_workers.pop()
# Prune any other workers that have been idle for MAX_IDLE_TIME seconds or longer
now = current_time()
while idle_workers:
if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME:
break
expired_worker = idle_workers.popleft()
expired_worker.root_task.remove_done_callback(expired_worker.stop)
expired_worker.stop()
context = copy_context()
context.run(sniffio.current_async_library_cvar.set, None)
worker.queue.put_nowait((context, func, args, future))
return await future
def run_sync_from_thread(
func: Callable[..., T_Retval],
*args: object,
loop: asyncio.AbstractEventLoop | None = None,
) -> T_Retval:
@wraps(func)
def wrapper() -> None:
try:
f.set_result(func(*args))
except BaseException as exc:
f.set_exception(exc)
if not isinstance(exc, Exception):
raise
f: concurrent.futures.Future[T_Retval] = Future()
loop = loop or threadlocals.loop
loop.call_soon_threadsafe(wrapper)
return f.result()
def run_async_from_thread(
func: Callable[..., Awaitable[T_Retval]], *args: object
) -> T_Retval:
f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
func(*args), threadlocals.loop
)
return f.result()
class BlockingPortal(abc.BlockingPortal):
def __new__(cls) -> BlockingPortal:
return object.__new__(cls)
def __init__(self) -> None:
super().__init__()
self._loop = get_running_loop()
def _spawn_task_from_thread(
self,
func: Callable,
args: tuple,
kwargs: dict[str, Any],
name: object,
future: Future,
) -> None:
run_sync_from_thread(
partial(self._task_group.start_soon, name=name),
self._call_func,
func,
args,
kwargs,
future,
loop=self._loop,
)
#
# Subprocesses
#
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
_stream: asyncio.StreamReader
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._stream.read(max_bytes)
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
self._stream.feed_eof()
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
_stream: asyncio.StreamWriter
async def send(self, item: bytes) -> None:
self._stream.write(item)
await self._stream.drain()
async def aclose(self) -> None:
self._stream.close()
@dataclass(eq=False)
class Process(abc.Process):
_process: asyncio.subprocess.Process
_stdin: StreamWriterWrapper | None
_stdout: StreamReaderWrapper | None
_stderr: StreamReaderWrapper | None
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> int | None:
return self._process.returncode
@property
def stdin(self) -> abc.ByteSendStream | None:
return self._stdin
@property
def stdout(self) -> abc.ByteReceiveStream | None:
return self._stdout
@property
def stderr(self) -> abc.ByteReceiveStream | None:
return self._stderr
async def open_process(
command: str | bytes | Sequence[str | bytes],
*,
shell: bool,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
cwd: str | bytes | PathLike | None = None,
env: Mapping[str, str] | None = None,
start_new_session: bool = False,
) -> Process:
await checkpoint()
if shell:
process = await asyncio.create_subprocess_shell(
cast(Union[str, bytes], command),
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
env=env,
start_new_session=start_new_session,
)
else:
process = await asyncio.create_subprocess_exec(
*command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
env=env,
start_new_session=start_new_session,
)
stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
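# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# Launching a subprocess and reading its output through the wrappers above;
# "echo hello" is an arbitrary example command (a POSIX shell is assumed).
async def _example_open_process_usage() -> None:
    import subprocess

    process = await open_process(
        "echo hello",
        shell=True,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
    async with process:  # abc.Process is an AsyncResource, so this calls aclose()
        assert process.stdout is not None
        output = await process.stdout.receive()
        assert output.strip() == b"hello"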
def _forcibly_shutdown_process_pool_on_exit(
workers: set[Process], _task: object
) -> None:
"""
Forcibly shuts down worker processes belonging to this event loop."""
child_watcher: asyncio.AbstractChildWatcher | None
try:
child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
except NotImplementedError:
child_watcher = None
# Close as much as possible (w/o async/await) to avoid warnings
for process in workers:
if process.returncode is None:
continue
process._stdin._stream._transport.close() # type: ignore[union-attr]
process._stdout._stream._transport.close() # type: ignore[union-attr]
process._stderr._stream._transport.close() # type: ignore[union-attr]
process.kill()
if child_watcher:
child_watcher.remove_child_handler(process.pid)
async def _shutdown_process_pool_on_exit(workers: set[Process]) -> None:
"""
Shuts down worker processes belonging to this event loop.
NOTE: this only works when the event loop was started using asyncio.run() or anyio.run().
"""
process: Process
try:
await sleep(math.inf)
except asyncio.CancelledError:
for process in workers:
if process.returncode is None:
process.kill()
for process in workers:
await process.aclose()
def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
kwargs: dict[str, Any] = (
{"name": "AnyIO process pool shutdown task"} if _native_task_names else {}
)
create_task(_shutdown_process_pool_on_exit(workers), **kwargs)
find_root_task().add_done_callback(
partial(_forcibly_shutdown_process_pool_on_exit, workers)
)
#
# Sockets and networking
#
class StreamProtocol(asyncio.Protocol):
read_queue: deque[bytes]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Exception | None = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque()
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
cast(asyncio.Transport, transport).set_write_buffer_limits(0)
def connection_lost(self, exc: Exception | None) -> None:
if exc:
self.exception = BrokenResourceError()
self.exception.__cause__ = exc
self.read_event.set()
self.write_event.set()
def data_received(self, data: bytes) -> None:
self.read_queue.append(data)
self.read_event.set()
def eof_received(self) -> bool | None:
self.read_event.set()
return True
def pause_writing(self) -> None:
self.write_event = asyncio.Event()
def resume_writing(self) -> None:
self.write_event.set()
class DatagramProtocol(asyncio.DatagramProtocol):
read_queue: deque[tuple[bytes, IPSockAddrType]]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Exception | None = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque(maxlen=100) # arbitrary value
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
def connection_lost(self, exc: Exception | None) -> None:
self.read_event.set()
self.write_event.set()
def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
addr = convert_ipv6_sockaddr(addr)
self.read_queue.append((data, addr))
self.read_event.set()
def error_received(self, exc: Exception) -> None:
self.exception = exc
def pause_writing(self) -> None:
self.write_event.clear()
def resume_writing(self) -> None:
self.write_event.set()
class SocketStream(abc.SocketStream):
def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
await checkpoint()
if (
not self._protocol.read_event.is_set()
and not self._transport.is_closing()
):
self._transport.resume_reading()
await self._protocol.read_event.wait()
self._transport.pause_reading()
try:
chunk = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
elif self._protocol.exception:
raise self._protocol.exception
else:
raise EndOfStream from None
if len(chunk) > max_bytes:
# Split the oversized chunk
chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
self._protocol.read_queue.appendleft(leftover)
# If the read queue is empty, clear the flag so that the next call will block until
# data is available
if not self._protocol.read_queue:
self._protocol.read_event.clear()
return chunk
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
if self._closed:
raise ClosedResourceError
elif self._protocol.exception is not None:
raise self._protocol.exception
try:
self._transport.write(item)
except RuntimeError as exc:
if self._transport.is_closing():
raise BrokenResourceError from exc
else:
raise
await self._protocol.write_event.wait()
async def send_eof(self) -> None:
try:
self._transport.write_eof()
except OSError:
pass
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
try:
self._transport.write_eof()
except OSError:
pass
self._transport.close()
await sleep(0)
self._transport.abort()
class UNIXSocketStream(abc.SocketStream):
_receive_future: asyncio.Future | None = None
_send_future: asyncio.Future | None = None
_closing = False
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._loop = get_running_loop()
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f: object) -> None:
del self._receive_future
loop.remove_reader(self.__raw_socket)
f = self._receive_future = asyncio.Future()
self._loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
def callback(f: object) -> None:
del self._send_future
loop.remove_writer(self.__raw_socket)
f = self._send_future = asyncio.Future()
self._loop.add_writer(self.__raw_socket, f.set_result, None)
f.add_done_callback(callback)
return f
async def send_eof(self) -> None:
with self._send_guard:
self._raw_socket.shutdown(socket.SHUT_WR)
async def receive(self, max_bytes: int = 65536) -> bytes:
loop = get_running_loop()
await checkpoint()
with self._receive_guard:
while True:
try:
data = self.__raw_socket.recv(max_bytes)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not data:
raise EndOfStream
return data
async def send(self, item: bytes) -> None:
loop = get_running_loop()
await checkpoint()
with self._send_guard:
view = memoryview(item)
while view:
try:
bytes_sent = self.__raw_socket.send(view)
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
view = view[bytes_sent:]
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
if not isinstance(msglen, int) or msglen < 0:
raise ValueError("msglen must be a non-negative integer")
if not isinstance(maxfds, int) or maxfds < 1:
raise ValueError("maxfds must be a positive integer")
loop = get_running_loop()
fds = array.array("i")
await checkpoint()
with self._receive_guard:
while True:
try:
message, ancdata, flags, addr = self.__raw_socket.recvmsg(
msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
)
except BlockingIOError:
await self._wait_until_readable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
else:
if not message and not ancdata:
raise EndOfStream
break
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
raise RuntimeError(
f"Received unexpected ancillary data; message = {message!r}, "
f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
)
fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return message, list(fds)
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
if not message:
raise ValueError("message must not be empty")
if not fds:
raise ValueError("fds must not be empty")
loop = get_running_loop()
filenos: list[int] = []
for fd in fds:
if isinstance(fd, int):
filenos.append(fd)
elif isinstance(fd, IOBase):
filenos.append(fd.fileno())
fdarray = array.array("i", filenos)
await checkpoint()
with self._send_guard:
while True:
try:
# The ignore can be removed after mypy picks up
# https://github.com/python/typeshed/pull/5545
self.__raw_socket.sendmsg(
[message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
)
break
except BlockingIOError:
await self._wait_until_writable(loop)
except OSError as exc:
if self._closing:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
async def aclose(self) -> None:
if not self._closing:
self._closing = True
if self.__raw_socket.fileno() != -1:
self.__raw_socket.close()
if self._receive_future:
self._receive_future.set_result(None)
if self._send_future:
self._send_future.set_result(None)
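# --- Illustrative usage sketch (editor's addition; not part of the anyio source) ---
# SCM_RIGHTS file descriptor passing over a socketpair, using the stream above
# (POSIX only). The one-byte payload is arbitrary; the protocol merely requires a
# non-empty message alongside the descriptors.
async def _example_fd_passing() -> None:
    import os
    import tempfile

    left_raw, right_raw = socket.socketpair(socket.AF_UNIX)
    left_raw.setblocking(False)
    right_raw.setblocking(False)
    left, right = UNIXSocketStream(left_raw), UNIXSocketStream(right_raw)
    with tempfile.TemporaryFile() as f:
        # sendmsg() duplicates the descriptor, so the file may be closed afterwards
        await left.send_fds(b"\x00", [f.fileno()])
    message, fds = await right.receive_fds(1, maxfds=1)
    assert message == b"\x00" and len(fds) == 1
    os.close(fds[0])
    await left.aclose()
    await right.aclose()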
class TCPSocketListener(abc.SocketListener):
_accept_scope: CancelScope | None = None
_closed = False
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
self._accept_guard = ResourceGuard("accepting connections from")
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
async def accept(self) -> abc.SocketStream:
if self._closed:
raise ClosedResourceError
with self._accept_guard:
await checkpoint()
with CancelScope() as self._accept_scope:
try:
client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
except asyncio.CancelledError:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
if self._closed:
raise ClosedResourceError from None
raise
finally:
self._accept_scope = None
client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
transport, protocol = await self._loop.connect_accepted_socket(
StreamProtocol, client_sock
)
return SocketStream(transport, protocol)
async def aclose(self) -> None:
if self._closed:
return
self._closed = True
if self._accept_scope:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except (ValueError, NotImplementedError):
pass
self._accept_scope.cancel()
await sleep(0)
self._raw_socket.close()
class UNIXSocketListener(abc.SocketListener):
def __init__(self, raw_socket: socket.socket):
self.__raw_socket = raw_socket
self._loop = get_running_loop()
self._accept_guard = ResourceGuard("accepting connections from")
self._closed = False
async def accept(self) -> abc.SocketStream:
await checkpoint()
with self._accept_guard:
while True:
try:
client_sock, _ = self.__raw_socket.accept()
client_sock.setblocking(False)
return UNIXSocketStream(client_sock)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
self._loop.add_reader(self.__raw_socket, f.set_result, None)
f.add_done_callback(
lambda _: self._loop.remove_reader(self.__raw_socket)
)
await f
except OSError as exc:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from exc
async def aclose(self) -> None:
self._closed = True
self.__raw_socket.close()
@property
def _raw_socket(self) -> socket.socket:
return self.__raw_socket
class UDPSocket(abc.UDPSocket):
def __init__(
self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> tuple[bytes, IPSockAddrType]:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
return self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
def __init__(
self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard("reading from")
self._send_guard = ResourceGuard("writing to")
self._closed = False
@property
def _raw_socket(self) -> socket.socket:
return self._transport.get_extra_info("socket")
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
async def receive(self) -> bytes:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
packet = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
return packet[0]
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(item)
async def connect_tcp(
host: str, port: int, local_addr: tuple[str, int] | None = None
) -> SocketStream:
transport, protocol = cast(
Tuple[asyncio.Transport, StreamProtocol],
await get_running_loop().create_connection(
StreamProtocol, host, port, local_addr=local_addr
),
)
transport.pause_reading()
return SocketStream(transport, protocol)
async def connect_unix(path: str) -> UNIXSocketStream:
await checkpoint()
loop = get_running_loop()
raw_socket = socket.socket(socket.AF_UNIX)
raw_socket.setblocking(False)
while True:
try:
raw_socket.connect(path)
except BlockingIOError:
f: asyncio.Future = asyncio.Future()
loop.add_writer(raw_socket, f.set_result, None)
f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
await f
except BaseException:
raw_socket.close()
raise
else:
return UNIXSocketStream(raw_socket)
async def create_udp_socket(
family: socket.AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
result = await get_running_loop().create_datagram_endpoint(
DatagramProtocol,
local_addr=local_address,
remote_addr=remote_address,
family=family,
reuse_port=reuse_port,
)
transport = result[0]
protocol = result[1]
if protocol.exception:
transport.close()
raise protocol.exception
if not remote_address:
return UDPSocket(transport, protocol)
else:
return ConnectedUDPSocket(transport, protocol)
async def getaddrinfo(
host: bytes | str,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> GetAddrInfoReturnType:
# https://github.com/python/typeshed/pull/4304
result = await get_running_loop().getaddrinfo(
host, port, family=family, type=type, proto=proto, flags=flags
)
return cast(GetAddrInfoReturnType, result)
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> tuple[str, str]:
return await get_running_loop().getnameinfo(sockaddr, flags)
_read_events: RunVar[dict[Any, asyncio.Event]] = RunVar("read_events")
_write_events: RunVar[dict[Any, asyncio.Event]] = RunVar("write_events")
async def wait_socket_readable(sock: socket.socket) -> None:
await checkpoint()
try:
read_events = _read_events.get()
except LookupError:
read_events = {}
_read_events.set(read_events)
if read_events.get(sock):
raise BusyResourceError("reading from") from None
loop = get_running_loop()
event = read_events[sock] = asyncio.Event()
loop.add_reader(sock, event.set)
try:
await event.wait()
finally:
if read_events.pop(sock, None) is not None:
loop.remove_reader(sock)
readable = True
else:
readable = False
if not readable:
raise ClosedResourceError
async def wait_socket_writable(sock: socket.socket) -> None:
await checkpoint()
try:
write_events = _write_events.get()
except LookupError:
write_events = {}
_write_events.set(write_events)
if write_events.get(sock):
raise BusyResourceError("writing to") from None
loop = get_running_loop()
event = write_events[sock] = asyncio.Event()
loop.add_writer(sock.fileno(), event.set)
try:
await event.wait()
finally:
if write_events.pop(sock, None) is not None:
loop.remove_writer(sock)
writable = True
else:
writable = False
if not writable:
raise ClosedResourceError
#
# Synchronization
#
class Event(BaseEvent):
def __new__(cls) -> Event:
return object.__new__(cls)
def __init__(self) -> None:
self._event = asyncio.Event()
def set(self) -> DeprecatedAwaitable:
self._event.set()
return DeprecatedAwaitable(self.set)
def is_set(self) -> bool:
return self._event.is_set()
async def wait(self) -> None:
if await self._event.wait():
await checkpoint()
def statistics(self) -> EventStatistics:
return EventStatistics(len(self._event._waiters)) # type: ignore[attr-defined]
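# Illustrative sketch (hypothetical helper, not part of the backend API):
# one task waits on the Event while another sets it.
async def _example_event_usage() -> None:
    done = Event()
    async def setter() -> None:
        await sleep(0.01)
        done.set()
    setter_task = asyncio.ensure_future(setter())
    await done.wait()  # returns once set() has been called
    await setter_task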
class CapacityLimiter(BaseCapacityLimiter):
_total_tokens: float = 0
def __new__(cls, total_tokens: float) -> CapacityLimiter:
return object.__new__(cls)
def __init__(self, total_tokens: float):
self._borrowers: set[Any] = set()
self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
self.total_tokens = total_tokens
async def __aenter__(self) -> None:
await self.acquire()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.release()
@property
def total_tokens(self) -> float:
return self._total_tokens
@total_tokens.setter
def total_tokens(self, value: float) -> None:
if not isinstance(value, int) and not math.isinf(value):
raise TypeError("total_tokens must be an int or math.inf")
if value < 1:
raise ValueError("total_tokens must be >= 1")
old_value = self._total_tokens
self._total_tokens = value
events = []
for event in self._wait_queue.values():
if value <= old_value:
break
if not event.is_set():
events.append(event)
old_value += 1
for event in events:
event.set()
@property
def borrowed_tokens(self) -> int:
return len(self._borrowers)
@property
def available_tokens(self) -> float:
return self._total_tokens - len(self._borrowers)
def acquire_nowait(self) -> DeprecatedAwaitable:
self.acquire_on_behalf_of_nowait(current_task())
return DeprecatedAwaitable(self.acquire_nowait)
def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
if borrower in self._borrowers:
raise RuntimeError(
"this borrower is already holding one of this CapacityLimiter's "
"tokens"
)
if self._wait_queue or len(self._borrowers) >= self._total_tokens:
raise WouldBlock
self._borrowers.add(borrower)
return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)
async def acquire(self) -> None:
return await self.acquire_on_behalf_of(current_task())
async def acquire_on_behalf_of(self, borrower: object) -> None:
await checkpoint_if_cancelled()
try:
self.acquire_on_behalf_of_nowait(borrower)
except WouldBlock:
event = asyncio.Event()
self._wait_queue[borrower] = event
try:
await event.wait()
except BaseException:
self._wait_queue.pop(borrower, None)
raise
self._borrowers.add(borrower)
else:
try:
await cancel_shielded_checkpoint()
except BaseException:
self.release()
raise
def release(self) -> None:
self.release_on_behalf_of(current_task())
def release_on_behalf_of(self, borrower: object) -> None:
try:
self._borrowers.remove(borrower)
except KeyError:
raise RuntimeError(
"this borrower isn't holding any of this CapacityLimiter's " "tokens"
) from None
# Notify the next task in line if this limiter has free capacity now
if self._wait_queue and len(self._borrowers) < self._total_tokens:
event = self._wait_queue.popitem(last=False)[1]
event.set()
def statistics(self) -> CapacityLimiterStatistics:
return CapacityLimiterStatistics(
self.borrowed_tokens,
self.total_tokens,
tuple(self._borrowers),
len(self._wait_queue),
)
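# Illustrative sketch (hypothetical helper, not part of the backend API):
# using the limiter as an async context manager so that at most
# total_tokens tasks run the guarded section at the same time.
async def _example_limiter_usage() -> None:
    limiter = CapacityLimiter(2)
    async def worker() -> None:
        async with limiter:  # blocks while two other tasks hold tokens
            await sleep(0.01)
    await asyncio.gather(*(worker() for _ in range(5)))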
_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
def current_default_thread_limiter() -> CapacityLimiter:
try:
return _default_thread_limiter.get()
except LookupError:
limiter = CapacityLimiter(40)
_default_thread_limiter.set(limiter)
return limiter
#
# Operating system signals
#
class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
def __init__(self, signals: tuple[int, ...]):
self._signals = signals
self._loop = get_running_loop()
self._signal_queue: deque[int] = deque()
self._future: asyncio.Future = asyncio.Future()
self._handled_signals: set[int] = set()
def _deliver(self, signum: int) -> None:
self._signal_queue.append(signum)
if not self._future.done():
self._future.set_result(None)
def __enter__(self) -> _SignalReceiver:
for sig in set(self._signals):
self._loop.add_signal_handler(sig, self._deliver, sig)
self._handled_signals.add(sig)
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
for sig in self._handled_signals:
self._loop.remove_signal_handler(sig)
return None
def __aiter__(self) -> _SignalReceiver:
return self
async def __anext__(self) -> int:
await checkpoint()
if not self._signal_queue:
self._future = asyncio.Future()
await self._future
return self._signal_queue.popleft()
def open_signal_receiver(*signals: int) -> _SignalReceiver:
return _SignalReceiver(signals)
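# Illustrative sketch (hypothetical and POSIX-only): iterate over delivered
# signal numbers; ``signal`` is imported locally since this module does not
# otherwise need it.
async def _example_signal_usage() -> None:
    import signal
    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as receiver:
        async for signum in receiver:
            print(f"received signal {signum}")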
#
# Testing and debugging
#
def _create_task_info(task: asyncio.Task) -> TaskInfo:
task_state = _task_states.get(task)
if task_state is None:
name = task.get_name() if _native_task_names else None
parent_id = None
else:
name = task_state.name
parent_id = task_state.parent_id
return TaskInfo(id(task), parent_id, name, get_coro(task))
def get_current_task() -> TaskInfo:
return _create_task_info(current_task()) # type: ignore[arg-type]
def get_running_tasks() -> list[TaskInfo]:
return [_create_task_info(task) for task in all_tasks() if not task.done()]
async def wait_all_tasks_blocked() -> None:
await checkpoint()
this_task = current_task()
while True:
for task in all_tasks():
if task is this_task:
continue
if task._fut_waiter is None or task._fut_waiter.done(): # type: ignore[attr-defined]
await sleep(0.1)
break
else:
return
class TestRunner(abc.TestRunner):
def __init__(
self,
debug: bool = False,
use_uvloop: bool = False,
policy: asyncio.AbstractEventLoopPolicy | None = None,
):
self._exceptions: list[BaseException] = []
_maybe_set_event_loop_policy(policy, use_uvloop)
self._loop = asyncio.new_event_loop()
self._loop.set_debug(debug)
self._loop.set_exception_handler(self._exception_handler)
asyncio.set_event_loop(self._loop)
def _cancel_all_tasks(self) -> None:
to_cancel = all_tasks(self._loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
self._loop.run_until_complete(
asyncio.gather(*to_cancel, return_exceptions=True)
)
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
raise cast(BaseException, task.exception())
def _exception_handler(
self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
) -> None:
if isinstance(context.get("exception"), Exception):
self._exceptions.append(context["exception"])
else:
loop.default_exception_handler(context)
def _raise_async_exceptions(self) -> None:
# Re-raise any exceptions raised in asynchronous callbacks
if self._exceptions:
exceptions, self._exceptions = self._exceptions, []
if len(exceptions) == 1:
raise exceptions[0]
elif exceptions:
raise ExceptionGroup(exceptions)
def close(self) -> None:
try:
self._cancel_all_tasks()
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
finally:
asyncio.set_event_loop(None)
self._loop.close()
def run_asyncgen_fixture(
self,
fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
kwargs: dict[str, Any],
) -> Iterable[T_Retval]:
async def fixture_runner() -> None:
agen = fixture_func(**kwargs)
try:
retval = await agen.asend(None)
self._raise_async_exceptions()
except BaseException as exc:
f.set_exception(exc)
return
else:
f.set_result(retval)
await event.wait()
try:
await agen.asend(None)
except StopAsyncIteration:
pass
else:
await agen.aclose()
raise RuntimeError("Async generator fixture did not stop")
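        # Handshake: fixture_runner resolves ``f`` with the first yielded
        # value (or its exception), then parks on ``event`` until the test
        # has consumed the fixture.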
f = self._loop.create_future()
event = asyncio.Event()
fixture_task = self._loop.create_task(fixture_runner())
self._loop.run_until_complete(f)
yield f.result()
event.set()
self._loop.run_until_complete(fixture_task)
self._raise_async_exceptions()
def run_fixture(
self,
fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
kwargs: dict[str, Any],
) -> T_Retval:
retval = self._loop.run_until_complete(fixture_func(**kwargs))
self._raise_async_exceptions()
return retval
def run_test(
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
) -> None:
try:
self._loop.run_until_complete(test_func(**kwargs))
except Exception as exc:
self._exceptions.append(exc)
self._raise_async_exceptions()
| 67,056 | Python | 30.660529 | 97 | 0.577711 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/exceptions.py | from typing import Any, Dict, Optional, Sequence, Type
from pydantic import BaseModel, ValidationError, create_model
from pydantic.error_wrappers import ErrorList
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.exceptions import WebSocketException as WebSocketException # noqa: F401
class HTTPException(StarletteHTTPException):
def __init__(
self,
status_code: int,
detail: Any = None,
headers: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(status_code=status_code, detail=detail, headers=headers)
RequestErrorModel: Type[BaseModel] = create_model("Request")
WebSocketErrorModel: Type[BaseModel] = create_model("WebSocket")
class FastAPIError(RuntimeError):
"""
A generic, FastAPI-specific error.
"""
class RequestValidationError(ValidationError):
def __init__(self, errors: Sequence[ErrorList], *, body: Any = None) -> None:
self.body = body
super().__init__(errors, RequestErrorModel)
class WebSocketRequestValidationError(ValidationError):
def __init__(self, errors: Sequence[ErrorList]) -> None:
super().__init__(errors, WebSocketErrorModel)
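# Illustrative sketch (hypothetical helper, not part of this module): a path
# operation typically raises HTTPException to produce an error response.
def _example_raise_not_found(item_id: int) -> None:
    raise HTTPException(
        status_code=404,
        detail=f"Item {item_id} not found",
        headers={"X-Error": "item-missing"},
    )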
| 1,205 | Python | 30.736841 | 87 | 0.711203 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/datastructures.py | from typing import Any, Callable, Dict, Iterable, Type, TypeVar
from starlette.datastructures import URL as URL # noqa: F401
from starlette.datastructures import Address as Address # noqa: F401
from starlette.datastructures import FormData as FormData # noqa: F401
from starlette.datastructures import Headers as Headers # noqa: F401
from starlette.datastructures import QueryParams as QueryParams # noqa: F401
from starlette.datastructures import State as State # noqa: F401
from starlette.datastructures import UploadFile as StarletteUploadFile
class UploadFile(StarletteUploadFile):
@classmethod
def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]:
yield cls.validate
@classmethod
def validate(cls: Type["UploadFile"], v: Any) -> Any:
if not isinstance(v, StarletteUploadFile):
raise ValueError(f"Expected UploadFile, received: {type(v)}")
return v
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update({"type": "string", "format": "binary"})
class DefaultPlaceholder:
"""
You shouldn't use this class directly.
It's used internally to recognize when a default value has been overwritten, even
if the overridden default value was truthy.
"""
def __init__(self, value: Any):
self.value = value
def __bool__(self) -> bool:
return bool(self.value)
def __eq__(self, o: object) -> bool:
return isinstance(o, DefaultPlaceholder) and o.value == self.value
DefaultType = TypeVar("DefaultType")
def Default(value: DefaultType) -> DefaultType:
"""
You shouldn't use this function directly.
It's used internally to recognize when a default value has been overwritten, even
if the overridden default value was truthy.
"""
return DefaultPlaceholder(value) # type: ignore
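# Illustrative sketch (hypothetical helper): a DefaultPlaceholder check tells
# a still-default argument apart from a user-supplied one, even when both
# values are truthy.
def _example_was_overridden(value: Any = Default("sentinel")) -> bool:
    return not isinstance(value, DefaultPlaceholder)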
| 1,905 | Python | 32.438596 | 85 | 0.700262 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/responses.py | from typing import Any
from starlette.responses import FileResponse as FileResponse # noqa
from starlette.responses import HTMLResponse as HTMLResponse # noqa
from starlette.responses import JSONResponse as JSONResponse # noqa
from starlette.responses import PlainTextResponse as PlainTextResponse # noqa
from starlette.responses import RedirectResponse as RedirectResponse # noqa
from starlette.responses import Response as Response # noqa
from starlette.responses import StreamingResponse as StreamingResponse # noqa
try:
import ujson
except ImportError: # pragma: nocover
ujson = None # type: ignore
try:
import orjson
except ImportError: # pragma: nocover
orjson = None # type: ignore
class UJSONResponse(JSONResponse):
def render(self, content: Any) -> bytes:
assert ujson is not None, "ujson must be installed to use UJSONResponse"
return ujson.dumps(content, ensure_ascii=False).encode("utf-8")
class ORJSONResponse(JSONResponse):
media_type = "application/json"
def render(self, content: Any) -> bytes:
assert orjson is not None, "orjson must be installed to use ORJSONResponse"
return orjson.dumps(
content, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SERIALIZE_NUMPY
)
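# Illustrative sketch (hypothetical helper; requires orjson): constructing an
# ORJSONResponse renders the payload immediately, so the serialized body is
# available on the instance.
def _example_orjson_body(payload: Any) -> bytes:
    return ORJSONResponse(payload).body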
| 1,279 | Python | 33.594594 | 83 | 0.745895 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/background.py | from starlette.background import BackgroundTasks as BackgroundTasks # noqa
| 76 | Python | 37.499981 | 75 | 0.855263 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/templating.py | from starlette.templating import Jinja2Templates as Jinja2Templates # noqa
| 76 | Python | 37.499981 | 75 | 0.855263 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/__init__.py | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.92.0"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
| 1,080 | Python | 40.576922 | 92 | 0.827778 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/websockets.py | from starlette.websockets import WebSocket as WebSocket # noqa
from starlette.websockets import WebSocketDisconnect as WebSocketDisconnect # noqa
from starlette.websockets import WebSocketState as WebSocketState # noqa
| 222 | Python | 54.749986 | 83 | 0.851351 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/utils.py | import functools
import re
import warnings
from dataclasses import is_dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union, cast
import fastapi
from fastapi.datastructures import DefaultPlaceholder, DefaultType
from fastapi.openapi.constants import REF_PREFIX
from pydantic import BaseConfig, BaseModel, create_model
from pydantic.class_validators import Validator
from pydantic.fields import FieldInfo, ModelField, UndefinedType
from pydantic.schema import model_process_schema
from pydantic.utils import lenient_issubclass
if TYPE_CHECKING: # pragma: nocover
from .routing import APIRoute
def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
if status_code is None:
return True
# Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
if status_code in {
"default",
"1XX",
"2XX",
"3XX",
"4XX",
"5XX",
}:
return True
current_status_code = int(status_code)
return not (current_status_code < 200 or current_status_code in {204, 304})
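# Illustrative examples of the rule above (hypothetical values):
# is_body_allowed_for_status_code(200) is True, while 204 ("No Content") and
# 304 ("Not Modified") yield False, and patterned ranges such as "2XX" or
# "default" are always allowed.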
def get_model_definitions(
*,
flat_models: Set[Union[Type[BaseModel], Type[Enum]]],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Dict[str, Any]:
definitions: Dict[str, Dict[str, Any]] = {}
for model in flat_models:
m_schema, m_definitions, m_nested_models = model_process_schema(
model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
definitions.update(m_definitions)
model_name = model_name_map[model]
if "description" in m_schema:
m_schema["description"] = m_schema["description"].split("\f")[0]
definitions[model_name] = m_schema
return definitions
def get_path_param_names(path: str) -> Set[str]:
return set(re.findall("{(.*?)}", path))
def create_response_field(
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]] = None,
default: Optional[Any] = None,
required: Union[bool, UndefinedType] = True,
model_config: Type[BaseConfig] = BaseConfig,
field_info: Optional[FieldInfo] = None,
alias: Optional[str] = None,
) -> ModelField:
"""
Create a new response field. Raises if type_ is invalid.
"""
class_validators = class_validators or {}
field_info = field_info or FieldInfo()
response_field = functools.partial(
ModelField,
name=name,
type_=type_,
class_validators=class_validators,
default=default,
required=required,
model_config=model_config,
alias=alias,
)
try:
return response_field(field_info=field_info)
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
"Invalid args for response field! Hint: "
f"check that {type_} is a valid Pydantic field type. "
"If you are using a return type annotation that is not a valid Pydantic "
"field (e.g. Union[Response, dict, None]) you can disable generating the "
"response model from the type annotation with the path operation decorator "
"parameter response_model=None. Read more: "
"https://fastapi.tiangolo.com/tutorial/response-model/"
) from None
def create_cloned_field(
field: ModelField,
*,
cloned_types: Optional[Dict[Type[BaseModel], Type[BaseModel]]] = None,
) -> ModelField:
    # cloned_types caches already-cloned types, to support recursive models
if cloned_types is None:
cloned_types = {}
original_type = field.type_
if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
original_type = original_type.__pydantic_model__
use_type = original_type
if lenient_issubclass(original_type, BaseModel):
original_type = cast(Type[BaseModel], original_type)
use_type = cloned_types.get(original_type)
if use_type is None:
use_type = create_model(original_type.__name__, __base__=original_type)
cloned_types[original_type] = use_type
for f in original_type.__fields__.values():
use_type.__fields__[f.name] = create_cloned_field(
f, cloned_types=cloned_types
)
new_field = create_response_field(name=field.name, type_=use_type)
new_field.has_alias = field.has_alias
new_field.alias = field.alias
new_field.class_validators = field.class_validators
new_field.default = field.default
new_field.required = field.required
new_field.model_config = field.model_config
new_field.field_info = field.field_info
new_field.allow_none = field.allow_none
new_field.validate_always = field.validate_always
if field.sub_fields:
new_field.sub_fields = [
create_cloned_field(sub_field, cloned_types=cloned_types)
for sub_field in field.sub_fields
]
if field.key_field:
new_field.key_field = create_cloned_field(
field.key_field, cloned_types=cloned_types
)
new_field.validators = field.validators
new_field.pre_validators = field.pre_validators
new_field.post_validators = field.post_validators
new_field.parse_json = field.parse_json
new_field.shape = field.shape
new_field.populate_validators()
return new_field
def generate_operation_id_for_path(
*, name: str, path: str, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.utils.generate_operation_id_for_path() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
operation_id = name + path
operation_id = re.sub(r"\W", "_", operation_id)
operation_id = operation_id + "_" + method.lower()
return operation_id
def generate_unique_id(route: "APIRoute") -> str:
operation_id = route.name + route.path_format
operation_id = re.sub(r"\W", "_", operation_id)
assert route.methods
operation_id = operation_id + "_" + list(route.methods)[0].lower()
return operation_id
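# Illustrative example (hypothetical route): a GET operation named
# "read_item" mounted at "/items/{item_id}" yields the unique id
# "read_item_items__item_id__get".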
def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
for key, value in update_dict.items():
if (
key in main_dict
and isinstance(main_dict[key], dict)
and isinstance(value, dict)
):
deep_dict_update(main_dict[key], value)
elif (
key in main_dict
and isinstance(main_dict[key], list)
and isinstance(update_dict[key], list)
):
main_dict[key] = main_dict[key] + update_dict[key]
else:
main_dict[key] = value
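# Illustrative sketch (hypothetical data): nested dicts merge recursively,
# lists concatenate, and plain values are overwritten in place.
def _example_deep_dict_update() -> Dict[Any, Any]:
    main = {"a": {"x": 1}, "tags": ["t1"], "n": 1}
    deep_dict_update(main, {"a": {"y": 2}, "tags": ["t2"], "n": 3})
    return main  # {"a": {"x": 1, "y": 2}, "tags": ["t1", "t2"], "n": 3}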
def get_value_or_default(
first_item: Union[DefaultPlaceholder, DefaultType],
*extra_items: Union[DefaultPlaceholder, DefaultType],
) -> Union[DefaultPlaceholder, DefaultType]:
"""
Pass items or `DefaultPlaceholder`s by descending priority.
The first one to _not_ be a `DefaultPlaceholder` will be returned.
Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
"""
items = (first_item,) + extra_items
for item in items:
if not isinstance(item, DefaultPlaceholder):
return item
return first_item
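# Illustrative example (hypothetical values): get_value_or_default(
# Default(1), Default(2), 3, 4) returns 3, the first argument that is not a
# DefaultPlaceholder; when every argument is a placeholder, the first one is
# returned instead.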
| 7,371 | Python | 34.442308 | 102 | 0.646317 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/encoders.py | import dataclasses
from collections import defaultdict
from enum import Enum
from pathlib import PurePath
from types import GeneratorType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]
def generate_encoders_by_class_tuples(
type_encoder_map: Dict[Any, Callable[[Any], Any]]
) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]:
encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(
tuple
)
for type_, encoder in type_encoder_map.items():
encoders_by_class_tuples[encoder] += (type_,)
return encoders_by_class_tuples
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
def jsonable_encoder(
obj: Any,
include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None,
sqlalchemy_safe: bool = True,
) -> Any:
custom_encoder = custom_encoder or {}
if custom_encoder:
if type(obj) in custom_encoder:
return custom_encoder[type(obj)](obj)
else:
for encoder_type, encoder_instance in custom_encoder.items():
if isinstance(obj, encoder_type):
return encoder_instance(obj)
if include is not None and not isinstance(include, (set, dict)):
include = set(include)
if exclude is not None and not isinstance(exclude, (set, dict)):
exclude = set(exclude)
if isinstance(obj, BaseModel):
encoder = getattr(obj.__config__, "json_encoders", {})
if custom_encoder:
encoder.update(custom_encoder)
obj_dict = obj.dict(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
exclude_defaults=exclude_defaults,
)
if "__root__" in obj_dict:
obj_dict = obj_dict["__root__"]
return jsonable_encoder(
obj_dict,
exclude_none=exclude_none,
exclude_defaults=exclude_defaults,
custom_encoder=encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
if dataclasses.is_dataclass(obj):
obj_dict = dataclasses.asdict(obj)
return jsonable_encoder(
obj_dict,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, PurePath):
return str(obj)
if isinstance(obj, (str, int, float, type(None))):
return obj
if isinstance(obj, dict):
encoded_dict = {}
allowed_keys = set(obj.keys())
if include is not None:
allowed_keys &= set(include)
if exclude is not None:
allowed_keys -= set(exclude)
for key, value in obj.items():
if (
(
not sqlalchemy_safe
or (not isinstance(key, str))
or (not key.startswith("_sa"))
)
and (value is not None or not exclude_none)
and key in allowed_keys
):
encoded_key = jsonable_encoder(
key,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
encoded_value = jsonable_encoder(
value,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
encoded_dict[encoded_key] = encoded_value
return encoded_dict
if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
encoded_list = []
for item in obj:
encoded_list.append(
jsonable_encoder(
item,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
)
return encoded_list
if type(obj) in ENCODERS_BY_TYPE:
return ENCODERS_BY_TYPE[type(obj)](obj)
for encoder, classes_tuple in encoders_by_class_tuples.items():
if isinstance(obj, classes_tuple):
return encoder(obj)
try:
data = dict(obj)
except Exception as e:
errors: List[Exception] = []
errors.append(e)
try:
data = vars(obj)
except Exception as e:
errors.append(e)
raise ValueError(errors) from e
return jsonable_encoder(
data,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
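# Illustrative sketch (hypothetical data): jsonable_encoder turns objects
# such as datetimes into JSON-compatible primitives.
def _example_jsonable() -> Any:
    from datetime import datetime
    return jsonable_encoder({"when": datetime(2023, 1, 1), "answer": 42})
    # -> {"when": "2023-01-01T00:00:00", "answer": 42}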
| 5,962 | Python | 33.668604 | 88 | 0.559879 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/testclient.py | from starlette.testclient import TestClient as TestClient # noqa
| 66 | Python | 32.499984 | 65 | 0.833333 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/concurrency.py | from contextlib import AsyncExitStack as AsyncExitStack # noqa
from contextlib import asynccontextmanager as asynccontextmanager
from typing import AsyncGenerator, ContextManager, TypeVar
import anyio
from anyio import CapacityLimiter
from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa
from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa
from starlette.concurrency import ( # noqa
run_until_first_complete as run_until_first_complete,
)
_T = TypeVar("_T")
@asynccontextmanager
async def contextmanager_in_threadpool(
cm: ContextManager[_T],
) -> AsyncGenerator[_T, None]:
    # Blocking __exit__ while waiting on a free thread can create race
    # conditions/deadlocks if the context manager itself has its own internal
    # pool (e.g. a database connection pool). To avoid this we let __exit__
    # run without a capacity limit; since we're creating a new limiter for
    # each call, any non-zero limit works (1 is arbitrary)
exit_limiter = CapacityLimiter(1)
try:
yield await run_in_threadpool(cm.__enter__)
except Exception as e:
ok = bool(
await anyio.to_thread.run_sync(
cm.__exit__, type(e), e, None, limiter=exit_limiter
)
)
if not ok:
raise e
else:
await anyio.to_thread.run_sync(
cm.__exit__, None, None, None, limiter=exit_limiter
)
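# Illustrative sketch (hypothetical helper): running a blocking context
# manager, here a plain file handle, without stalling the event loop.
async def _example_cm_in_threadpool(path: str) -> str:
    async with contextmanager_in_threadpool(open(path)) as fh:
        return await run_in_threadpool(fh.read)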
| 1,468 | Python | 34.829267 | 88 | 0.685286 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/routing.py | import asyncio
import dataclasses
import email.message
import inspect
import json
from contextlib import AsyncExitStack
from enum import Enum, IntEnum
from typing import (
Any,
Callable,
Coroutine,
Dict,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
from fastapi import params
from fastapi.datastructures import Default, DefaultPlaceholder
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
get_typed_return_annotation,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.types import DecoratedCallable
from fastapi.utils import (
create_cloned_field,
create_response_field,
generate_unique_id,
get_value_or_default,
is_body_allowed_for_status_code,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import ModelField, Undefined
from pydantic.utils import lenient_issubclass
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import BaseRoute, Match
from starlette.routing import Mount as Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp, Scope
from starlette.websockets import WebSocket
def _prepare_response_content(
res: Any,
*,
exclude_unset: bool,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> Any:
if isinstance(res, BaseModel):
read_with_orm_mode = getattr(res.__config__, "read_with_orm_mode", None)
if read_with_orm_mode:
# Let from_orm extract the data from this model instead of converting
# it now to a dict.
# Otherwise there's no way to extract lazy data that requires attribute
# access instead of dict iteration, e.g. lazy relationships.
return res
return res.dict(
by_alias=True,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
elif isinstance(res, list):
return [
_prepare_response_content(
item,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
for item in res
]
elif isinstance(res, dict):
return {
k: _prepare_response_content(
v,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
for k, v in res.items()
}
elif dataclasses.is_dataclass(res):
return dataclasses.asdict(res)
return res
async def serialize_response(
*,
field: Optional[ModelField] = None,
response_content: Any,
include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
is_coroutine: bool = True,
) -> Any:
if field:
errors = []
response_content = _prepare_response_content(
response_content,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
else:
return jsonable_encoder(response_content)
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
else:
return await run_in_threadpool(dependant.call, **values)
def get_request_handler(
dependant: Dependant,
body_field: Optional[ModelField] = None,
status_code: Optional[int] = None,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(JSONResponse),
response_field: Optional[ModelField] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
dependency_overrides_provider: Optional[Any] = None,
) -> Callable[[Request], Coroutine[Any, Any, Response]]:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.field_info, params.Form)
if isinstance(response_class, DefaultPlaceholder):
actual_response_class: Type[Response] = response_class.value
else:
actual_response_class = response_class
async def app(request: Request) -> Response:
try:
body: Any = None
if body_field:
if is_body_form:
body = await request.form()
stack = request.scope.get("fastapi_astack")
assert isinstance(stack, AsyncExitStack)
stack.push_async_callback(body.close)
else:
body_bytes = await request.body()
if body_bytes:
json_body: Any = Undefined
content_type_value = request.headers.get("content-type")
if not content_type_value:
json_body = await request.json()
else:
message = email.message.Message()
message["content-type"] = content_type_value
if message.get_content_maintype() == "application":
subtype = message.get_content_subtype()
if subtype == "json" or subtype.endswith("+json"):
json_body = await request.json()
if json_body != Undefined:
body = json_body
else:
body = body_bytes
except json.JSONDecodeError as e:
raise RequestValidationError(
[ErrorWrapper(e, ("body", e.pos))], body=e.doc
) from e
except HTTPException:
raise
except Exception as e:
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
else:
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_args: Dict[str, Any] = {"background": background_tasks}
            # If status_code was set, use it; otherwise use the default from
            # the response class (in the case of a redirect it's 307)
current_status_code = (
status_code if status_code else sub_response.status_code
)
if current_status_code is not None:
response_args["status_code"] = current_status_code
if sub_response.status_code:
response_args["status_code"] = sub_response.status_code
content = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
exclude_defaults=response_model_exclude_defaults,
exclude_none=response_model_exclude_none,
is_coroutine=is_coroutine,
)
response = actual_response_class(content, **response_args)
if not is_body_allowed_for_status_code(response.status_code):
response.body = b""
response.headers.raw.extend(sub_response.headers.raw)
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Optional[Any] = None
) -> Callable[[WebSocket], Coroutine[Any, Any, Any]]:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable[..., Any],
*,
name: Optional[str] = None,
dependency_overrides_provider: Optional[Any] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
def matches(self, scope: Scope) -> Tuple[Match, Scope]:
match, child_scope = super().matches(scope)
if match != Match.NONE:
child_scope["route"] = self
return match, child_scope
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable[..., Any],
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
name: Optional[str] = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(
JSONResponse
),
dependency_overrides_provider: Optional[Any] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Union[
Callable[["APIRoute"], str], DefaultPlaceholder
] = Default(generate_unique_id),
) -> None:
self.path = path
self.endpoint = endpoint
if isinstance(response_model, DefaultPlaceholder):
return_annotation = get_typed_return_annotation(endpoint)
if lenient_issubclass(return_annotation, Response):
response_model = None
else:
response_model = return_annotation
self.response_model = response_model
self.summary = summary
self.response_description = response_description
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.response_model_exclude_defaults = response_model_exclude_defaults
self.response_model_exclude_none = response_model_exclude_none
self.include_in_schema = include_in_schema
self.response_class = response_class
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.openapi_extra = openapi_extra
self.generate_unique_id_function = generate_unique_id_function
self.tags = tags or []
self.responses = responses or {}
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
self.methods: Set[str] = {method.upper() for method in methods}
if isinstance(generate_unique_id_function, DefaultPlaceholder):
current_generate_unique_id: Callable[
["APIRoute"], str
] = generate_unique_id_function.value
else:
current_generate_unique_id = generate_unique_id_function
self.unique_id = self.operation_id or current_generate_unique_id(self)
# normalize enums e.g. http.HTTPStatus
if isinstance(status_code, IntEnum):
status_code = int(status_code)
self.status_code = status_code
if self.response_model:
assert is_body_allowed_for_status_code(
status_code
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
# By being a new field, no inheritance will be passed as is. A new model
# will be always created.
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0].strip()
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert is_body_allowed_for_status_code(
additional_status_code
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
assert callable(endpoint), "An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
response_model_exclude_defaults=self.response_model_exclude_defaults,
response_model_exclude_none=self.response_model_exclude_none,
dependency_overrides_provider=self.dependency_overrides_provider,
)
def matches(self, scope: Scope) -> Tuple[Match, Scope]:
match, child_scope = super().matches(scope)
if match != Match.NONE:
child_scope["route"] = self
return match, child_scope
class APIRouter(routing.Router):
def __init__(
self,
*,
prefix: str = "",
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
routes: Optional[List[routing.BaseRoute]] = None,
redirect_slashes: bool = True,
default: Optional[ASGIApp] = None,
dependency_overrides_provider: Optional[Any] = None,
route_class: Type[APIRoute] = APIRoute,
on_startup: Optional[Sequence[Callable[[], Any]]] = None,
on_shutdown: Optional[Sequence[Callable[[], Any]]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> None:
super().__init__(
routes=routes,
redirect_slashes=redirect_slashes,
default=default,
on_startup=on_startup,
on_shutdown=on_shutdown,
)
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
self.prefix = prefix
self.tags: List[Union[str, Enum]] = tags or []
        self.dependencies = list(dependencies or [])
self.deprecated = deprecated
self.include_in_schema = include_in_schema
self.responses = responses or {}
self.callbacks = callbacks or []
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
self.generate_unique_id_function = generate_unique_id_function
def route(
self,
path: str,
methods: Optional[List[str]] = None,
name: Optional[str] = None,
include_in_schema: bool = True,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_route(
path,
func,
methods=methods,
name=name,
include_in_schema=include_in_schema,
)
return func
return decorator
def add_api_route(
self,
path: str,
endpoint: Callable[..., Any],
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(
JSONResponse
),
name: Optional[str] = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Union[
Callable[[APIRoute], str], DefaultPlaceholder
] = Default(generate_unique_id),
) -> None:
route_class = route_class_override or self.route_class
responses = responses or {}
combined_responses = {**self.responses, **responses}
current_response_class = get_value_or_default(
response_class, self.default_response_class
)
current_tags = self.tags.copy()
if tags:
current_tags.extend(tags)
current_dependencies = self.dependencies.copy()
if dependencies:
current_dependencies.extend(dependencies)
current_callbacks = self.callbacks.copy()
if callbacks:
current_callbacks.extend(callbacks)
current_generate_unique_id = get_value_or_default(
generate_unique_id_function, self.generate_unique_id_function
)
route = route_class(
self.prefix + path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=current_tags,
dependencies=current_dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=combined_responses,
deprecated=deprecated or self.deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema and self.include_in_schema,
response_class=current_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=current_callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=current_generate_unique_id,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None
) -> None:
route = APIWebSocketRoute(
self.prefix + path,
endpoint=endpoint,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def websocket(
self, path: str, name: Optional[str] = None
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def websocket_route(
self, path: str, name: Union[str, None] = None
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path") # noqa: B009
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
use_response_class = get_value_or_default(
route.response_class,
router.default_response_class,
default_response_class,
self.default_response_class,
)
current_tags = []
if tags:
current_tags.extend(tags)
if route.tags:
current_tags.extend(route.tags)
current_dependencies: List[params.Depends] = []
if dependencies:
current_dependencies.extend(dependencies)
if route.dependencies:
current_dependencies.extend(route.dependencies)
current_callbacks = []
if callbacks:
current_callbacks.extend(callbacks)
if route.callbacks:
current_callbacks.extend(route.callbacks)
current_generate_unique_id = get_value_or_default(
route.generate_unique_id_function,
router.generate_unique_id_function,
generate_unique_id_function,
self.generate_unique_id_function,
)
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=current_tags,
dependencies=current_dependencies,
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated or deprecated or self.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
response_model_exclude_defaults=route.response_model_exclude_defaults,
response_model_exclude_none=route.response_model_exclude_none,
include_in_schema=route.include_in_schema
and self.include_in_schema
and include_in_schema,
response_class=use_response_class,
name=route.name,
route_class_override=type(route),
callbacks=current_callbacks,
openapi_extra=route.openapi_extra,
generate_unique_id_function=current_generate_unique_id,
)
elif isinstance(route, routing.Route):
methods = list(route.methods or [])
self.add_route(
prefix + route.path,
route.endpoint,
methods=methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def put(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def post(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def delete(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def options(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def head(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def patch(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def trace(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def on_event(
self, event_type: str
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_event_handler(event_type, func)
return func
return decorator
| 55317 | Python | 41.982129 | 90 | 0.603612 |
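
# --- Example: using the APIRouter API above (an illustrative sketch, not part
# --- of the bundled sources; the ``Item`` model and ``/items`` paths are
# --- invented). The ``get``/``post``/... decorators all funnel into
# --- ``api_route``, which calls ``add_api_route`` with the matching HTTP
# --- method, and ``include_router`` re-registers each route while merging
# --- prefix, tags, dependencies, callbacks and response classes.
from typing import List

from fastapi import APIRouter, FastAPI
from pydantic import BaseModel


class Item(BaseModel):
    name: str
    price: float


router = APIRouter(prefix="/items", tags=["items"])


@router.get("/", response_model=List[Item])
async def list_items() -> List[Item]:
    return []


@router.post("/", response_model=Item, status_code=201)
async def create_item(item: Item) -> Item:
    return item


app = FastAPI(title="Items demo")
app.include_router(router)
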
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/types.py | from typing import Any, Callable, TypeVar
DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any])
| 118 | Python | 28.749993 | 74 | 0.771186 |
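
# ``DecoratedCallable`` is bound to ``Callable[..., Any]`` so that decorators
# returning it (every route decorator in this package) preserve the decorated
# function's exact signature for type checkers. A minimal sketch (the
# ``logged`` decorator is invented for illustration):
from typing import Any, Callable, TypeVar

DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any])


def logged(func: DecoratedCallable) -> DecoratedCallable:
    # Register/inspect, then hand the function back unchanged -- the same
    # shape FastAPI's route decorators use.
    print(f"registered {func.__name__}")
    return func
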
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/staticfiles.py | from starlette.staticfiles import StaticFiles as StaticFiles # noqa
| 69 | Python | 33.999983 | 68 | 0.84058 |
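
# The module is a pure re-export so ``StaticFiles`` can be imported from the
# ``fastapi`` namespace. Typical mounting (sketch; a local ``static/``
# directory is assumed to exist):
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
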
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/requests.py | from starlette.requests import HTTPConnection as HTTPConnection # noqa: F401
from starlette.requests import Request as Request # noqa: F401
| 142 | Python | 46.666651 | 77 | 0.816901 |
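
# Also a pure re-export of Starlette's request objects. Declaring a
# ``Request`` parameter gives a path operation access to the raw ASGI
# request (sketch; the ``/client-info`` path is invented):
from fastapi import FastAPI, Request

app = FastAPI()


@app.get("/client-info")
async def client_info(request: Request) -> dict:
    return {
        "host": request.client.host if request.client else None,
        "user_agent": request.headers.get("user-agent"),
    }
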
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/params.py | from enum import Enum
from typing import Any, Callable, Dict, Optional, Sequence
from pydantic.fields import FieldInfo, Undefined
class ParamTypes(Enum):
query = "query"
header = "header"
path = "path"
cookie = "cookie"
class Param(FieldInfo):
in_: ParamTypes
def __init__(
self,
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
):
self.deprecated = deprecated
self.example = example
self.examples = examples
self.include_in_schema = include_in_schema
super().__init__(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
**extra,
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.default})"
class Path(Param):
in_ = ParamTypes.path
def __init__(
self,
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
):
self.in_ = self.in_
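        # The assignment above copies the class-level ``in_`` onto the
        # instance. Path parameters are always part of the URL and therefore
        # always required: the user-supplied ``default`` is intentionally
        # discarded and replaced with ``...`` (Ellipsis) in the call below.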
super().__init__(
default=...,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
deprecated=deprecated,
example=example,
examples=examples,
include_in_schema=include_in_schema,
**extra,
)
class Query(Param):
in_ = ParamTypes.query
def __init__(
self,
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
):
super().__init__(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
deprecated=deprecated,
example=example,
examples=examples,
include_in_schema=include_in_schema,
**extra,
)
class Header(Param):
in_ = ParamTypes.header
def __init__(
self,
default: Any = Undefined,
*,
alias: Optional[str] = None,
convert_underscores: bool = True,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
):
self.convert_underscores = convert_underscores
super().__init__(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
deprecated=deprecated,
example=example,
examples=examples,
include_in_schema=include_in_schema,
**extra,
)
class Cookie(Param):
in_ = ParamTypes.cookie
def __init__(
self,
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
):
super().__init__(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
deprecated=deprecated,
example=example,
examples=examples,
include_in_schema=include_in_schema,
**extra,
)
class Body(FieldInfo):
def __init__(
self,
default: Any = Undefined,
*,
embed: bool = False,
media_type: str = "application/json",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
):
self.embed = embed
self.media_type = media_type
self.example = example
self.examples = examples
super().__init__(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
**extra,
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.default})"
class Form(Body):
def __init__(
self,
default: Any,
*,
media_type: str = "application/x-www-form-urlencoded",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
):
super().__init__(
default=default,
embed=True,
media_type=media_type,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
**extra,
)
class File(Form):
def __init__(
self,
default: Any,
*,
media_type: str = "multipart/form-data",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
):
super().__init__(
default=default,
media_type=media_type,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
**extra,
)
class Depends:
def __init__(
self, dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True
):
self.dependency = dependency
self.use_cache = use_cache
def __repr__(self) -> str:
attr = getattr(self.dependency, "__name__", type(self.dependency).__name__)
cache = "" if self.use_cache else ", use_cache=False"
return f"{self.__class__.__name__}({attr}{cache})"
class Security(Depends):
def __init__(
self,
dependency: Optional[Callable[..., Any]] = None,
*,
scopes: Optional[Sequence[str]] = None,
use_cache: bool = True,
):
super().__init__(dependency=dependency, use_cache=use_cache)
self.scopes = scopes or []
| 10600 | Python | 26.824147 | 88 | 0.501604 |
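
# Note on the hierarchy above: ``Param`` (Query/Path/Header/Cookie) and
# ``Body`` (Form/File) subclass pydantic's ``FieldInfo``, so constraints such
# as ``gt``/``max_length``/``regex`` flow directly into the generated model
# field. These classes are normally created via the wrappers in
# ``fastapi.param_functions`` rather than instantiated directly (sketch):
from fastapi.params import Path, Query

q = Query(default=None, max_length=50)  # optional query parameter
p = Path(default=None, gt=0)            # supplied default is discarded ...
assert p.default is ...                 # ... Path always forces Ellipsis (required)
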
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/exception_handlers.py | from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from fastapi.utils import is_body_allowed_for_status_code
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_exception_handler(request: Request, exc: HTTPException) -> Response:
headers = getattr(exc, "headers", None)
if not is_body_allowed_for_status_code(exc.status_code):
return Response(status_code=exc.status_code, headers=headers)
return JSONResponse(
{"detail": exc.detail}, status_code=exc.status_code, headers=headers
)
async def request_validation_exception_handler(
request: Request, exc: RequestValidationError
) -> JSONResponse:
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content={"detail": jsonable_encoder(exc.errors())},
)
| 988 | Python | 37.03846 | 83 | 0.763158 |
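
# These two coroutines are installed as the default handlers in
# ``FastAPI.__init__`` (see applications.py below); the bare ``Response``
# branch covers status codes that must not carry a body (e.g. 204, 304).
# Overriding one of them (sketch):
from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi.responses import PlainTextResponse

app = FastAPI()


@app.exception_handler(RequestValidationError)
async def on_validation_error(request: Request, exc: RequestValidationError):
    # Replace the default 422 JSON body with plain text.
    return PlainTextResponse(str(exc), status_code=422)
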
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/param_functions.py | from typing import Any, Callable, Dict, Optional, Sequence
from fastapi import params
from pydantic.fields import Undefined
def Path( # noqa: N802
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
) -> Any:
return params.Path(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
deprecated=deprecated,
include_in_schema=include_in_schema,
**extra,
)
def Query( # noqa: N802
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
) -> Any:
return params.Query(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
deprecated=deprecated,
include_in_schema=include_in_schema,
**extra,
)
def Header( # noqa: N802
default: Any = Undefined,
*,
alias: Optional[str] = None,
convert_underscores: bool = True,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
) -> Any:
return params.Header(
default=default,
alias=alias,
convert_underscores=convert_underscores,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
deprecated=deprecated,
include_in_schema=include_in_schema,
**extra,
)
def Cookie( # noqa: N802
default: Any = Undefined,
*,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any,
) -> Any:
return params.Cookie(
default=default,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
deprecated=deprecated,
include_in_schema=include_in_schema,
**extra,
)
def Body( # noqa: N802
default: Any = Undefined,
*,
embed: bool = False,
media_type: str = "application/json",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
) -> Any:
return params.Body(
default=default,
embed=embed,
media_type=media_type,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
**extra,
)
def Form( # noqa: N802
default: Any = Undefined,
*,
media_type: str = "application/x-www-form-urlencoded",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
) -> Any:
return params.Form(
default=default,
media_type=media_type,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
**extra,
)
def File( # noqa: N802
default: Any = Undefined,
*,
media_type: str = "multipart/form-data",
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
regex: Optional[str] = None,
example: Any = Undefined,
examples: Optional[Dict[str, Any]] = None,
**extra: Any,
) -> Any:
return params.File(
default=default,
media_type=media_type,
alias=alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
regex=regex,
example=example,
examples=examples,
**extra,
)
def Depends( # noqa: N802
dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True
) -> Any:
return params.Depends(dependency=dependency, use_cache=use_cache)
def Security( # noqa: N802
dependency: Optional[Callable[..., Any]] = None,
*,
scopes: Optional[Sequence[str]] = None,
use_cache: bool = True,
) -> Any:
return params.Security(dependency=dependency, scopes=scopes, use_cache=use_cache)
| 7521 | Python | 24.848797 | 85 | 0.578381 |
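
# These wrappers return ``Any`` so that editors and type checkers accept
# defaults like ``Query(None)`` on typed parameters without complaint, while
# at runtime they simply build the corresponding ``fastapi.params`` object.
# Typical use in a path-operation signature (sketch; the paths and the
# ``pagination`` helper are invented):
from typing import Optional

from fastapi import Depends, FastAPI, Path, Query

app = FastAPI()


def pagination(
    limit: int = Query(10, le=100), offset: int = Query(0, ge=0)
) -> dict:
    return {"limit": limit, "offset": offset}


@app.get("/users/{user_id}")
async def read_user(
    user_id: int = Path(..., gt=0),
    q: Optional[str] = Query(None, min_length=3),
    page: dict = Depends(pagination),
) -> dict:
    return {"user_id": user_id, "q": q, **page}
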
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/applications.py | from enum import Enum
from typing import (
Any,
Awaitable,
Callable,
Coroutine,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from fastapi import routing
from fastapi.datastructures import Default, DefaultPlaceholder
from fastapi.encoders import DictIntStrAny, SetIntStr
from fastapi.exception_handlers import (
http_exception_handler,
request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from fastapi.logger import logger
from fastapi.middleware.asyncexitstack import AsyncExitStackMiddleware
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.openapi.utils import get_openapi
from fastapi.params import Depends
from fastapi.types import DecoratedCallable
from fastapi.utils import generate_unique_id
from starlette.applications import Starlette
from starlette.datastructures import State
from starlette.exceptions import HTTPException
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.errors import ServerErrorMiddleware
from starlette.middleware.exceptions import ExceptionMiddleware
from starlette.requests import Request
from starlette.responses import HTMLResponse, JSONResponse, Response
from starlette.routing import BaseRoute
from starlette.types import ASGIApp, Receive, Scope, Send
class FastAPI(Starlette):
def __init__(
self,
*,
debug: bool = False,
routes: Optional[List[BaseRoute]] = None,
title: str = "FastAPI",
description: str = "",
version: str = "0.1.0",
openapi_url: Optional[str] = "/openapi.json",
openapi_tags: Optional[List[Dict[str, Any]]] = None,
servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
docs_url: Optional[str] = "/docs",
redoc_url: Optional[str] = "/redoc",
swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
swagger_ui_init_oauth: Optional[Dict[str, Any]] = None,
middleware: Optional[Sequence[Middleware]] = None,
exception_handlers: Optional[
Dict[
Union[int, Type[Exception]],
Callable[[Request, Any], Coroutine[Any, Any, Response]],
]
] = None,
on_startup: Optional[Sequence[Callable[[], Any]]] = None,
on_shutdown: Optional[Sequence[Callable[[], Any]]] = None,
terms_of_service: Optional[str] = None,
contact: Optional[Dict[str, Union[str, Any]]] = None,
license_info: Optional[Dict[str, Union[str, Any]]] = None,
openapi_prefix: str = "",
root_path: str = "",
root_path_in_servers: bool = True,
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
swagger_ui_parameters: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
**extra: Any,
) -> None:
self.debug = debug
self.title = title
self.description = description
self.version = version
self.terms_of_service = terms_of_service
self.contact = contact
self.license_info = license_info
self.openapi_url = openapi_url
self.openapi_tags = openapi_tags
self.root_path_in_servers = root_path_in_servers
self.docs_url = docs_url
self.redoc_url = redoc_url
self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url
self.swagger_ui_init_oauth = swagger_ui_init_oauth
self.swagger_ui_parameters = swagger_ui_parameters
self.servers = servers or []
self.extra = extra
self.openapi_version = "3.0.2"
self.openapi_schema: Optional[Dict[str, Any]] = None
if self.openapi_url:
assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'"
assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'"
# TODO: remove when discarding the openapi_prefix parameter
if openapi_prefix:
logger.warning(
'"openapi_prefix" has been deprecated in favor of "root_path", which '
"follows more closely the ASGI standard, is simpler, and more "
"automatic. Check the docs at "
"https://fastapi.tiangolo.com/advanced/sub-applications/"
)
self.root_path = root_path or openapi_prefix
self.state: State = State()
self.dependency_overrides: Dict[Callable[..., Any], Callable[..., Any]] = {}
self.router: routing.APIRouter = routing.APIRouter(
routes=routes,
dependency_overrides_provider=self,
on_startup=on_startup,
on_shutdown=on_shutdown,
default_response_class=default_response_class,
dependencies=dependencies,
callbacks=callbacks,
deprecated=deprecated,
include_in_schema=include_in_schema,
responses=responses,
generate_unique_id_function=generate_unique_id_function,
)
self.exception_handlers: Dict[
Any, Callable[[Request, Any], Union[Response, Awaitable[Response]]]
] = ({} if exception_handlers is None else dict(exception_handlers))
self.exception_handlers.setdefault(HTTPException, http_exception_handler)
self.exception_handlers.setdefault(
RequestValidationError, request_validation_exception_handler
)
self.user_middleware: List[Middleware] = (
[] if middleware is None else list(middleware)
)
self.middleware_stack: Union[ASGIApp, None] = None
self.setup()
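    # A minimal construction sketch (illustrative, not part of this file):
    #
    #     app = FastAPI(title="Demo API", version="0.2.0")
    #
    # Passing ``openapi_url=None`` disables schema generation, which also
    # removes the /docs and /redoc pages wired up in setup() below.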
def build_middleware_stack(self) -> ASGIApp:
# Duplicate/override from Starlette to add AsyncExitStackMiddleware
# inside of ExceptionMiddleware, inside of custom user middlewares
debug = self.debug
error_handler = None
exception_handlers = {}
for key, value in self.exception_handlers.items():
if key in (500, Exception):
error_handler = value
else:
exception_handlers[key] = value
middleware = (
[Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
+ self.user_middleware
+ [
Middleware(
ExceptionMiddleware, handlers=exception_handlers, debug=debug
),
# Add FastAPI-specific AsyncExitStackMiddleware for dependencies with
# contextvars.
# This needs to happen after user middlewares because those create a
# new contextvars context copy by using a new AnyIO task group.
# The initial part of dependencies with yield is executed in the
# FastAPI code, inside all the middlewares, but the teardown part
# (after yield) is executed in the AsyncExitStack in this middleware,
# if the AsyncExitStack lived outside of the custom middlewares and
# contextvars were set in a dependency with yield in that internal
# contextvars context, the values would not be available in the
# outside context of the AsyncExitStack.
# By putting the middleware and the AsyncExitStack here, inside all
# user middlewares, the code before and after yield in dependencies
# with yield is executed in the same contextvars context, so all values
# set in contextvars before yield is still available after yield as
# would be expected.
# Additionally, by having this AsyncExitStack here, after the
# ExceptionMiddleware, now dependencies can catch handled exceptions,
# e.g. HTTPException, to customize the teardown code (e.g. DB session
# rollback).
Middleware(AsyncExitStackMiddleware),
]
)
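        # Resulting order, outermost first: ServerErrorMiddleware -> user
        # middleware -> ExceptionMiddleware -> AsyncExitStackMiddleware ->
        # router; reversed() below wraps the stack from the inside out.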
app = self.router
for cls, options in reversed(middleware):
app = cls(app=app, **options)
return app
def openapi(self) -> Dict[str, Any]:
if not self.openapi_schema:
self.openapi_schema = get_openapi(
title=self.title,
version=self.version,
openapi_version=self.openapi_version,
description=self.description,
terms_of_service=self.terms_of_service,
contact=self.contact,
license_info=self.license_info,
routes=self.routes,
tags=self.openapi_tags,
servers=self.servers,
)
return self.openapi_schema
def setup(self) -> None:
if self.openapi_url:
urls = (server_data.get("url") for server_data in self.servers)
server_urls = {url for url in urls if url}
async def openapi(req: Request) -> JSONResponse:
root_path = req.scope.get("root_path", "").rstrip("/")
if root_path not in server_urls:
if root_path and self.root_path_in_servers:
self.servers.insert(0, {"url": root_path})
server_urls.add(root_path)
return JSONResponse(self.openapi())
self.add_route(self.openapi_url, openapi, include_in_schema=False)
if self.openapi_url and self.docs_url:
async def swagger_ui_html(req: Request) -> HTMLResponse:
root_path = req.scope.get("root_path", "").rstrip("/")
openapi_url = root_path + self.openapi_url
oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url
if oauth2_redirect_url:
oauth2_redirect_url = root_path + oauth2_redirect_url
return get_swagger_ui_html(
openapi_url=openapi_url,
title=self.title + " - Swagger UI",
oauth2_redirect_url=oauth2_redirect_url,
init_oauth=self.swagger_ui_init_oauth,
swagger_ui_parameters=self.swagger_ui_parameters,
)
self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False)
if self.swagger_ui_oauth2_redirect_url:
async def swagger_ui_redirect(req: Request) -> HTMLResponse:
return get_swagger_ui_oauth2_redirect_html()
self.add_route(
self.swagger_ui_oauth2_redirect_url,
swagger_ui_redirect,
include_in_schema=False,
)
if self.openapi_url and self.redoc_url:
async def redoc_html(req: Request) -> HTMLResponse:
root_path = req.scope.get("root_path", "").rstrip("/")
openapi_url = root_path + self.openapi_url
return get_redoc_html(
openapi_url=openapi_url, title=self.title + " - ReDoc"
)
self.add_route(self.redoc_url, redoc_html, include_in_schema=False)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if self.root_path:
scope["root_path"] = self.root_path
await super().__call__(scope, receive, send)
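    # ``root_path`` matters when serving behind a proxy under a sub-path
    # (e.g. ``uvicorn main:app --root-path /api/v1``): injecting it into the
    # ASGI scope lets URL generation and the docs pages resolve correctly.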
def add_api_route(
self,
path: str,
endpoint: Callable[..., Coroutine[Any, Any, Response]],
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(
JSONResponse
),
name: Optional[str] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> None:
self.router.add_api_route(
path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def api_route(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.router.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None
) -> None:
self.router.add_api_websocket_route(path, endpoint, name=name)
def websocket(
self, path: str, name: Optional[str] = None
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: routing.APIRouter,
*,
prefix: str = "",
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
default_response_class: Type[Response] = Default(JSONResponse),
callbacks: Optional[List[BaseRoute]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> None:
self.router.include_router(
router,
prefix=prefix,
tags=tags,
dependencies=dependencies,
responses=responses,
deprecated=deprecated,
include_in_schema=include_in_schema,
default_response_class=default_response_class,
callbacks=callbacks,
generate_unique_id_function=generate_unique_id_function,
)
def get(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.get(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def put(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.put(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def post(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.post(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def delete(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.delete(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)

    def options(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.options(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)

    def head(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.head(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)

    def patch(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.patch(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)

    def trace(
self,
path: str,
*,
response_model: Any = Default(None),
status_code: Optional[int] = None,
tags: Optional[List[Union[str, Enum]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
openapi_extra: Optional[Dict[str, Any]] = None,
generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
generate_unique_id
),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.router.trace(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)

    def websocket_route(
        self, path: str, name: Union[str, None] = None
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.router.add_websocket_route(path, func, name=name)
            return func

        return decorator

    def on_event(
        self, event_type: str
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        return self.router.on_event(event_type)

    def middleware(
        self, middleware_type: str
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.add_middleware(BaseHTTPMiddleware, dispatch=func)
            return func

        return decorator

    def exception_handler(
        self, exc_class_or_status_code: Union[int, Type[Exception]]
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.add_exception_handler(exc_class_or_status_code, func)
            return func

        return decorator
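

# --- Usage sketch (editor's addition, not part of the library source) --------
# A minimal example of the decorator methods defined above. The demo app, the
# item model and every endpoint name are hypothetical, chosen for illustration
# only; the function is never called at import time.
def _demo_application_api() -> None:  # pragma: no cover
    from pydantic import BaseModel
    from starlette.requests import Request
    from starlette.responses import JSONResponse

    app = FastAPI()

    class Item(BaseModel):
        name: str

    # POST with a response model and an explicit creation status code
    @app.post("/items/", response_model=Item, status_code=201)
    async def create_item(item: Item) -> Item:
        return item

    # DELETE with a typed path parameter
    @app.delete("/items/{item_id}")
    async def delete_item(item_id: int) -> dict:
        return {"deleted": item_id}

    # middleware() wraps every HTTP request via BaseHTTPMiddleware
    @app.middleware("http")
    async def add_demo_header(request: Request, call_next):
        response = await call_next(request)
        response.headers["x-demo"] = "1"  # hypothetical header name
        return response

    # exception_handler() converts uncaught ValueErrors into 400 responses
    @app.exception_handler(ValueError)
    async def handle_value_error(request: Request, exc: ValueError) -> JSONResponse:
        return JSONResponse(status_code=400, content={"detail": str(exc)})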
| 39,361 | Python | 42.445916 | 88 | 0.611875 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/logger.py | import logging
logger = logging.getLogger("fastapi")
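

# --- Usage sketch (editor's addition, not part of the library source) --------
# FastAPI never configures this logger itself; applications opt in by
# attaching handlers, as with any stdlib logger. A minimal sketch:
def _demo_configure_logger() -> None:  # pragma: no cover
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s %(name)s: %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)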
| 54 | Python | 12.749997 | 37 | 0.777778 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/dependencies/utils.py | import dataclasses
import inspect
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Any,
Callable,
Coroutine,
Dict,
ForwardRef,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import anyio
from fastapi import params
from fastapi.concurrency import (
AsyncExitStack,
asynccontextmanager,
contextmanager_in_threadpool,
)
from fastapi.dependencies.models import Dependant, SecurityRequirement
from fastapi.logger import logger
from fastapi.security.base import SecurityBase
from fastapi.security.oauth2 import OAuth2, SecurityScopes
from fastapi.security.open_id_connect_url import OpenIdConnect
from fastapi.utils import create_response_field, get_path_param_names
from pydantic import BaseModel, create_model
from pydantic.error_wrappers import ErrorWrapper
from pydantic.errors import MissingError
from pydantic.fields import (
SHAPE_FROZENSET,
SHAPE_LIST,
SHAPE_SEQUENCE,
SHAPE_SET,
SHAPE_SINGLETON,
SHAPE_TUPLE,
SHAPE_TUPLE_ELLIPSIS,
FieldInfo,
ModelField,
Required,
Undefined,
)
from pydantic.schema import get_annotation_from_field_info
from pydantic.typing import evaluate_forwardref
from pydantic.utils import lenient_issubclass
from starlette.background import BackgroundTasks
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import FormData, Headers, QueryParams, UploadFile
from starlette.requests import HTTPConnection, Request
from starlette.responses import Response
from starlette.websockets import WebSocket
sequence_shapes = {
SHAPE_LIST,
SHAPE_SET,
SHAPE_FROZENSET,
SHAPE_TUPLE,
SHAPE_SEQUENCE,
SHAPE_TUPLE_ELLIPSIS,
}
sequence_types = (list, set, tuple)
sequence_shape_to_type = {
SHAPE_LIST: list,
SHAPE_SET: set,
SHAPE_TUPLE: tuple,
SHAPE_SEQUENCE: list,
SHAPE_TUPLE_ELLIPSIS: list,
}
multipart_not_installed_error = (
'Form data requires "python-multipart" to be installed. \n'
'You can install "python-multipart" with: \n\n'
"pip install python-multipart\n"
)
multipart_incorrect_install_error = (
'Form data requires "python-multipart" to be installed. '
'It seems you installed "multipart" instead. \n'
'You can remove "multipart" with: \n\n'
"pip uninstall multipart\n\n"
'And then install "python-multipart" with: \n\n'
"pip install python-multipart\n"
)
def check_file_field(field: ModelField) -> None:
field_info = field.field_info
if isinstance(field_info, params.Form):
try:
# __version__ is available in both multiparts, and can be mocked
from multipart import __version__ # type: ignore
assert __version__
try:
# parse_options_header is only available in the right multipart
from multipart.multipart import parse_options_header # type: ignore
assert parse_options_header
except ImportError:
logger.error(multipart_incorrect_install_error)
raise RuntimeError(multipart_incorrect_install_error) from None
except ImportError:
logger.error(multipart_not_installed_error)
raise RuntimeError(multipart_not_installed_error) from None
def get_param_sub_dependant(
*, param: inspect.Parameter, path: str, security_scopes: Optional[List[str]] = None
) -> Dependant:
depends: params.Depends = param.default
if depends.dependency:
dependency = depends.dependency
else:
dependency = param.annotation
return get_sub_dependant(
depends=depends,
dependency=dependency,
path=path,
name=param.name,
security_scopes=security_scopes,
)
def get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:
assert callable(
depends.dependency
), "A parameter-less dependency must have a callable dependency"
return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)
def get_sub_dependant(
*,
depends: params.Depends,
dependency: Callable[..., Any],
path: str,
name: Optional[str] = None,
security_scopes: Optional[List[str]] = None,
) -> Dependant:
security_requirement = None
security_scopes = security_scopes or []
if isinstance(depends, params.Security):
dependency_scopes = depends.scopes
security_scopes.extend(dependency_scopes)
if isinstance(dependency, SecurityBase):
use_scopes: List[str] = []
if isinstance(dependency, (OAuth2, OpenIdConnect)):
use_scopes = security_scopes
security_requirement = SecurityRequirement(
security_scheme=dependency, scopes=use_scopes
)
sub_dependant = get_dependant(
path=path,
call=dependency,
name=name,
security_scopes=security_scopes,
use_cache=depends.use_cache,
)
if security_requirement:
sub_dependant.security_requirements.append(security_requirement)
return sub_dependant
CacheKey = Tuple[Optional[Callable[..., Any]], Tuple[str, ...]]
def get_flat_dependant(
dependant: Dependant,
*,
skip_repeats: bool = False,
visited: Optional[List[CacheKey]] = None,
) -> Dependant:
if visited is None:
visited = []
visited.append(dependant.cache_key)
flat_dependant = Dependant(
path_params=dependant.path_params.copy(),
query_params=dependant.query_params.copy(),
header_params=dependant.header_params.copy(),
cookie_params=dependant.cookie_params.copy(),
body_params=dependant.body_params.copy(),
security_schemes=dependant.security_requirements.copy(),
use_cache=dependant.use_cache,
path=dependant.path,
)
for sub_dependant in dependant.dependencies:
if skip_repeats and sub_dependant.cache_key in visited:
continue
flat_sub = get_flat_dependant(
sub_dependant, skip_repeats=skip_repeats, visited=visited
)
flat_dependant.path_params.extend(flat_sub.path_params)
flat_dependant.query_params.extend(flat_sub.query_params)
flat_dependant.header_params.extend(flat_sub.header_params)
flat_dependant.cookie_params.extend(flat_sub.cookie_params)
flat_dependant.body_params.extend(flat_sub.body_params)
flat_dependant.security_requirements.extend(flat_sub.security_requirements)
return flat_dependant
def get_flat_params(dependant: Dependant) -> List[ModelField]:
flat_dependant = get_flat_dependant(dependant, skip_repeats=True)
return (
flat_dependant.path_params
+ flat_dependant.query_params
+ flat_dependant.header_params
+ flat_dependant.cookie_params
)
def is_scalar_field(field: ModelField) -> bool:
field_info = field.field_info
if not (
field.shape == SHAPE_SINGLETON
and not lenient_issubclass(field.type_, BaseModel)
and not lenient_issubclass(field.type_, sequence_types + (dict,))
and not dataclasses.is_dataclass(field.type_)
and not isinstance(field_info, params.Body)
):
return False
if field.sub_fields:
if not all(is_scalar_field(f) for f in field.sub_fields):
return False
return True
def is_scalar_sequence_field(field: ModelField) -> bool:
if (field.shape in sequence_shapes) and not lenient_issubclass(
field.type_, BaseModel
):
if field.sub_fields is not None:
for sub_field in field.sub_fields:
if not is_scalar_field(sub_field):
return False
return True
if lenient_issubclass(field.type_, sequence_types):
return True
return False
def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:
signature = inspect.signature(call)
globalns = getattr(call, "__globals__", {})
typed_params = [
inspect.Parameter(
name=param.name,
kind=param.kind,
default=param.default,
annotation=get_typed_annotation(param.annotation, globalns),
)
for param in signature.parameters.values()
]
typed_signature = inspect.Signature(typed_params)
return typed_signature
def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any:
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
annotation = evaluate_forwardref(annotation, globalns, globalns)
return annotation
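

# --- Usage sketch (editor's addition, not part of the library source) --------
# String ("forward reference") annotations are resolved into concrete types
# before parameter analysis; `_takes_int` is a demo-only function.
def _demo_typed_signature() -> None:  # pragma: no cover
    def _takes_int(value: "int") -> None:
        ...

    signature = get_typed_signature(_takes_int)
    assert signature.parameters["value"].annotation is int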
def get_typed_return_annotation(call: Callable[..., Any]) -> Any:
signature = inspect.signature(call)
annotation = signature.return_annotation
if annotation is inspect.Signature.empty:
return None
globalns = getattr(call, "__globals__", {})
return get_typed_annotation(annotation, globalns)
def get_dependant(
*,
path: str,
call: Callable[..., Any],
name: Optional[str] = None,
security_scopes: Optional[List[str]] = None,
use_cache: bool = True,
) -> Dependant:
path_param_names = get_path_param_names(path)
endpoint_signature = get_typed_signature(call)
signature_params = endpoint_signature.parameters
dependant = Dependant(
call=call,
name=name,
path=path,
security_scopes=security_scopes,
use_cache=use_cache,
)
for param_name, param in signature_params.items():
if isinstance(param.default, params.Depends):
sub_dependant = get_param_sub_dependant(
param=param, path=path, security_scopes=security_scopes
)
dependant.dependencies.append(sub_dependant)
continue
if add_non_field_param_to_dependency(param=param, dependant=dependant):
continue
param_field = get_param_field(
param=param, default_field_info=params.Query, param_name=param_name
)
if param_name in path_param_names:
assert is_scalar_field(
field=param_field
), "Path params must be of one of the supported types"
ignore_default = not isinstance(param.default, params.Path)
param_field = get_param_field(
param=param,
param_name=param_name,
default_field_info=params.Path,
force_type=params.ParamTypes.path,
ignore_default=ignore_default,
)
add_param_to_fields(field=param_field, dependant=dependant)
elif is_scalar_field(field=param_field):
add_param_to_fields(field=param_field, dependant=dependant)
elif isinstance(
param.default, (params.Query, params.Header)
) and is_scalar_sequence_field(param_field):
add_param_to_fields(field=param_field, dependant=dependant)
else:
field_info = param_field.field_info
assert isinstance(
field_info, params.Body
), f"Param: {param_field.name} can only be a request body, using Body()"
dependant.body_params.append(param_field)
return dependant
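

# --- Usage sketch (editor's addition, not part of the library source) --------
# How an endpoint signature is decomposed into a Dependant: names that appear
# in the path template become path params, the rest become query params.
# `read_item` is a hypothetical endpoint used only for illustration.
def _demo_get_dependant() -> None:  # pragma: no cover
    def read_item(item_id: int, q: str = ""):
        return {"item_id": item_id, "q": q}

    dependant = get_dependant(path="/items/{item_id}", call=read_item)
    assert [field.name for field in dependant.path_params] == ["item_id"]
    assert [field.name for field in dependant.query_params] == ["q"]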
def add_non_field_param_to_dependency(
*, param: inspect.Parameter, dependant: Dependant
) -> Optional[bool]:
if lenient_issubclass(param.annotation, Request):
dependant.request_param_name = param.name
return True
elif lenient_issubclass(param.annotation, WebSocket):
dependant.websocket_param_name = param.name
return True
elif lenient_issubclass(param.annotation, HTTPConnection):
dependant.http_connection_param_name = param.name
return True
elif lenient_issubclass(param.annotation, Response):
dependant.response_param_name = param.name
return True
elif lenient_issubclass(param.annotation, BackgroundTasks):
dependant.background_tasks_param_name = param.name
return True
elif lenient_issubclass(param.annotation, SecurityScopes):
dependant.security_scopes_param_name = param.name
return True
return None
def get_param_field(
*,
param: inspect.Parameter,
param_name: str,
default_field_info: Type[params.Param] = params.Param,
force_type: Optional[params.ParamTypes] = None,
ignore_default: bool = False,
) -> ModelField:
default_value: Any = Undefined
had_schema = False
    if param.default is not param.empty and ignore_default is False:
default_value = param.default
if isinstance(default_value, FieldInfo):
had_schema = True
field_info = default_value
default_value = field_info.default
if (
isinstance(field_info, params.Param)
and getattr(field_info, "in_", None) is None
):
field_info.in_ = default_field_info.in_
if force_type:
field_info.in_ = force_type # type: ignore
else:
field_info = default_field_info(default=default_value)
required = True
if default_value is Required or ignore_default:
required = True
default_value = None
elif default_value is not Undefined:
required = False
annotation: Any = Any
    if param.annotation is not param.empty:
annotation = param.annotation
annotation = get_annotation_from_field_info(annotation, field_info, param_name)
if not field_info.alias and getattr(field_info, "convert_underscores", None):
alias = param.name.replace("_", "-")
else:
alias = field_info.alias or param.name
field = create_response_field(
name=param.name,
type_=annotation,
default=default_value,
alias=alias,
required=required,
field_info=field_info,
)
if not had_schema and not is_scalar_field(field=field):
field.field_info = params.Body(field_info.default)
if not had_schema and lenient_issubclass(field.type_, UploadFile):
field.field_info = params.File(field_info.default)
return field
def add_param_to_fields(*, field: ModelField, dependant: Dependant) -> None:
field_info = cast(params.Param, field.field_info)
if field_info.in_ == params.ParamTypes.path:
dependant.path_params.append(field)
elif field_info.in_ == params.ParamTypes.query:
dependant.query_params.append(field)
elif field_info.in_ == params.ParamTypes.header:
dependant.header_params.append(field)
else:
assert (
field_info.in_ == params.ParamTypes.cookie
), f"non-body parameters must be in path, query, header or cookie: {field.name}"
dependant.cookie_params.append(field)
def is_coroutine_callable(call: Callable[..., Any]) -> bool:
if inspect.isroutine(call):
return inspect.iscoroutinefunction(call)
if inspect.isclass(call):
return False
dunder_call = getattr(call, "__call__", None) # noqa: B004
return inspect.iscoroutinefunction(dunder_call)
def is_async_gen_callable(call: Callable[..., Any]) -> bool:
if inspect.isasyncgenfunction(call):
return True
dunder_call = getattr(call, "__call__", None) # noqa: B004
return inspect.isasyncgenfunction(dunder_call)
def is_gen_callable(call: Callable[..., Any]) -> bool:
if inspect.isgeneratorfunction(call):
return True
dunder_call = getattr(call, "__call__", None) # noqa: B004
return inspect.isgeneratorfunction(dunder_call)
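

# --- Usage sketch (editor's addition, not part of the library source) --------
# The three predicates above decide how a dependency is executed: awaited
# directly, wrapped as an (async) context manager, or run in a threadpool.
def _demo_callable_predicates() -> None:  # pragma: no cover
    async def coro() -> None:
        ...

    async def agen():
        yield

    def gen():
        yield

    assert is_coroutine_callable(coro)
    assert is_async_gen_callable(agen)
    assert is_gen_callable(gen)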
async def solve_generator(
*, call: Callable[..., Any], stack: AsyncExitStack, sub_values: Dict[str, Any]
) -> Any:
if is_gen_callable(call):
cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))
elif is_async_gen_callable(call):
cm = asynccontextmanager(call)(**sub_values)
return await stack.enter_async_context(cm)
async def solve_dependencies(
*,
request: Union[Request, WebSocket],
dependant: Dependant,
body: Optional[Union[Dict[str, Any], FormData]] = None,
background_tasks: Optional[BackgroundTasks] = None,
response: Optional[Response] = None,
dependency_overrides_provider: Optional[Any] = None,
dependency_cache: Optional[Dict[Tuple[Callable[..., Any], Tuple[str]], Any]] = None,
) -> Tuple[
Dict[str, Any],
List[ErrorWrapper],
Optional[BackgroundTasks],
Response,
Dict[Tuple[Callable[..., Any], Tuple[str]], Any],
]:
values: Dict[str, Any] = {}
errors: List[ErrorWrapper] = []
if response is None:
response = Response()
del response.headers["content-length"]
response.status_code = None # type: ignore
dependency_cache = dependency_cache or {}
sub_dependant: Dependant
for sub_dependant in dependant.dependencies:
sub_dependant.call = cast(Callable[..., Any], sub_dependant.call)
sub_dependant.cache_key = cast(
Tuple[Callable[..., Any], Tuple[str]], sub_dependant.cache_key
)
call = sub_dependant.call
use_sub_dependant = sub_dependant
if (
dependency_overrides_provider
and dependency_overrides_provider.dependency_overrides
):
original_call = sub_dependant.call
call = getattr(
dependency_overrides_provider, "dependency_overrides", {}
).get(original_call, original_call)
use_path: str = sub_dependant.path # type: ignore
use_sub_dependant = get_dependant(
path=use_path,
call=call,
name=sub_dependant.name,
security_scopes=sub_dependant.security_scopes,
)
solved_result = await solve_dependencies(
request=request,
dependant=use_sub_dependant,
body=body,
background_tasks=background_tasks,
response=response,
dependency_overrides_provider=dependency_overrides_provider,
dependency_cache=dependency_cache,
)
(
sub_values,
sub_errors,
background_tasks,
_, # the subdependency returns the same response we have
sub_dependency_cache,
) = solved_result
dependency_cache.update(sub_dependency_cache)
if sub_errors:
errors.extend(sub_errors)
continue
if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:
solved = dependency_cache[sub_dependant.cache_key]
elif is_gen_callable(call) or is_async_gen_callable(call):
stack = request.scope.get("fastapi_astack")
assert isinstance(stack, AsyncExitStack)
solved = await solve_generator(
call=call, stack=stack, sub_values=sub_values
)
elif is_coroutine_callable(call):
solved = await call(**sub_values)
else:
solved = await run_in_threadpool(call, **sub_values)
if sub_dependant.name is not None:
values[sub_dependant.name] = solved
if sub_dependant.cache_key not in dependency_cache:
dependency_cache[sub_dependant.cache_key] = solved
path_values, path_errors = request_params_to_args(
dependant.path_params, request.path_params
)
query_values, query_errors = request_params_to_args(
dependant.query_params, request.query_params
)
header_values, header_errors = request_params_to_args(
dependant.header_params, request.headers
)
cookie_values, cookie_errors = request_params_to_args(
dependant.cookie_params, request.cookies
)
values.update(path_values)
values.update(query_values)
values.update(header_values)
values.update(cookie_values)
errors += path_errors + query_errors + header_errors + cookie_errors
if dependant.body_params:
(
body_values,
body_errors,
) = await request_body_to_args( # body_params checked above
required_params=dependant.body_params, received_body=body
)
values.update(body_values)
errors.extend(body_errors)
if dependant.http_connection_param_name:
values[dependant.http_connection_param_name] = request
if dependant.request_param_name and isinstance(request, Request):
values[dependant.request_param_name] = request
elif dependant.websocket_param_name and isinstance(request, WebSocket):
values[dependant.websocket_param_name] = request
if dependant.background_tasks_param_name:
if background_tasks is None:
background_tasks = BackgroundTasks()
values[dependant.background_tasks_param_name] = background_tasks
if dependant.response_param_name:
values[dependant.response_param_name] = response
if dependant.security_scopes_param_name:
values[dependant.security_scopes_param_name] = SecurityScopes(
scopes=dependant.security_scopes
)
return values, errors, background_tasks, response, dependency_cache
def request_params_to_args(
required_params: Sequence[ModelField],
received_params: Union[Mapping[str, Any], QueryParams, Headers],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
for field in required_params:
if is_scalar_sequence_field(field) and isinstance(
received_params, (QueryParams, Headers)
):
value = received_params.getlist(field.alias) or field.default
else:
value = received_params.get(field.alias)
field_info = field.field_info
assert isinstance(
field_info, params.Param
), "Params must be subclasses of Param"
if value is None:
if field.required:
errors.append(
ErrorWrapper(
MissingError(), loc=(field_info.in_.value, field.alias)
)
)
else:
values[field.name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(
value, values, loc=(field_info.in_.value, field.alias)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
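

# --- Usage sketch (editor's addition, not part of the library source) --------
# Validating raw query parameters against one generated field; the parameter
# name `q` is hypothetical and the helpers reuse this module's imports.
def _demo_request_params_to_args() -> None:  # pragma: no cover
    raw_param = inspect.Parameter(
        "q", inspect.Parameter.KEYWORD_ONLY, default="", annotation=str
    )
    field = get_param_field(
        param=raw_param, param_name="q", default_field_info=params.Query
    )
    values, errors = request_params_to_args([field], QueryParams("q=hello"))
    assert values == {"q": "hello"} and not errors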
async def request_body_to_args(
required_params: List[ModelField],
received_body: Optional[Union[Dict[str, Any], FormData]],
) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:
values = {}
errors = []
if required_params:
field = required_params[0]
field_info = field.field_info
embed = getattr(field_info, "embed", None)
field_alias_omitted = len(required_params) == 1 and not embed
if field_alias_omitted:
received_body = {field.alias: received_body}
for field in required_params:
loc: Tuple[str, ...]
if field_alias_omitted:
loc = ("body",)
else:
loc = ("body", field.alias)
value: Optional[Any] = None
if received_body is not None:
if (
field.shape in sequence_shapes or field.type_ in sequence_types
) and isinstance(received_body, FormData):
value = received_body.getlist(field.alias)
else:
try:
value = received_body.get(field.alias)
except AttributeError:
errors.append(get_missing_field_error(loc))
continue
if (
value is None
or (isinstance(field_info, params.Form) and value == "")
or (
isinstance(field_info, params.Form)
and field.shape in sequence_shapes
and len(value) == 0
)
):
if field.required:
errors.append(get_missing_field_error(loc))
else:
values[field.name] = deepcopy(field.default)
continue
if (
isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, UploadFile)
):
value = await value.read()
elif (
field.shape in sequence_shapes
and isinstance(field_info, params.File)
and lenient_issubclass(field.type_, bytes)
and isinstance(value, sequence_types)
):
results: List[Union[bytes, str]] = []
async def process_fn(
fn: Callable[[], Coroutine[Any, Any, Any]]
) -> None:
result = await fn()
results.append(result)
async with anyio.create_task_group() as tg:
for sub_value in value:
tg.start_soon(process_fn, sub_value.read)
value = sequence_shape_to_type[field.shape](results)
v_, errors_ = field.validate(value, values, loc=loc)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[field.name] = v_
return values, errors
def get_missing_field_error(loc: Tuple[str, ...]) -> ErrorWrapper:
missing_field_error = ErrorWrapper(MissingError(), loc=loc)
return missing_field_error
def get_body_field(*, dependant: Dependant, name: str) -> Optional[ModelField]:
flat_dependant = get_flat_dependant(dependant)
if not flat_dependant.body_params:
return None
first_param = flat_dependant.body_params[0]
field_info = first_param.field_info
embed = getattr(field_info, "embed", None)
body_param_names_set = {param.name for param in flat_dependant.body_params}
if len(body_param_names_set) == 1 and not embed:
check_file_field(first_param)
return first_param
    # If one field requires embedding, all of them have to be embedded: a
    # sub-dependency may be evaluated with a single unique body field that is
    # then combined (embedded) with other body fields.
for param in flat_dependant.body_params:
setattr(param.field_info, "embed", True) # noqa: B010
model_name = "Body_" + name
BodyModel: Type[BaseModel] = create_model(model_name)
for f in flat_dependant.body_params:
BodyModel.__fields__[f.name] = f
    required = any(f.required for f in flat_dependant.body_params)
BodyFieldInfo_kwargs: Dict[str, Any] = {"default": None}
if any(isinstance(f.field_info, params.File) for f in flat_dependant.body_params):
BodyFieldInfo: Type[params.Body] = params.File
elif any(isinstance(f.field_info, params.Form) for f in flat_dependant.body_params):
BodyFieldInfo = params.Form
else:
BodyFieldInfo = params.Body
body_param_media_types = [
f.field_info.media_type
for f in flat_dependant.body_params
if isinstance(f.field_info, params.Body)
]
if len(set(body_param_media_types)) == 1:
BodyFieldInfo_kwargs["media_type"] = body_param_media_types[0]
final_field = create_response_field(
name="body",
type_=BodyModel,
required=required,
alias="body",
field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),
)
check_file_field(final_field)
return final_field
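

# --- Usage sketch (editor's addition, not part of the library source) --------
# A single non-embedded Pydantic-model parameter becomes the whole request
# body; the `_Payload` model and the `create` endpoint are hypothetical.
def _demo_get_body_field() -> None:  # pragma: no cover
    class _Payload(BaseModel):
        name: str

    def create(payload: _Payload) -> _Payload:
        return payload

    dependant = get_dependant(path="/create", call=create)
    body_field = get_body_field(dependant=dependant, name="create")
    assert body_field is not None and body_field.name == "payload"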
| 27,773 | Python | 35.164062 | 88 | 0.630756 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/dependencies/models.py | from typing import Any, Callable, List, Optional, Sequence
from fastapi.security.base import SecurityBase
from pydantic.fields import ModelField
class SecurityRequirement:
def __init__(
self, security_scheme: SecurityBase, scopes: Optional[Sequence[str]] = None
):
self.security_scheme = security_scheme
self.scopes = scopes
class Dependant:
def __init__(
self,
*,
path_params: Optional[List[ModelField]] = None,
query_params: Optional[List[ModelField]] = None,
header_params: Optional[List[ModelField]] = None,
cookie_params: Optional[List[ModelField]] = None,
body_params: Optional[List[ModelField]] = None,
dependencies: Optional[List["Dependant"]] = None,
security_schemes: Optional[List[SecurityRequirement]] = None,
name: Optional[str] = None,
call: Optional[Callable[..., Any]] = None,
request_param_name: Optional[str] = None,
websocket_param_name: Optional[str] = None,
http_connection_param_name: Optional[str] = None,
response_param_name: Optional[str] = None,
background_tasks_param_name: Optional[str] = None,
security_scopes_param_name: Optional[str] = None,
security_scopes: Optional[List[str]] = None,
use_cache: bool = True,
path: Optional[str] = None,
) -> None:
self.path_params = path_params or []
self.query_params = query_params or []
self.header_params = header_params or []
self.cookie_params = cookie_params or []
self.body_params = body_params or []
self.dependencies = dependencies or []
self.security_requirements = security_schemes or []
self.request_param_name = request_param_name
self.websocket_param_name = websocket_param_name
self.http_connection_param_name = http_connection_param_name
self.response_param_name = response_param_name
self.background_tasks_param_name = background_tasks_param_name
self.security_scopes = security_scopes
self.security_scopes_param_name = security_scopes_param_name
self.name = name
self.call = call
self.use_cache = use_cache
# Store the path to be able to re-generate a dependable from it in overrides
self.path = path
# Save the cache key at creation to optimize performance
self.cache_key = (self.call, tuple(sorted(set(self.security_scopes or []))))
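

# --- Usage sketch (editor's addition, not part of the library source) --------
# The cache key pairs the callable with its sorted, de-duplicated scopes, so
# equivalent dependants share one cache slot; `_get_user` is hypothetical.
def _demo_cache_key() -> None:  # pragma: no cover
    def _get_user() -> str:
        return "user"

    first = Dependant(call=_get_user, security_scopes=["read", "write", "read"])
    second = Dependant(call=_get_user, security_scopes=["write", "read"])
    assert first.cache_key == second.cache_key == (_get_user, ("read", "write"))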
| 2,494 | Python | 41.288135 | 84 | 0.64595 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/openapi/docs.py | import json
from typing import Any, Dict, Optional
from fastapi.encoders import jsonable_encoder
from starlette.responses import HTMLResponse
swagger_ui_default_parameters = {
"dom_id": "#swagger-ui",
"layout": "BaseLayout",
"deepLinking": True,
"showExtensions": True,
"showCommonExtensions": True,
}
def get_swagger_ui_html(
*,
openapi_url: str,
title: str,
swagger_js_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@4/swagger-ui-bundle.js",
swagger_css_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@4/swagger-ui.css",
swagger_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png",
oauth2_redirect_url: Optional[str] = None,
init_oauth: Optional[Dict[str, Any]] = None,
swagger_ui_parameters: Optional[Dict[str, Any]] = None,
) -> HTMLResponse:
current_swagger_ui_parameters = swagger_ui_default_parameters.copy()
if swagger_ui_parameters:
current_swagger_ui_parameters.update(swagger_ui_parameters)
html = f"""
<!DOCTYPE html>
<html>
<head>
<link type="text/css" rel="stylesheet" href="{swagger_css_url}">
<link rel="shortcut icon" href="{swagger_favicon_url}">
<title>{title}</title>
</head>
<body>
<div id="swagger-ui">
</div>
<script src="{swagger_js_url}"></script>
<!-- `SwaggerUIBundle` is now available on the page -->
<script>
const ui = SwaggerUIBundle({{
url: '{openapi_url}',
"""
for key, value in current_swagger_ui_parameters.items():
html += f"{json.dumps(key)}: {json.dumps(jsonable_encoder(value))},\n"
if oauth2_redirect_url:
html += f"oauth2RedirectUrl: window.location.origin + '{oauth2_redirect_url}',"
html += """
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIBundle.SwaggerUIStandalonePreset
],
})"""
if init_oauth:
html += f"""
ui.initOAuth({json.dumps(jsonable_encoder(init_oauth))})
"""
html += """
</script>
</body>
</html>
"""
return HTMLResponse(html)
def get_redoc_html(
*,
openapi_url: str,
title: str,
redoc_js_url: str = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js",
redoc_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png",
with_google_fonts: bool = True,
) -> HTMLResponse:
html = f"""
<!DOCTYPE html>
<html>
<head>
<title>{title}</title>
<!-- needed for adaptive design -->
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
"""
if with_google_fonts:
html += """
<link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
"""
html += f"""
<link rel="shortcut icon" href="{redoc_favicon_url}">
<!--
ReDoc doesn't change outer page styles
-->
<style>
body {{
margin: 0;
padding: 0;
}}
</style>
</head>
<body>
<noscript>
        ReDoc requires JavaScript to function. Please enable it to browse the documentation.
</noscript>
<redoc spec-url="{openapi_url}"></redoc>
<script src="{redoc_js_url}"> </script>
</body>
</html>
"""
return HTMLResponse(html)
def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse:
# copied from https://github.com/swagger-api/swagger-ui/blob/v4.14.0/dist/oauth2-redirect.html
html = """
<!doctype html>
<html lang="en-US">
<head>
<title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
'use strict';
function run () {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1).replace('?', '&');
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "["+qp.error+"]: " +
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: "+qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
});
}
} else {
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
}
window.close();
}
if (document.readyState !== 'loading') {
run();
} else {
document.addEventListener('DOMContentLoaded', function () {
run();
});
}
</script>
</body>
</html>
"""
return HTMLResponse(content=html)
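

# --- Usage sketch (editor's addition, not part of the library source) --------
# Re-creating the interactive docs routes with the helpers above, e.g. to
# serve self-hosted assets; every URL below is a hypothetical placeholder.
def _demo_custom_docs() -> None:  # pragma: no cover
    from fastapi import FastAPI

    app = FastAPI(docs_url=None, redoc_url=None)

    @app.get("/docs", include_in_schema=False)
    async def custom_swagger_ui() -> HTMLResponse:
        return get_swagger_ui_html(
            openapi_url=str(app.openapi_url),
            title=f"{app.title} - Swagger UI",
            oauth2_redirect_url="/docs/oauth2-redirect",
            swagger_js_url="/static/swagger-ui-bundle.js",  # hypothetical path
            swagger_css_url="/static/swagger-ui.css",  # hypothetical path
        )

    @app.get("/docs/oauth2-redirect", include_in_schema=False)
    async def swagger_ui_redirect() -> HTMLResponse:
        return get_swagger_ui_oauth2_redirect_html()

    @app.get("/redoc", include_in_schema=False)
    async def redoc() -> HTMLResponse:
        return get_redoc_html(
            openapi_url=str(app.openapi_url), title=f"{app.title} - ReDoc"
        )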
| 6,532 | Python | 31.02451 | 150 | 0.530312 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/openapi/constants.py | METHODS_WITH_BODY = {"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"}
REF_PREFIX = "#/components/schemas/"
| 107 | Python | 34.999988 | 69 | 0.616822 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/openapi/utils.py | import http.client
import inspect
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
from fastapi import routing
from fastapi.datastructures import DefaultPlaceholder
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import get_flat_dependant, get_flat_params
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX
from fastapi.openapi.models import OpenAPI
from fastapi.params import Body, Param
from fastapi.responses import Response
from fastapi.utils import (
deep_dict_update,
generate_operation_id_for_path,
get_model_definitions,
is_body_allowed_for_status_code,
)
from pydantic import BaseModel
from pydantic.fields import ModelField, Undefined
from pydantic.schema import (
field_schema,
get_flat_models_from_fields,
get_model_name_map,
)
from pydantic.utils import lenient_issubclass
from starlette.responses import JSONResponse
from starlette.routing import BaseRoute
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
validation_error_definition = {
"title": "ValidationError",
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
"required": ["loc", "msg", "type"],
}
validation_error_response_definition = {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": REF_PREFIX + "ValidationError"},
}
},
}
status_code_ranges: Dict[str, str] = {
"1XX": "Information",
"2XX": "Success",
"3XX": "Redirection",
"4XX": "Client Error",
"5XX": "Server Error",
"DEFAULT": "Default Response",
}
def get_openapi_security_definitions(
flat_dependant: Dependant,
) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
security_definitions = {}
operation_security = []
for security_requirement in flat_dependant.security_requirements:
security_definition = jsonable_encoder(
security_requirement.security_scheme.model,
by_alias=True,
exclude_none=True,
)
security_name = security_requirement.security_scheme.scheme_name
security_definitions[security_name] = security_definition
operation_security.append({security_name: security_requirement.scopes})
return security_definitions, operation_security
def get_openapi_operation_parameters(
*,
all_route_params: Sequence[ModelField],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> List[Dict[str, Any]]:
parameters = []
for param in all_route_params:
field_info = param.field_info
field_info = cast(Param, field_info)
if not field_info.include_in_schema:
continue
parameter = {
"name": param.alias,
"in": field_info.in_.value,
"required": param.required,
"schema": field_schema(
param, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)[0],
}
if field_info.description:
parameter["description"] = field_info.description
if field_info.examples:
parameter["examples"] = jsonable_encoder(field_info.examples)
elif field_info.example != Undefined:
parameter["example"] = jsonable_encoder(field_info.example)
if field_info.deprecated:
parameter["deprecated"] = field_info.deprecated
parameters.append(parameter)
return parameters
def get_openapi_operation_request_body(
*,
body_field: Optional[ModelField],
model_name_map: Dict[Union[Type[BaseModel], Type[Enum]], str],
) -> Optional[Dict[str, Any]]:
if not body_field:
return None
assert isinstance(body_field, ModelField)
body_schema, _, _ = field_schema(
body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
field_info = cast(Body, body_field.field_info)
request_media_type = field_info.media_type
required = body_field.required
request_body_oai: Dict[str, Any] = {}
if required:
request_body_oai["required"] = required
request_media_content: Dict[str, Any] = {"schema": body_schema}
if field_info.examples:
request_media_content["examples"] = jsonable_encoder(field_info.examples)
elif field_info.example != Undefined:
request_media_content["example"] = jsonable_encoder(field_info.example)
request_body_oai["content"] = {request_media_type: request_media_content}
return request_body_oai
def generate_operation_id(
*, route: routing.APIRoute, method: str
) -> str: # pragma: nocover
warnings.warn(
"fastapi.openapi.utils.generate_operation_id() was deprecated, "
"it is not used internally, and will be removed soon",
DeprecationWarning,
stacklevel=2,
)
if route.operation_id:
return route.operation_id
path: str = route.path_format
return generate_operation_id_for_path(name=route.name, path=path, method=method)
def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str:
if route.summary:
return route.summary
return route.name.replace("_", " ").title()
def get_openapi_operation_metadata(
*, route: routing.APIRoute, method: str, operation_ids: Set[str]
) -> Dict[str, Any]:
operation: Dict[str, Any] = {}
if route.tags:
operation["tags"] = route.tags
operation["summary"] = generate_operation_summary(route=route, method=method)
if route.description:
operation["description"] = route.description
operation_id = route.operation_id or route.unique_id
if operation_id in operation_ids:
message = (
f"Duplicate Operation ID {operation_id} for function "
+ f"{route.endpoint.__name__}"
)
file_name = getattr(route.endpoint, "__globals__", {}).get("__file__")
if file_name:
message += f" at {file_name}"
warnings.warn(message)
operation_ids.add(operation_id)
operation["operationId"] = operation_id
if route.deprecated:
operation["deprecated"] = route.deprecated
return operation
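

# --- Usage sketch (editor's addition, not part of the library source) --------
# The operation summary falls back to a title-cased endpoint name;
# `list_users` is a demo-only endpoint.
def _demo_operation_metadata() -> None:  # pragma: no cover
    async def list_users() -> List[str]:
        return []

    route = routing.APIRoute("/users", list_users, methods=["GET"])
    operation = get_openapi_operation_metadata(
        route=route, method="GET", operation_ids=set()
    )
    assert operation["summary"] == "List Users"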
def get_openapi_path(
*, route: routing.APIRoute, model_name_map: Dict[type, str], operation_ids: Set[str]
) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
path = {}
security_schemes: Dict[str, Any] = {}
definitions: Dict[str, Any] = {}
assert route.methods is not None, "Methods must be a list"
if isinstance(route.response_class, DefaultPlaceholder):
current_response_class: Type[Response] = route.response_class.value
else:
current_response_class = route.response_class
assert current_response_class, "A response class is needed to generate OpenAPI"
route_response_media_type: Optional[str] = current_response_class.media_type
if route.include_in_schema:
for method in route.methods:
operation = get_openapi_operation_metadata(
route=route, method=method, operation_ids=operation_ids
)
parameters: List[Dict[str, Any]] = []
flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True)
security_definitions, operation_security = get_openapi_security_definitions(
flat_dependant=flat_dependant
)
if operation_security:
operation.setdefault("security", []).extend(operation_security)
if security_definitions:
security_schemes.update(security_definitions)
all_route_params = get_flat_params(route.dependant)
operation_parameters = get_openapi_operation_parameters(
all_route_params=all_route_params, model_name_map=model_name_map
)
parameters.extend(operation_parameters)
if parameters:
all_parameters = {
(param["in"], param["name"]): param for param in parameters
}
required_parameters = {
(param["in"], param["name"]): param
for param in parameters
if param.get("required")
}
# Make sure required definitions of the same parameter take precedence
# over non-required definitions
all_parameters.update(required_parameters)
operation["parameters"] = list(all_parameters.values())
if method in METHODS_WITH_BODY:
request_body_oai = get_openapi_operation_request_body(
body_field=route.body_field, model_name_map=model_name_map
)
if request_body_oai:
operation["requestBody"] = request_body_oai
if route.callbacks:
callbacks = {}
for callback in route.callbacks:
if isinstance(callback, routing.APIRoute):
(
cb_path,
cb_security_schemes,
cb_definitions,
) = get_openapi_path(
route=callback,
model_name_map=model_name_map,
operation_ids=operation_ids,
)
callbacks[callback.name] = {callback.path: cb_path}
operation["callbacks"] = callbacks
if route.status_code is not None:
status_code = str(route.status_code)
else:
                # It would probably make more sense for all response classes to
                # have an explicit default status_code, and to extract it from
                # them, instead of these inspection tricks; that will probably
                # change in the future.
                # TODO: probably make status_code a default class attribute for
                # all responses in Starlette
response_signature = inspect.signature(current_response_class.__init__)
status_code_param = response_signature.parameters.get("status_code")
if status_code_param is not None:
if isinstance(status_code_param.default, int):
status_code = str(status_code_param.default)
operation.setdefault("responses", {}).setdefault(status_code, {})[
"description"
] = route.response_description
if route_response_media_type and is_body_allowed_for_status_code(
route.status_code
):
response_schema = {"type": "string"}
if lenient_issubclass(current_response_class, JSONResponse):
if route.response_field:
response_schema, _, _ = field_schema(
route.response_field,
model_name_map=model_name_map,
ref_prefix=REF_PREFIX,
)
else:
response_schema = {}
operation.setdefault("responses", {}).setdefault(
status_code, {}
).setdefault("content", {}).setdefault(route_response_media_type, {})[
"schema"
] = response_schema
if route.responses:
operation_responses = operation.setdefault("responses", {})
for (
additional_status_code,
additional_response,
) in route.responses.items():
process_response = additional_response.copy()
process_response.pop("model", None)
status_code_key = str(additional_status_code).upper()
if status_code_key == "DEFAULT":
status_code_key = "default"
openapi_response = operation_responses.setdefault(
status_code_key, {}
)
assert isinstance(
process_response, dict
), "An additional response must be a dict"
field = route.response_fields.get(additional_status_code)
additional_field_schema: Optional[Dict[str, Any]] = None
if field:
additional_field_schema, _, _ = field_schema(
field, model_name_map=model_name_map, ref_prefix=REF_PREFIX
)
media_type = route_response_media_type or "application/json"
additional_schema = (
process_response.setdefault("content", {})
.setdefault(media_type, {})
.setdefault("schema", {})
)
deep_dict_update(additional_schema, additional_field_schema)
status_text: Optional[str] = status_code_ranges.get(
str(additional_status_code).upper()
) or http.client.responses.get(int(additional_status_code))
description = (
process_response.get("description")
or openapi_response.get("description")
or status_text
or "Additional Response"
)
deep_dict_update(openapi_response, process_response)
openapi_response["description"] = description
http422 = str(HTTP_422_UNPROCESSABLE_ENTITY)
            if (all_route_params or route.body_field) and not any(
                status in operation["responses"]
                for status in [http422, "4XX", "default"]
            ):
operation["responses"][http422] = {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {"$ref": REF_PREFIX + "HTTPValidationError"}
}
},
}
if "ValidationError" not in definitions:
definitions.update(
{
"ValidationError": validation_error_definition,
"HTTPValidationError": validation_error_response_definition,
}
)
if route.openapi_extra:
deep_dict_update(operation, route.openapi_extra)
path[method.lower()] = operation
return path, security_schemes, definitions
def get_flat_models_from_routes(
routes: Sequence[BaseRoute],
) -> Set[Union[Type[BaseModel], Type[Enum]]]:
body_fields_from_routes: List[ModelField] = []
responses_from_routes: List[ModelField] = []
request_fields_from_routes: List[ModelField] = []
callback_flat_models: Set[Union[Type[BaseModel], Type[Enum]]] = set()
for route in routes:
if getattr(route, "include_in_schema", None) and isinstance(
route, routing.APIRoute
):
if route.body_field:
assert isinstance(
route.body_field, ModelField
), "A request body must be a Pydantic Field"
body_fields_from_routes.append(route.body_field)
if route.response_field:
responses_from_routes.append(route.response_field)
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
if route.callbacks:
callback_flat_models |= get_flat_models_from_routes(route.callbacks)
params = get_flat_params(route.dependant)
request_fields_from_routes.extend(params)
flat_models = callback_flat_models | get_flat_models_from_fields(
body_fields_from_routes + responses_from_routes + request_fields_from_routes,
known_models=set(),
)
return flat_models
def get_openapi(
*,
title: str,
version: str,
openapi_version: str = "3.0.2",
description: Optional[str] = None,
routes: Sequence[BaseRoute],
tags: Optional[List[Dict[str, Any]]] = None,
servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
terms_of_service: Optional[str] = None,
contact: Optional[Dict[str, Union[str, Any]]] = None,
license_info: Optional[Dict[str, Union[str, Any]]] = None,
) -> Dict[str, Any]:
info: Dict[str, Any] = {"title": title, "version": version}
if description:
info["description"] = description
if terms_of_service:
info["termsOfService"] = terms_of_service
if contact:
info["contact"] = contact
if license_info:
info["license"] = license_info
output: Dict[str, Any] = {"openapi": openapi_version, "info": info}
if servers:
output["servers"] = servers
components: Dict[str, Dict[str, Any]] = {}
paths: Dict[str, Dict[str, Any]] = {}
operation_ids: Set[str] = set()
flat_models = get_flat_models_from_routes(routes)
model_name_map = get_model_name_map(flat_models)
definitions = get_model_definitions(
flat_models=flat_models, model_name_map=model_name_map
)
for route in routes:
if isinstance(route, routing.APIRoute):
result = get_openapi_path(
route=route, model_name_map=model_name_map, operation_ids=operation_ids
)
if result:
path, security_schemes, path_definitions = result
if path:
paths.setdefault(route.path_format, {}).update(path)
if security_schemes:
components.setdefault("securitySchemes", {}).update(
security_schemes
)
if path_definitions:
definitions.update(path_definitions)
if definitions:
components["schemas"] = {k: definitions[k] for k in sorted(definitions)}
if components:
output["components"] = components
output["paths"] = paths
if tags:
output["tags"] = tags
return jsonable_encoder(OpenAPI(**output), by_alias=True, exclude_none=True) # type: ignore
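

# --- Usage sketch (editor's addition, not part of the library source) --------
# Overriding an application's schema generation with get_openapi(), caching
# the result on the app so it is built only once; the metadata is hypothetical.
def _demo_custom_openapi() -> None:  # pragma: no cover
    from fastapi import FastAPI

    app = FastAPI()

    def custom_openapi() -> Dict[str, Any]:
        if app.openapi_schema:
            return app.openapi_schema
        app.openapi_schema = get_openapi(
            title="Demo API",
            version="0.1.0",
            description="Schema built directly with get_openapi().",
            routes=app.routes,
        )
        return app.openapi_schema

    app.openapi = custom_openapi  # type: ignore[method-assign]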
| 18,808 | Python | 40.890869 | 96 | 0.573905 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/openapi/models.py | from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
from fastapi.logger import logger
from pydantic import AnyUrl, BaseModel, Field
try:
import email_validator # type: ignore
assert email_validator # make autoflake ignore the unused import
from pydantic import EmailStr
except ImportError: # pragma: no cover
class EmailStr(str): # type: ignore
@classmethod
def __get_validators__(cls) -> Iterable[Callable[..., Any]]:
yield cls.validate
@classmethod
def validate(cls, v: Any) -> str:
logger.warning(
"email-validator not installed, email fields will be treated as str.\n"
"To install, run: pip install email-validator"
)
return str(v)
class Contact(BaseModel):
name: Optional[str] = None
url: Optional[AnyUrl] = None
email: Optional[EmailStr] = None
class Config:
extra = "allow"
class License(BaseModel):
name: str
url: Optional[AnyUrl] = None
class Config:
extra = "allow"
class Info(BaseModel):
title: str
description: Optional[str] = None
termsOfService: Optional[str] = None
contact: Optional[Contact] = None
license: Optional[License] = None
version: str
class Config:
extra = "allow"
class ServerVariable(BaseModel):
enum: Optional[List[str]] = None
default: str
description: Optional[str] = None
class Config:
extra = "allow"
class Server(BaseModel):
url: Union[AnyUrl, str]
description: Optional[str] = None
variables: Optional[Dict[str, ServerVariable]] = None
class Config:
extra = "allow"
class Reference(BaseModel):
ref: str = Field(alias="$ref")
class Discriminator(BaseModel):
propertyName: str
mapping: Optional[Dict[str, str]] = None
class XML(BaseModel):
name: Optional[str] = None
namespace: Optional[str] = None
prefix: Optional[str] = None
attribute: Optional[bool] = None
wrapped: Optional[bool] = None
class Config:
extra = "allow"
class ExternalDocumentation(BaseModel):
description: Optional[str] = None
url: AnyUrl
class Config:
extra = "allow"
class Schema(BaseModel):
ref: Optional[str] = Field(default=None, alias="$ref")
title: Optional[str] = None
multipleOf: Optional[float] = None
maximum: Optional[float] = None
exclusiveMaximum: Optional[float] = None
minimum: Optional[float] = None
exclusiveMinimum: Optional[float] = None
maxLength: Optional[int] = Field(default=None, gte=0)
minLength: Optional[int] = Field(default=None, gte=0)
pattern: Optional[str] = None
maxItems: Optional[int] = Field(default=None, gte=0)
minItems: Optional[int] = Field(default=None, gte=0)
uniqueItems: Optional[bool] = None
maxProperties: Optional[int] = Field(default=None, gte=0)
minProperties: Optional[int] = Field(default=None, gte=0)
required: Optional[List[str]] = None
enum: Optional[List[Any]] = None
type: Optional[str] = None
allOf: Optional[List["Schema"]] = None
oneOf: Optional[List["Schema"]] = None
anyOf: Optional[List["Schema"]] = None
not_: Optional["Schema"] = Field(default=None, alias="not")
items: Optional[Union["Schema", List["Schema"]]] = None
properties: Optional[Dict[str, "Schema"]] = None
additionalProperties: Optional[Union["Schema", Reference, bool]] = None
description: Optional[str] = None
format: Optional[str] = None
default: Optional[Any] = None
nullable: Optional[bool] = None
discriminator: Optional[Discriminator] = None
readOnly: Optional[bool] = None
writeOnly: Optional[bool] = None
xml: Optional[XML] = None
externalDocs: Optional[ExternalDocumentation] = None
example: Optional[Any] = None
deprecated: Optional[bool] = None
class Config:
extra: str = "allow"
class Example(BaseModel):
summary: Optional[str] = None
description: Optional[str] = None
value: Optional[Any] = None
externalValue: Optional[AnyUrl] = None
class Config:
extra = "allow"
class ParameterInType(Enum):
query = "query"
header = "header"
path = "path"
cookie = "cookie"
class Encoding(BaseModel):
contentType: Optional[str] = None
headers: Optional[Dict[str, Union["Header", Reference]]] = None
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
class Config:
extra = "allow"
class MediaType(BaseModel):
schema_: Optional[Union[Schema, Reference]] = Field(default=None, alias="schema")
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
encoding: Optional[Dict[str, Encoding]] = None
class Config:
extra = "allow"
class ParameterBase(BaseModel):
description: Optional[str] = None
required: Optional[bool] = None
deprecated: Optional[bool] = None
# Serialization rules for simple scenarios
style: Optional[str] = None
explode: Optional[bool] = None
allowReserved: Optional[bool] = None
schema_: Optional[Union[Schema, Reference]] = Field(default=None, alias="schema")
example: Optional[Any] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
# Serialization rules for more complex scenarios
content: Optional[Dict[str, MediaType]] = None
class Config:
extra = "allow"
class Parameter(ParameterBase):
name: str
in_: ParameterInType = Field(alias="in")
class Header(ParameterBase):
pass
class RequestBody(BaseModel):
description: Optional[str] = None
content: Dict[str, MediaType]
required: Optional[bool] = None
class Config:
extra = "allow"
class Link(BaseModel):
operationRef: Optional[str] = None
operationId: Optional[str] = None
parameters: Optional[Dict[str, Union[Any, str]]] = None
requestBody: Optional[Union[Any, str]] = None
description: Optional[str] = None
server: Optional[Server] = None
class Config:
extra = "allow"
class Response(BaseModel):
description: str
headers: Optional[Dict[str, Union[Header, Reference]]] = None
content: Optional[Dict[str, MediaType]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
class Config:
extra = "allow"
class Operation(BaseModel):
tags: Optional[List[str]] = None
summary: Optional[str] = None
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
operationId: Optional[str] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
requestBody: Optional[Union[RequestBody, Reference]] = None
# Using Any for Specification Extensions
responses: Dict[str, Union[Response, Any]]
callbacks: Optional[Dict[str, Union[Dict[str, "PathItem"], Reference]]] = None
deprecated: Optional[bool] = None
security: Optional[List[Dict[str, List[str]]]] = None
servers: Optional[List[Server]] = None
class Config:
extra = "allow"
class PathItem(BaseModel):
ref: Optional[str] = Field(default=None, alias="$ref")
summary: Optional[str] = None
description: Optional[str] = None
get: Optional[Operation] = None
put: Optional[Operation] = None
post: Optional[Operation] = None
delete: Optional[Operation] = None
options: Optional[Operation] = None
head: Optional[Operation] = None
patch: Optional[Operation] = None
trace: Optional[Operation] = None
servers: Optional[List[Server]] = None
parameters: Optional[List[Union[Parameter, Reference]]] = None
class Config:
extra = "allow"
class SecuritySchemeType(Enum):
apiKey = "apiKey"
http = "http"
oauth2 = "oauth2"
openIdConnect = "openIdConnect"
class SecurityBase(BaseModel):
type_: SecuritySchemeType = Field(alias="type")
description: Optional[str] = None
class Config:
extra = "allow"
class APIKeyIn(Enum):
query = "query"
header = "header"
cookie = "cookie"
class APIKey(SecurityBase):
type_ = Field(SecuritySchemeType.apiKey, alias="type")
in_: APIKeyIn = Field(alias="in")
name: str
class HTTPBase(SecurityBase):
type_ = Field(SecuritySchemeType.http, alias="type")
scheme: str
class HTTPBearer(HTTPBase):
scheme = "bearer"
bearerFormat: Optional[str] = None
class OAuthFlow(BaseModel):
refreshUrl: Optional[str] = None
scopes: Dict[str, str] = {}
class Config:
extra = "allow"
class OAuthFlowImplicit(OAuthFlow):
authorizationUrl: str
class OAuthFlowPassword(OAuthFlow):
tokenUrl: str
class OAuthFlowClientCredentials(OAuthFlow):
tokenUrl: str
class OAuthFlowAuthorizationCode(OAuthFlow):
authorizationUrl: str
tokenUrl: str
class OAuthFlows(BaseModel):
implicit: Optional[OAuthFlowImplicit] = None
password: Optional[OAuthFlowPassword] = None
clientCredentials: Optional[OAuthFlowClientCredentials] = None
authorizationCode: Optional[OAuthFlowAuthorizationCode] = None
class Config:
extra = "allow"
class OAuth2(SecurityBase):
type_ = Field(SecuritySchemeType.oauth2, alias="type")
flows: OAuthFlows
class OpenIdConnect(SecurityBase):
type_ = Field(SecuritySchemeType.openIdConnect, alias="type")
openIdConnectUrl: str
SecurityScheme = Union[APIKey, HTTPBase, OAuth2, OpenIdConnect, HTTPBearer]
class Components(BaseModel):
schemas: Optional[Dict[str, Union[Schema, Reference]]] = None
responses: Optional[Dict[str, Union[Response, Reference]]] = None
parameters: Optional[Dict[str, Union[Parameter, Reference]]] = None
examples: Optional[Dict[str, Union[Example, Reference]]] = None
requestBodies: Optional[Dict[str, Union[RequestBody, Reference]]] = None
headers: Optional[Dict[str, Union[Header, Reference]]] = None
securitySchemes: Optional[Dict[str, Union[SecurityScheme, Reference]]] = None
links: Optional[Dict[str, Union[Link, Reference]]] = None
# Using Any for Specification Extensions
callbacks: Optional[Dict[str, Union[Dict[str, PathItem], Reference, Any]]] = None
class Config:
extra = "allow"
class Tag(BaseModel):
name: str
description: Optional[str] = None
externalDocs: Optional[ExternalDocumentation] = None
class Config:
extra = "allow"
class OpenAPI(BaseModel):
openapi: str
info: Info
servers: Optional[List[Server]] = None
# Using Any for Specification Extensions
paths: Dict[str, Union[PathItem, Any]]
components: Optional[Components] = None
security: Optional[List[Dict[str, List[str]]]] = None
tags: Optional[List[Tag]] = None
externalDocs: Optional[ExternalDocumentation] = None
class Config:
extra = "allow"
Schema.update_forward_refs()
Operation.update_forward_refs()
Encoding.update_forward_refs()
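# Hedged usage sketch (illustrative only, not part of the upstream module):
# the pydantic models above can be instantiated directly to describe a
# minimal OpenAPI document. The title/version strings are assumptions made
# for the example.
def _example_minimal_openapi() -> OpenAPI:
    doc = OpenAPI(
        openapi="3.0.2",
        info=Info(title="Example API", version="0.1.0"),
        paths={},
    )
    # by_alias keeps spec field names such as "$ref" and "in" on export.
    assert doc.dict(by_alias=True, exclude_none=True)["openapi"] == "3.0.2"
    return doc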
| 11,027 | Python | 26.095823 | 87 | 0.673166 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/wsgi.py | from starlette.middleware.wsgi import WSGIMiddleware as WSGIMiddleware # noqa
| 79 | Python | 38.999981 | 78 | 0.848101 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/gzip.py | from starlette.middleware.gzip import GZipMiddleware as GZipMiddleware # noqa
| 79 | Python | 38.999981 | 78 | 0.848101 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/httpsredirect.py | from starlette.middleware.httpsredirect import ( # noqa
HTTPSRedirectMiddleware as HTTPSRedirectMiddleware,
)
| 115 | Python | 27.999993 | 56 | 0.817391 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/cors.py | from starlette.middleware.cors import CORSMiddleware as CORSMiddleware # noqa
| 79 | Python | 38.999981 | 78 | 0.848101 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/__init__.py | from starlette.middleware import Middleware as Middleware
| 58 | Python | 28.499986 | 57 | 0.87931 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/trustedhost.py | from starlette.middleware.trustedhost import ( # noqa
TrustedHostMiddleware as TrustedHostMiddleware,
)
| 109 | Python | 26.499993 | 54 | 0.807339 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/middleware/asyncexitstack.py | from typing import Optional
from fastapi.concurrency import AsyncExitStack
from starlette.types import ASGIApp, Receive, Scope, Send
class AsyncExitStackMiddleware:
def __init__(self, app: ASGIApp, context_name: str = "fastapi_astack") -> None:
self.app = app
self.context_name = context_name
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if AsyncExitStack:
dependency_exception: Optional[Exception] = None
async with AsyncExitStack() as stack:
scope[self.context_name] = stack
try:
await self.app(scope, receive, send)
except Exception as e:
dependency_exception = e
raise e
if dependency_exception:
# This exception was possibly handled by the dependency but it should
# still bubble up so that the ServerErrorMiddleware can return a 500
# or the ExceptionMiddleware can catch and handle any other exceptions
raise dependency_exception
else:
await self.app(scope, receive, send) # pragma: no cover
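# Hedged usage sketch (illustrative only, not part of the upstream module):
# the middleware wraps any ASGI callable and stashes an AsyncExitStack in the
# scope before delegating. The `inner_app` coroutine is an assumption made
# for the example.
async def _example_wrapped_app(scope: Scope, receive: Receive, send: Send) -> None:
    async def inner_app(scope: Scope, receive: Receive, send: Send) -> None:
        # Dependencies resolved during this call can register cleanup
        # callbacks on scope["fastapi_astack"]; they run when the stack exits.
        assert "fastapi_astack" in scope

    await AsyncExitStackMiddleware(inner_app)(scope, receive, send)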
| 1,197 | Python | 40.310343 | 86 | 0.609858 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/oauth2.py | from typing import Any, Dict, List, Optional, Union
from fastapi.exceptions import HTTPException
from fastapi.openapi.models import OAuth2 as OAuth2Model
from fastapi.openapi.models import OAuthFlows as OAuthFlowsModel
from fastapi.param_functions import Form
from fastapi.security.base import SecurityBase
from fastapi.security.utils import get_authorization_scheme_param
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
class OAuth2PasswordRequestForm:
"""
    This is a dependency class; use it like:
@app.post("/login")
def login(form_data: OAuth2PasswordRequestForm = Depends()):
            print(form_data.username)
            print(form_data.password)
            for scope in form_data.scopes:
                print(scope)
            if form_data.client_id:
                print(form_data.client_id)
            if form_data.client_secret:
                print(form_data.client_secret)
            return form_data
It creates the following Form request parameters in your endpoint:
grant_type: the OAuth2 spec says it is required and MUST be the fixed string "password".
        Nevertheless, this dependency class is permissive and allows not passing it. If you want to enforce it,
        use the OAuth2PasswordRequestFormStrict dependency instead.
username: username string. The OAuth2 spec requires the exact field name "username".
password: password string. The OAuth2 spec requires the exact field name "password".
scope: Optional string. Several scopes (each one a string) separated by spaces. E.g.
"items:read items:write users:read profile openid"
client_id: optional string. OAuth2 recommends sending the client_id and client_secret (if any)
using HTTP Basic auth, as: client_id:client_secret
client_secret: optional string. OAuth2 recommends sending the client_id and client_secret (if any)
using HTTP Basic auth, as: client_id:client_secret
"""
def __init__(
self,
grant_type: str = Form(default=None, regex="password"),
username: str = Form(),
password: str = Form(),
scope: str = Form(default=""),
client_id: Optional[str] = Form(default=None),
client_secret: Optional[str] = Form(default=None),
):
self.grant_type = grant_type
self.username = username
self.password = password
self.scopes = scope.split()
self.client_id = client_id
self.client_secret = client_secret
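# Hedged usage sketch (illustrative only, not part of the upstream module):
# how an endpoint typically consumes this dependency. The app object and
# route path are assumptions made for the example.
def _example_password_login() -> None:
    from fastapi import Depends, FastAPI

    app = FastAPI()

    @app.post("/login")
    def login(form_data: OAuth2PasswordRequestForm = Depends()) -> dict:
        # The parsed form fields are plain attributes on the dependency.
        return {"username": form_data.username, "scopes": form_data.scopes}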
class OAuth2PasswordRequestFormStrict(OAuth2PasswordRequestForm):
"""
    This is a dependency class; use it like:
@app.post("/login")
def login(form_data: OAuth2PasswordRequestFormStrict = Depends()):
            print(form_data.username)
            print(form_data.password)
            for scope in form_data.scopes:
                print(scope)
            if form_data.client_id:
                print(form_data.client_id)
            if form_data.client_secret:
                print(form_data.client_secret)
            return form_data
It creates the following Form request parameters in your endpoint:
grant_type: the OAuth2 spec says it is required and MUST be the fixed string "password".
        This dependency is strict about it. If you want to be permissive, use the
        OAuth2PasswordRequestForm dependency class instead.
username: username string. The OAuth2 spec requires the exact field name "username".
password: password string. The OAuth2 spec requires the exact field name "password".
scope: Optional string. Several scopes (each one a string) separated by spaces. E.g.
"items:read items:write users:read profile openid"
client_id: optional string. OAuth2 recommends sending the client_id and client_secret (if any)
using HTTP Basic auth, as: client_id:client_secret
client_secret: optional string. OAuth2 recommends sending the client_id and client_secret (if any)
using HTTP Basic auth, as: client_id:client_secret
"""
def __init__(
self,
grant_type: str = Form(regex="password"),
username: str = Form(),
password: str = Form(),
scope: str = Form(default=""),
client_id: Optional[str] = Form(default=None),
client_secret: Optional[str] = Form(default=None),
):
super().__init__(
grant_type=grant_type,
username=username,
password=password,
scope=scope,
client_id=client_id,
client_secret=client_secret,
)
class OAuth2(SecurityBase):
def __init__(
self,
*,
flows: Union[OAuthFlowsModel, Dict[str, Dict[str, Any]]] = OAuthFlowsModel(),
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True
):
self.model = OAuth2Model(flows=flows, description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
if not authorization:
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return authorization
class OAuth2PasswordBearer(OAuth2):
def __init__(
self,
tokenUrl: str,
scheme_name: Optional[str] = None,
scopes: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
if not scopes:
scopes = {}
flows = OAuthFlowsModel(password={"tokenUrl": tokenUrl, "scopes": scopes})
super().__init__(
flows=flows,
scheme_name=scheme_name,
description=description,
auto_error=auto_error,
)
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if not authorization or scheme.lower() != "bearer":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
return param
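# Hedged usage sketch (illustrative only, not part of the upstream module):
# a protected endpoint receiving the raw bearer token string. The tokenUrl
# and route path are assumptions made for the example.
def _example_bearer_protected_route() -> None:
    from fastapi import Depends, FastAPI

    app = FastAPI()
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    @app.get("/items/")
    def read_items(token: str = Depends(oauth2_scheme)) -> dict:
        # `token` is the value after "Bearer " in the Authorization header;
        # a missing or malformed header already raised 401 above.
        return {"token": token}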
class OAuth2AuthorizationCodeBearer(OAuth2):
def __init__(
self,
authorizationUrl: str,
tokenUrl: str,
refreshUrl: Optional[str] = None,
scheme_name: Optional[str] = None,
scopes: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
if not scopes:
scopes = {}
flows = OAuthFlowsModel(
authorizationCode={
"authorizationUrl": authorizationUrl,
"tokenUrl": tokenUrl,
"refreshUrl": refreshUrl,
"scopes": scopes,
}
)
super().__init__(
flows=flows,
scheme_name=scheme_name,
description=description,
auto_error=auto_error,
)
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if not authorization or scheme.lower() != "bearer":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None # pragma: nocover
return param
class SecurityScopes:
def __init__(self, scopes: Optional[List[str]] = None):
self.scopes = scopes or []
self.scope_str = " ".join(self.scopes)
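# Hedged usage sketch (illustrative only, not part of the upstream module):
# SecurityScopes receives the union of scopes declared along the dependency
# chain. The scheme, scope name, and route below are assumptions made for
# the example.
def _example_security_scopes() -> None:
    from fastapi import Depends, FastAPI, Security

    app = FastAPI()
    oauth2_scheme = OAuth2PasswordBearer(
        tokenUrl="token", scopes={"items:read": "Read items."}
    )

    def current_user(
        security_scopes: SecurityScopes, token: str = Depends(oauth2_scheme)
    ) -> dict:
        # scope_str is ready to embed in a WWW-Authenticate header value.
        return {"required": security_scopes.scopes, "header": security_scopes.scope_str}

    @app.get("/items/")
    def read_items(user: dict = Security(current_user, scopes=["items:read"])) -> dict:
        return user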
| 8,197 | Python | 36.095022 | 111 | 0.606197 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/http.py | import binascii
from base64 import b64decode
from typing import Optional
from fastapi.exceptions import HTTPException
from fastapi.openapi.models import HTTPBase as HTTPBaseModel
from fastapi.openapi.models import HTTPBearer as HTTPBearerModel
from fastapi.security.base import SecurityBase
from fastapi.security.utils import get_authorization_scheme_param
from pydantic import BaseModel
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
class HTTPBasicCredentials(BaseModel):
username: str
password: str
class HTTPAuthorizationCredentials(BaseModel):
scheme: str
credentials: str
class HTTPBase(SecurityBase):
def __init__(
self,
*,
scheme: str,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
self.model = HTTPBaseModel(scheme=scheme, description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPBasic(HTTPBase):
def __init__(
self,
*,
scheme_name: Optional[str] = None,
realm: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
self.model = HTTPBaseModel(scheme="basic", description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.realm = realm
self.auto_error = auto_error
async def __call__( # type: ignore
self, request: Request
) -> Optional[HTTPBasicCredentials]:
authorization = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if self.realm:
unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'}
else:
unauthorized_headers = {"WWW-Authenticate": "Basic"}
invalid_user_credentials_exc = HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers=unauthorized_headers,
)
if not authorization or scheme.lower() != "basic":
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers=unauthorized_headers,
)
else:
return None
try:
data = b64decode(param).decode("ascii")
except (ValueError, UnicodeDecodeError, binascii.Error):
raise invalid_user_credentials_exc
username, separator, password = data.partition(":")
if not separator:
raise invalid_user_credentials_exc
return HTTPBasicCredentials(username=username, password=password)
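# Hedged usage sketch (illustrative only, not part of the upstream module):
# the dependency hands the endpoint already-decoded credentials. The route
# path is an assumption made for the example.
def _example_basic_auth_route() -> None:
    from fastapi import Depends, FastAPI

    app = FastAPI()
    security = HTTPBasic()

    @app.get("/whoami")
    def whoami(credentials: HTTPBasicCredentials = Depends(security)) -> dict:
        # In real code, compare against stored secrets with
        # secrets.compare_digest to avoid timing attacks.
        return {"username": credentials.username}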
class HTTPBearer(HTTPBase):
def __init__(
self,
*,
bearerFormat: Optional[str] = None,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "bearer":
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
class HTTPDigest(HTTPBase):
def __init__(
self,
*,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True,
):
self.model = HTTPBaseModel(scheme="digest", description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
if scheme.lower() != "digest":
raise HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Invalid authentication credentials",
)
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
| 5,964 | Python | 34.933735 | 88 | 0.614353 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/open_id_connect_url.py | from typing import Optional
from fastapi.openapi.models import OpenIdConnect as OpenIdConnectModel
from fastapi.security.base import SecurityBase
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.status import HTTP_403_FORBIDDEN
class OpenIdConnect(SecurityBase):
def __init__(
self,
*,
openIdConnectUrl: str,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True
):
self.model = OpenIdConnectModel(
openIdConnectUrl=openIdConnectUrl, description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
if not authorization:
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return authorization
| 1,140 | Python | 31.599999 | 78 | 0.641228 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/base.py | from fastapi.openapi.models import SecurityBase as SecurityBaseModel
class SecurityBase:
model: SecurityBaseModel
scheme_name: str
| 141 | Python | 19.285712 | 68 | 0.801418 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/__init__.py | from .api_key import APIKeyCookie as APIKeyCookie
from .api_key import APIKeyHeader as APIKeyHeader
from .api_key import APIKeyQuery as APIKeyQuery
from .http import HTTPAuthorizationCredentials as HTTPAuthorizationCredentials
from .http import HTTPBasic as HTTPBasic
from .http import HTTPBasicCredentials as HTTPBasicCredentials
from .http import HTTPBearer as HTTPBearer
from .http import HTTPDigest as HTTPDigest
from .oauth2 import OAuth2 as OAuth2
from .oauth2 import OAuth2AuthorizationCodeBearer as OAuth2AuthorizationCodeBearer
from .oauth2 import OAuth2PasswordBearer as OAuth2PasswordBearer
from .oauth2 import OAuth2PasswordRequestForm as OAuth2PasswordRequestForm
from .oauth2 import OAuth2PasswordRequestFormStrict as OAuth2PasswordRequestFormStrict
from .oauth2 import SecurityScopes as SecurityScopes
from .open_id_connect_url import OpenIdConnect as OpenIdConnect
| 881 | Python | 54.124997 | 86 | 0.874007 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/api_key.py | from typing import Optional
from fastapi.openapi.models import APIKey, APIKeyIn
from fastapi.security.base import SecurityBase
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.status import HTTP_403_FORBIDDEN
class APIKeyBase(SecurityBase):
pass
class APIKeyQuery(APIKeyBase):
def __init__(
self,
*,
name: str,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True
):
self.model: APIKey = APIKey(
**{"in": APIKeyIn.query}, name=name, description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
api_key = request.query_params.get(self.model.name)
if not api_key:
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return api_key
class APIKeyHeader(APIKeyBase):
def __init__(
self,
*,
name: str,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True
):
self.model: APIKey = APIKey(
**{"in": APIKeyIn.header}, name=name, description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
api_key = request.headers.get(self.model.name)
if not api_key:
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return api_key
class APIKeyCookie(APIKeyBase):
def __init__(
self,
*,
name: str,
scheme_name: Optional[str] = None,
description: Optional[str] = None,
auto_error: bool = True
):
self.model: APIKey = APIKey(
**{"in": APIKeyIn.cookie}, name=name, description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(self, request: Request) -> Optional[str]:
api_key = request.cookies.get(self.model.name)
if not api_key:
if self.auto_error:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
)
else:
return None
return api_key
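# Hedged usage sketch (illustrative only, not part of the upstream module):
# each dependency yields the raw key string from its location. The header
# name and route path are assumptions made for the example.
def _example_api_key_header() -> None:
    from fastapi import Depends, FastAPI

    app = FastAPI()
    api_key_header = APIKeyHeader(name="X-API-Key")

    @app.get("/secure")
    def secure(api_key: str = Depends(api_key_header)) -> dict:
        # A missing header already raised 403 via auto_error=True.
        return {"api_key": api_key}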
| 2,783 | Python | 28.935484 | 78 | 0.563061 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/fastapi/security/utils.py | from typing import Optional, Tuple
def get_authorization_scheme_param(
authorization_header_value: Optional[str],
) -> Tuple[str, str]:
if not authorization_header_value:
return "", ""
scheme, _, param = authorization_header_value.partition(" ")
return scheme, param
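# Hedged examples of the split behaviour (illustrative only):
#   get_authorization_scheme_param("Bearer abc.def") -> ("Bearer", "abc.def")
#   get_authorization_scheme_param("Basic")          -> ("Basic", "")
#   get_authorization_scheme_param(None)             -> ("", "")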
| 293 | Python | 25.72727 | 64 | 0.686007 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multidict/_abc.py | import abc
import sys
import types
from collections.abc import Mapping, MutableMapping
class _TypingMeta(abc.ABCMeta):
    # A fake metaclass to satisfy typing dependencies at runtime.
    # Basically, MultiMapping[str] and other generic-like type
    # instantiations are emulated.
    # Note: real type hints are provided by the __init__.pyi stub file.
if sys.version_info >= (3, 9):
def __getitem__(self, key):
return types.GenericAlias(self, key)
else:
def __getitem__(self, key):
return self
class MultiMapping(Mapping, metaclass=_TypingMeta):
@abc.abstractmethod
def getall(self, key, default=None):
raise KeyError
@abc.abstractmethod
def getone(self, key, default=None):
raise KeyError
class MutableMultiMapping(MultiMapping, MutableMapping):
@abc.abstractmethod
def add(self, key, value):
raise NotImplementedError
@abc.abstractmethod
def extend(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def popone(self, key, default=None):
raise KeyError
@abc.abstractmethod
def popall(self, key, default=None):
raise KeyError
| 1,190 | Python | 23.306122 | 76 | 0.670588 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multidict/_compat.py | import os
import platform
NO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS"))
PYPY = platform.python_implementation() == "PyPy"
USE_EXTENSIONS = not NO_EXTENSIONS and not PYPY
if USE_EXTENSIONS:
try:
from . import _multidict # noqa
except ImportError:
USE_EXTENSIONS = False
| 316 | Python | 20.133332 | 63 | 0.699367 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multidict/__init__.py | """Multidict implementation.
HTTP Headers and URL query string require specific data structure:
multidict. It behaves mostly like a dict but it can have
several values for the same key.
"""
from ._abc import MultiMapping, MutableMultiMapping
from ._compat import USE_EXTENSIONS
__all__ = (
"MultiMapping",
"MutableMultiMapping",
"MultiDictProxy",
"CIMultiDictProxy",
"MultiDict",
"CIMultiDict",
"upstr",
"istr",
"getversion",
)
__version__ = "6.0.4"
try:
if not USE_EXTENSIONS:
raise ImportError
from ._multidict import (
CIMultiDict,
CIMultiDictProxy,
MultiDict,
MultiDictProxy,
getversion,
istr,
)
except ImportError: # pragma: no cover
from ._multidict_py import (
CIMultiDict,
CIMultiDictProxy,
MultiDict,
MultiDictProxy,
getversion,
istr,
)
upstr = istr
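# Hedged usage sketch (illustrative only, not part of the upstream module):
# the core duplicate-key behaviour shared by both backends.
def _example_multidict_basics() -> None:
    md = MultiDict([("a", 1)])
    md.add("a", 2)
    assert md["a"] == 1              # getone(): first match wins
    assert md.getall("a") == [1, 2]  # every value for the key, in order
    v = getversion(md)
    md["a"] = 3                      # assignment replaces and drops duplicates
    assert md.getall("a") == [3] and getversion(md) > v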
| 928 | Python | 17.959183 | 66 | 0.625 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multidict/_multidict_py.py | import sys
import types
from array import array
from collections import abc
from ._abc import MultiMapping, MutableMultiMapping
_marker = object()
if sys.version_info >= (3, 9):
GenericAlias = types.GenericAlias
else:
def GenericAlias(cls):
return cls
class istr(str):
"""Case insensitive str."""
__is_istr__ = True
upstr = istr # for relaxing backward compatibility problems
def getversion(md):
if not isinstance(md, _Base):
raise TypeError("Parameter should be multidict or proxy")
return md._impl._version
_version = array("Q", [0])
class _Impl:
__slots__ = ("_items", "_version")
def __init__(self):
self._items = []
self.incr_version()
def incr_version(self):
global _version
v = _version
v[0] += 1
self._version = v[0]
if sys.implementation.name != "pypy":
def __sizeof__(self):
return object.__sizeof__(self) + sys.getsizeof(self._items)
class _Base:
def _title(self, key):
return key
def getall(self, key, default=_marker):
"""Return a list of all values matching the key."""
identity = self._title(key)
res = [v for i, k, v in self._impl._items if i == identity]
if res:
return res
if not res and default is not _marker:
return default
raise KeyError("Key not found: %r" % key)
def getone(self, key, default=_marker):
"""Get first value matching the key.
Raises KeyError if the key is not found and no default is provided.
"""
identity = self._title(key)
for i, k, v in self._impl._items:
if i == identity:
return v
if default is not _marker:
return default
raise KeyError("Key not found: %r" % key)
# Mapping interface #
def __getitem__(self, key):
return self.getone(key)
def get(self, key, default=None):
"""Get first value matching the key.
If the key is not found, returns the default (or None if no default is provided)
"""
return self.getone(key, default)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self._impl._items)
def keys(self):
"""Return a new view of the dictionary's keys."""
return _KeysView(self._impl)
def items(self):
"""Return a new view of the dictionary's items *(key, value) pairs)."""
return _ItemsView(self._impl)
def values(self):
"""Return a new view of the dictionary's values."""
return _ValuesView(self._impl)
def __eq__(self, other):
if not isinstance(other, abc.Mapping):
return NotImplemented
if isinstance(other, _Base):
lft = self._impl._items
rht = other._impl._items
if len(lft) != len(rht):
return False
            for (i1, k1, v1), (i2, k2, v2) in zip(lft, rht):
if i1 != i2 or v1 != v2:
return False
return True
if len(self._impl._items) != len(other):
return False
for k, v in self.items():
nv = other.get(k, _marker)
if v != nv:
return False
return True
def __contains__(self, key):
identity = self._title(key)
for i, k, v in self._impl._items:
if i == identity:
return True
return False
def __repr__(self):
body = ", ".join("'{}': {!r}".format(k, v) for k, v in self.items())
return "<{}({})>".format(self.__class__.__name__, body)
__class_getitem__ = classmethod(GenericAlias)
class MultiDictProxy(_Base, MultiMapping):
"""Read-only proxy for MultiDict instance."""
def __init__(self, arg):
if not isinstance(arg, (MultiDict, MultiDictProxy)):
raise TypeError(
"ctor requires MultiDict or MultiDictProxy instance"
", not {}".format(type(arg))
)
self._impl = arg._impl
def __reduce__(self):
raise TypeError("can't pickle {} objects".format(self.__class__.__name__))
def copy(self):
"""Return a copy of itself."""
return MultiDict(self.items())
class CIMultiDictProxy(MultiDictProxy):
"""Read-only proxy for CIMultiDict instance."""
def __init__(self, arg):
if not isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
raise TypeError(
"ctor requires CIMultiDict or CIMultiDictProxy instance"
", not {}".format(type(arg))
)
self._impl = arg._impl
def _title(self, key):
return key.title()
def copy(self):
"""Return a copy of itself."""
return CIMultiDict(self.items())
class MultiDict(_Base, MutableMultiMapping):
"""Dictionary with the support for duplicate keys."""
def __init__(self, *args, **kwargs):
self._impl = _Impl()
self._extend(args, kwargs, self.__class__.__name__, self._extend_items)
if sys.implementation.name != "pypy":
def __sizeof__(self):
return object.__sizeof__(self) + sys.getsizeof(self._impl)
def __reduce__(self):
return (self.__class__, (list(self.items()),))
def _title(self, key):
return key
def _key(self, key):
if isinstance(key, str):
return key
else:
            raise TypeError(
                "MultiDict keys should be either str or subclasses of str"
            )
def add(self, key, value):
identity = self._title(key)
self._impl._items.append((identity, self._key(key), value))
self._impl.incr_version()
def copy(self):
"""Return a copy of itself."""
cls = self.__class__
return cls(self.items())
__copy__ = copy
def extend(self, *args, **kwargs):
"""Extend current MultiDict with more values.
This method must be used instead of update.
"""
self._extend(args, kwargs, "extend", self._extend_items)
def _extend(self, args, kwargs, name, method):
if len(args) > 1:
raise TypeError(
"{} takes at most 1 positional argument"
" ({} given)".format(name, len(args))
)
if args:
arg = args[0]
if isinstance(args[0], (MultiDict, MultiDictProxy)) and not kwargs:
items = arg._impl._items
else:
if hasattr(arg, "items"):
arg = arg.items()
if kwargs:
arg = list(arg)
arg.extend(list(kwargs.items()))
items = []
for item in arg:
if not len(item) == 2:
raise TypeError(
"{} takes either dict or list of (key, value) "
"tuples".format(name)
)
items.append((self._title(item[0]), self._key(item[0]), item[1]))
method(items)
else:
method(
[
(self._title(key), self._key(key), value)
for key, value in kwargs.items()
]
)
def _extend_items(self, items):
for identity, key, value in items:
self.add(key, value)
def clear(self):
"""Remove all items from MultiDict."""
self._impl._items.clear()
self._impl.incr_version()
# Mapping interface #
def __setitem__(self, key, value):
self._replace(key, value)
def __delitem__(self, key):
identity = self._title(key)
items = self._impl._items
found = False
for i in range(len(items) - 1, -1, -1):
if items[i][0] == identity:
del items[i]
found = True
if not found:
raise KeyError(key)
else:
self._impl.incr_version()
def setdefault(self, key, default=None):
"""Return value for key, set value to default if key is not present."""
identity = self._title(key)
for i, k, v in self._impl._items:
if i == identity:
return v
self.add(key, default)
return default
def popone(self, key, default=_marker):
"""Remove specified key and return the corresponding value.
        If the key is not found, default is returned if given, otherwise
        KeyError is raised.
"""
identity = self._title(key)
for i in range(len(self._impl._items)):
if self._impl._items[i][0] == identity:
value = self._impl._items[i][2]
del self._impl._items[i]
self._impl.incr_version()
return value
if default is _marker:
raise KeyError(key)
else:
return default
pop = popone # type: ignore
def popall(self, key, default=_marker):
"""Remove all occurrences of key and return the list of corresponding
values.
        If the key is not found, default is returned if given, otherwise
        KeyError is raised.
"""
found = False
identity = self._title(key)
ret = []
for i in range(len(self._impl._items) - 1, -1, -1):
item = self._impl._items[i]
if item[0] == identity:
ret.append(item[2])
del self._impl._items[i]
self._impl.incr_version()
found = True
if not found:
if default is _marker:
raise KeyError(key)
else:
return default
else:
ret.reverse()
return ret
def popitem(self):
"""Remove and return an arbitrary (key, value) pair."""
if self._impl._items:
i = self._impl._items.pop(0)
self._impl.incr_version()
return i[1], i[2]
else:
raise KeyError("empty multidict")
def update(self, *args, **kwargs):
"""Update the dictionary from *other*, overwriting existing keys."""
self._extend(args, kwargs, "update", self._update_items)
def _update_items(self, items):
if not items:
return
used_keys = {}
for identity, key, value in items:
start = used_keys.get(identity, 0)
for i in range(start, len(self._impl._items)):
item = self._impl._items[i]
if item[0] == identity:
used_keys[identity] = i + 1
self._impl._items[i] = (identity, key, value)
break
else:
self._impl._items.append((identity, key, value))
used_keys[identity] = len(self._impl._items)
# drop tails
i = 0
while i < len(self._impl._items):
item = self._impl._items[i]
identity = item[0]
pos = used_keys.get(identity)
if pos is None:
i += 1
continue
if i >= pos:
del self._impl._items[i]
else:
i += 1
self._impl.incr_version()
def _replace(self, key, value):
key = self._key(key)
identity = self._title(key)
items = self._impl._items
for i in range(len(items)):
item = items[i]
if item[0] == identity:
items[i] = (identity, key, value)
# i points to last found item
rgt = i
self._impl.incr_version()
break
else:
self._impl._items.append((identity, key, value))
self._impl.incr_version()
return
# remove all tail items
i = rgt + 1
while i < len(items):
item = items[i]
if item[0] == identity:
del items[i]
else:
i += 1
class CIMultiDict(MultiDict):
"""Dictionary with the support for duplicate case-insensitive keys."""
def _title(self, key):
return key.title()
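# Hedged usage sketch (illustrative only, not part of the upstream module):
# keys are matched case-insensitively via str.title(), while the spelling
# of the first insertion is preserved.
def _example_cimultidict() -> None:
    headers = CIMultiDict()
    headers.add("Content-Type", "text/html")
    assert headers["content-type"] == "text/html"
    assert "CONTENT-TYPE" in headers
    assert list(headers.keys()) == ["Content-Type"]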
class _Iter:
__slots__ = ("_size", "_iter")
def __init__(self, size, iterator):
self._size = size
self._iter = iterator
def __iter__(self):
return self
def __next__(self):
return next(self._iter)
def __length_hint__(self):
return self._size
class _ViewBase:
def __init__(self, impl):
self._impl = impl
def __len__(self):
return len(self._impl._items)
class _ItemsView(_ViewBase, abc.ItemsView):
def __contains__(self, item):
assert isinstance(item, tuple) or isinstance(item, list)
assert len(item) == 2
for i, k, v in self._impl._items:
if item[0] == k and item[1] == v:
return True
return False
def __iter__(self):
return _Iter(len(self), self._iter(self._impl._version))
def _iter(self, version):
for i, k, v in self._impl._items:
if version != self._impl._version:
raise RuntimeError("Dictionary changed during iteration")
yield k, v
def __repr__(self):
lst = []
for item in self._impl._items:
lst.append("{!r}: {!r}".format(item[1], item[2]))
body = ", ".join(lst)
return "{}({})".format(self.__class__.__name__, body)
class _ValuesView(_ViewBase, abc.ValuesView):
def __contains__(self, value):
for item in self._impl._items:
if item[2] == value:
return True
return False
def __iter__(self):
return _Iter(len(self), self._iter(self._impl._version))
def _iter(self, version):
for item in self._impl._items:
if version != self._impl._version:
raise RuntimeError("Dictionary changed during iteration")
yield item[2]
def __repr__(self):
lst = []
for item in self._impl._items:
lst.append("{!r}".format(item[2]))
body = ", ".join(lst)
return "{}({})".format(self.__class__.__name__, body)
class _KeysView(_ViewBase, abc.KeysView):
def __contains__(self, key):
for item in self._impl._items:
if item[1] == key:
return True
return False
def __iter__(self):
return _Iter(len(self), self._iter(self._impl._version))
def _iter(self, version):
for item in self._impl._items:
if version != self._impl._version:
raise RuntimeError("Dictionary changed during iteration")
yield item[1]
def __repr__(self):
lst = []
for item in self._impl._items:
lst.append("{!r}".format(item[1]))
body = ", ".join(lst)
return "{}({})".format(self.__class__.__name__, body)
| 15,049 | Python | 27.557875 | 88 | 0.511994 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/multidict/_multidict_base.py | from collections.abc import ItemsView, Iterable, KeysView, Set, ValuesView
def _abc_itemsview_register(view_cls):
ItemsView.register(view_cls)
def _abc_keysview_register(view_cls):
KeysView.register(view_cls)
def _abc_valuesview_register(view_cls):
ValuesView.register(view_cls)
def _viewbaseset_richcmp(view, other, op):
if op == 0: # <
if not isinstance(other, Set):
return NotImplemented
return len(view) < len(other) and view <= other
elif op == 1: # <=
if not isinstance(other, Set):
return NotImplemented
if len(view) > len(other):
return False
for elem in view:
if elem not in other:
return False
return True
elif op == 2: # ==
if not isinstance(other, Set):
return NotImplemented
return len(view) == len(other) and view <= other
elif op == 3: # !=
return not view == other
elif op == 4: # >
if not isinstance(other, Set):
return NotImplemented
return len(view) > len(other) and view >= other
elif op == 5: # >=
if not isinstance(other, Set):
return NotImplemented
if len(view) < len(other):
return False
for elem in other:
if elem not in view:
return False
return True
def _viewbaseset_and(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view & other
def _viewbaseset_or(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view | other
def _viewbaseset_sub(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view - other
def _viewbaseset_xor(view, other):
if not isinstance(other, Iterable):
return NotImplemented
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
other = set(iter(other))
if not isinstance(other, Set):
other = set(iter(other))
return view ^ other
def _itemsview_isdisjoint(view, other):
"Return True if two sets have a null intersection."
for v in other:
if v in view:
return False
return True
def _itemsview_repr(view):
lst = []
for k, v in view:
lst.append("{!r}: {!r}".format(k, v))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _keysview_isdisjoint(view, other):
"Return True if two sets have a null intersection."
for k in other:
if k in view:
return False
return True
def _keysview_repr(view):
lst = []
for k in view:
lst.append("{!r}".format(k))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _valuesview_repr(view):
lst = []
for v in view:
lst.append("{!r}".format(v))
body = ", ".join(lst)
return "{}({})".format(view.__class__.__name__, body)
def _mdrepr(md):
lst = []
for k, v in md.items():
lst.append("'{}': {!r}".format(k, v))
body = ", ".join(lst)
return "<{}({})>".format(md.__class__.__name__, body)
| 3,791 | Python | 25.151724 | 74 | 0.570562 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/async_timeout/__init__.py | import asyncio
import enum
import sys
import warnings
from types import TracebackType
from typing import Any, Optional, Type
if sys.version_info >= (3, 8):
from typing import final
else:
from typing_extensions import final
__version__ = "4.0.2"
__all__ = ("timeout", "timeout_at", "Timeout")
def timeout(delay: Optional[float]) -> "Timeout":
"""timeout context manager.
    Useful in cases when you want to apply timeout logic around a block
    of code or in cases when asyncio.wait_for is not suitable. For example:
>>> async with timeout(0.001):
... async with aiohttp.get('https://github.com') as r:
... await r.text()
delay - value in seconds or None to disable timeout logic
"""
loop = _get_running_loop()
if delay is not None:
deadline = loop.time() + delay # type: Optional[float]
else:
deadline = None
return Timeout(deadline, loop)
def timeout_at(deadline: Optional[float]) -> "Timeout":
"""Schedule the timeout at absolute time.
deadline argument points on the time in the same clock system
as loop.time().
Please note: it is not POSIX time but a time with
undefined starting base, e.g. the time of the system power on.
>>> async with timeout_at(loop.time() + 10):
... async with aiohttp.get('https://github.com') as r:
... await r.text()
"""
loop = _get_running_loop()
return Timeout(deadline, loop)
class _State(enum.Enum):
INIT = "INIT"
ENTER = "ENTER"
TIMEOUT = "TIMEOUT"
EXIT = "EXIT"
@final
class Timeout:
# Internal class, please don't instantiate it directly
# Use timeout() and timeout_at() public factories instead.
#
# Implementation note: `async with timeout()` is preferred
# over `with timeout()`.
# While technically the Timeout class implementation
# doesn't need to be async at all,
# the `async with` statement explicitly points that
# the context manager should be used from async function context.
#
    # This design helps avoid many silly misuses.
    #
    # TimeoutError is raised immediately when scheduled
    # if the deadline has passed.
    # The purpose is to time out as soon as possible
    # without waiting for the next await expression.
__slots__ = ("_deadline", "_loop", "_state", "_timeout_handler")
def __init__(
self, deadline: Optional[float], loop: asyncio.AbstractEventLoop
) -> None:
self._loop = loop
self._state = _State.INIT
self._timeout_handler = None # type: Optional[asyncio.Handle]
if deadline is None:
self._deadline = None # type: Optional[float]
else:
self.update(deadline)
def __enter__(self) -> "Timeout":
warnings.warn(
"with timeout() is deprecated, use async with timeout() instead",
DeprecationWarning,
stacklevel=2,
)
self._do_enter()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self._do_exit(exc_type)
return None
async def __aenter__(self) -> "Timeout":
self._do_enter()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self._do_exit(exc_type)
return None
@property
def expired(self) -> bool:
"""Is timeout expired during execution?"""
return self._state == _State.TIMEOUT
@property
def deadline(self) -> Optional[float]:
return self._deadline
def reject(self) -> None:
"""Reject scheduled timeout if any."""
        # cancel() might be a better name, but
        # task.cancel() raises CancelledError in the asyncio world.
if self._state not in (_State.INIT, _State.ENTER):
raise RuntimeError(f"invalid state {self._state.value}")
self._reject()
def _reject(self) -> None:
if self._timeout_handler is not None:
self._timeout_handler.cancel()
self._timeout_handler = None
def shift(self, delay: float) -> None:
"""Advance timeout on delay seconds.
The delay can be negative.
Raise RuntimeError if shift is called when deadline is not scheduled
"""
deadline = self._deadline
if deadline is None:
raise RuntimeError("cannot shift timeout if deadline is not scheduled")
self.update(deadline + delay)
def update(self, deadline: float) -> None:
"""Set deadline to absolute value.
deadline argument points on the time in the same clock system
as loop.time().
        If the new deadline is in the past, the timeout is raised immediately.
Please note: it is not POSIX time but a time with
undefined starting base, e.g. the time of the system power on.
"""
if self._state == _State.EXIT:
raise RuntimeError("cannot reschedule after exit from context manager")
if self._state == _State.TIMEOUT:
raise RuntimeError("cannot reschedule expired timeout")
if self._timeout_handler is not None:
self._timeout_handler.cancel()
self._deadline = deadline
if self._state != _State.INIT:
self._reschedule()
def _reschedule(self) -> None:
assert self._state == _State.ENTER
deadline = self._deadline
if deadline is None:
return
now = self._loop.time()
if self._timeout_handler is not None:
self._timeout_handler.cancel()
task = _current_task(self._loop)
if deadline <= now:
self._timeout_handler = self._loop.call_soon(self._on_timeout, task)
else:
self._timeout_handler = self._loop.call_at(deadline, self._on_timeout, task)
def _do_enter(self) -> None:
if self._state != _State.INIT:
raise RuntimeError(f"invalid state {self._state.value}")
self._state = _State.ENTER
self._reschedule()
def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None:
if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT:
self._timeout_handler = None
raise asyncio.TimeoutError
# timeout has not expired
self._state = _State.EXIT
self._reject()
return None
def _on_timeout(self, task: "asyncio.Task[None]") -> None:
task.cancel()
self._state = _State.TIMEOUT
# drop the reference early
self._timeout_handler = None
if sys.version_info >= (3, 7):
def _current_task(loop: asyncio.AbstractEventLoop) -> "Optional[asyncio.Task[Any]]":
return asyncio.current_task(loop=loop)
else:
def _current_task(loop: asyncio.AbstractEventLoop) -> "Optional[asyncio.Task[Any]]":
return asyncio.Task.current_task(loop=loop)
if sys.version_info >= (3, 7):
def _get_running_loop() -> asyncio.AbstractEventLoop:
return asyncio.get_running_loop()
else:
def _get_running_loop() -> asyncio.AbstractEventLoop:
loop = asyncio.get_event_loop()
if not loop.is_running():
raise RuntimeError("no running event loop")
return loop
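# Hedged usage sketch (illustrative only, not part of the upstream module):
# the deadline can be inspected and moved while the block runs. The 10- and
# 5-second figures are assumptions made for the example.
async def _example_movable_deadline() -> None:
    async with timeout(10) as cm:
        # Grant five extra seconds relative to the current deadline.
        cm.shift(5)
        await asyncio.sleep(0)
    assert not cm.expired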
| 7,487 | Python | 29.193548 | 88 | 0.612395 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/testing.py | import contextlib
import io
import os
import shlex
import shutil
import sys
import tempfile
import typing as t
from types import TracebackType
from . import formatting
from . import termui
from . import utils
from ._compat import _find_binary_reader
if t.TYPE_CHECKING:
from .core import BaseCommand
class EchoingStdin:
def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None:
self._input = input
self._output = output
self._paused = False
def __getattr__(self, x: str) -> t.Any:
return getattr(self._input, x)
def _echo(self, rv: bytes) -> bytes:
if not self._paused:
self._output.write(rv)
return rv
def read(self, n: int = -1) -> bytes:
return self._echo(self._input.read(n))
def read1(self, n: int = -1) -> bytes:
return self._echo(self._input.read1(n)) # type: ignore
def readline(self, n: int = -1) -> bytes:
return self._echo(self._input.readline(n))
def readlines(self) -> t.List[bytes]:
return [self._echo(x) for x in self._input.readlines()]
def __iter__(self) -> t.Iterator[bytes]:
return iter(self._echo(x) for x in self._input)
def __repr__(self) -> str:
return repr(self._input)
@contextlib.contextmanager
def _pause_echo(stream: t.Optional[EchoingStdin]) -> t.Iterator[None]:
if stream is None:
yield
else:
stream._paused = True
yield
stream._paused = False
class _NamedTextIOWrapper(io.TextIOWrapper):
def __init__(
self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any
) -> None:
super().__init__(buffer, **kwargs)
self._name = name
self._mode = mode
@property
def name(self) -> str:
return self._name
@property
def mode(self) -> str:
return self._mode
def make_input_stream(
input: t.Optional[t.Union[str, bytes, t.IO]], charset: str
) -> t.BinaryIO:
# Is already an input stream.
if hasattr(input, "read"):
rv = _find_binary_reader(t.cast(t.IO, input))
if rv is not None:
return rv
raise TypeError("Could not find binary reader for input stream.")
if input is None:
input = b""
elif isinstance(input, str):
input = input.encode(charset)
return io.BytesIO(t.cast(bytes, input))
class Result:
"""Holds the captured result of an invoked CLI script."""
def __init__(
self,
runner: "CliRunner",
stdout_bytes: bytes,
stderr_bytes: t.Optional[bytes],
return_value: t.Any,
exit_code: int,
exception: t.Optional[BaseException],
exc_info: t.Optional[
t.Tuple[t.Type[BaseException], BaseException, TracebackType]
] = None,
):
#: The runner that created the result
self.runner = runner
#: The standard output as bytes.
self.stdout_bytes = stdout_bytes
#: The standard error as bytes, or None if not available
self.stderr_bytes = stderr_bytes
#: The value returned from the invoked command.
#:
#: .. versionadded:: 8.0
self.return_value = return_value
#: The exit code as integer.
self.exit_code = exit_code
#: The exception that happened if one did.
self.exception = exception
#: The traceback
self.exc_info = exc_info
@property
def output(self) -> str:
"""The (standard) output as unicode string."""
return self.stdout
@property
def stdout(self) -> str:
"""The standard output as unicode string."""
return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
@property
def stderr(self) -> str:
"""The standard error as unicode string."""
if self.stderr_bytes is None:
raise ValueError("stderr not separately captured")
return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
"\r\n", "\n"
)
def __repr__(self) -> str:
exc_str = repr(self.exception) if self.exception else "okay"
return f"<{type(self).__name__} {exc_str}>"
class CliRunner:
"""The CLI runner provides functionality to invoke a Click command line
    script for unit-testing purposes in an isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
:param mix_stderr: if this is set to `False`, then stdout and stderr are
preserved as independent streams. This is useful for
Unix-philosophy apps that have predictable stdout and
noisy stderr, such that each may be measured
independently
"""
def __init__(
self,
charset: str = "utf-8",
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
echo_stdin: bool = False,
mix_stderr: bool = True,
) -> None:
self.charset = charset
self.env = env or {}
self.echo_stdin = echo_stdin
self.mix_stderr = mix_stderr
def get_default_prog_name(self, cli: "BaseCommand") -> str:
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or "root"
def make_env(
self, overrides: t.Optional[t.Mapping[str, t.Optional[str]]] = None
) -> t.Mapping[str, t.Optional[str]]:
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(
self,
input: t.Optional[t.Union[str, bytes, t.IO]] = None,
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
color: bool = False,
) -> t.Iterator[t.Tuple[io.BytesIO, t.Optional[io.BytesIO]]]:
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionchanged:: 8.0
``stderr`` is opened with ``errors="backslashreplace"``
instead of the default ``"strict"``.
.. versionchanged:: 4.0
Added the ``color`` parameter.
"""
bytes_input = make_input_stream(input, self.charset)
echo_input = None
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = formatting.FORCED_WIDTH
formatting.FORCED_WIDTH = 80
env = self.make_env(env)
bytes_output = io.BytesIO()
if self.echo_stdin:
bytes_input = echo_input = t.cast(
t.BinaryIO, EchoingStdin(bytes_input, bytes_output)
)
sys.stdin = text_input = _NamedTextIOWrapper(
bytes_input, encoding=self.charset, name="<stdin>", mode="r"
)
if self.echo_stdin:
# Force unbuffered reads, otherwise TextIOWrapper reads a
# large chunk which is echoed early.
text_input._CHUNK_SIZE = 1 # type: ignore
sys.stdout = _NamedTextIOWrapper(
bytes_output, encoding=self.charset, name="<stdout>", mode="w"
)
bytes_error = None
if self.mix_stderr:
sys.stderr = sys.stdout
else:
bytes_error = io.BytesIO()
sys.stderr = _NamedTextIOWrapper(
bytes_error,
encoding=self.charset,
name="<stderr>",
mode="w",
errors="backslashreplace",
)
@_pause_echo(echo_input) # type: ignore
def visible_input(prompt: t.Optional[str] = None) -> str:
sys.stdout.write(prompt or "")
val = text_input.readline().rstrip("\r\n")
sys.stdout.write(f"{val}\n")
sys.stdout.flush()
return val
@_pause_echo(echo_input) # type: ignore
def hidden_input(prompt: t.Optional[str] = None) -> str:
sys.stdout.write(f"{prompt or ''}\n")
sys.stdout.flush()
return text_input.readline().rstrip("\r\n")
@_pause_echo(echo_input) # type: ignore
def _getchar(echo: bool) -> str:
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(
stream: t.Optional[t.IO] = None, color: t.Optional[bool] = None
) -> bool:
if color is None:
return not default_color
return not color
old_visible_prompt_func = termui.visible_prompt_func
old_hidden_prompt_func = termui.hidden_prompt_func
old__getchar_func = termui._getchar
old_should_strip_ansi = utils.should_strip_ansi # type: ignore
termui.visible_prompt_func = visible_input
termui.hidden_prompt_func = hidden_input
termui._getchar = _getchar
utils.should_strip_ansi = should_strip_ansi # type: ignore
old_env = {}
try:
for key, value in env.items():
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield (bytes_output, bytes_error)
finally:
for key, value in old_env.items():
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
termui.visible_prompt_func = old_visible_prompt_func
termui.hidden_prompt_func = old_hidden_prompt_func
termui._getchar = old__getchar_func
utils.should_strip_ansi = old_should_strip_ansi # type: ignore
formatting.FORCED_WIDTH = old_forced_width
def invoke(
self,
cli: "BaseCommand",
args: t.Optional[t.Union[str, t.Sequence[str]]] = None,
input: t.Optional[t.Union[str, bytes, t.IO]] = None,
env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
catch_exceptions: bool = True,
color: bool = False,
**extra: t.Any,
) -> Result:
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script, the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
:param cli: the command to invoke
:param args: the arguments to invoke. It may be given as an iterable
                     or a string. When given as a string, it will be interpreted
as a Unix shell command. More details at
:func:`shlex.split`.
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
.. versionchanged:: 8.0
The result object has the ``return_value`` attribute with
the value returned from the invoked command.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionchanged:: 3.0
Added the ``catch_exceptions`` parameter.
.. versionchanged:: 3.0
The result object has the ``exc_info`` attribute with the
traceback if available.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as outstreams:
return_value = None
exception: t.Optional[BaseException] = None
exit_code = 0
if isinstance(args, str):
args = shlex.split(args)
try:
prog_name = extra.pop("prog_name")
except KeyError:
prog_name = self.get_default_prog_name(cli)
try:
return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
except SystemExit as e:
exc_info = sys.exc_info()
e_code = t.cast(t.Optional[t.Union[int, t.Any]], e.code)
if e_code is None:
e_code = 0
if e_code != 0:
exception = e
if not isinstance(e_code, int):
sys.stdout.write(str(e_code))
sys.stdout.write("\n")
e_code = 1
exit_code = e_code
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = 1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
stdout = outstreams[0].getvalue()
if self.mix_stderr:
stderr = None
else:
stderr = outstreams[1].getvalue() # type: ignore
return Result(
runner=self,
stdout_bytes=stdout,
stderr_bytes=stderr,
return_value=return_value,
exit_code=exit_code,
exception=exception,
exc_info=exc_info, # type: ignore
)
@contextlib.contextmanager
def isolated_filesystem(
self, temp_dir: t.Optional[t.Union[str, os.PathLike]] = None
) -> t.Iterator[str]:
"""A context manager that creates a temporary directory and
changes the current working directory to it. This isolates tests
that affect the contents of the CWD to prevent them from
interfering with each other.
:param temp_dir: Create the temporary directory under this
directory. If given, the created directory is not removed
when exiting.
.. versionchanged:: 8.0
Added the ``temp_dir`` parameter.
"""
cwd = os.getcwd()
dt = tempfile.mkdtemp(dir=temp_dir) # type: ignore[type-var]
os.chdir(dt)
try:
yield t.cast(str, dt)
finally:
os.chdir(cwd)
if temp_dir is None:
try:
shutil.rmtree(dt)
except OSError: # noqa: B014
pass
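# Illustrative example (not part of Click): exercising a command with
# ``CliRunner``. The ``hello`` command below is hypothetical and exists
# only for this sketch.
if __name__ == "__main__":
    import click
    @click.command()
    @click.argument("name")
    def hello(name: str) -> None:
        click.echo(f"Hello {name}!")
    runner = CliRunner()
    result = runner.invoke(hello, ["World"])
    assert result.exit_code == 0
    assert result.output == "Hello World!\n"
    # isolated_filesystem() runs the block in a fresh temporary CWD that
    # is removed afterwards (unless ``temp_dir`` is given).
    with runner.isolated_filesystem():
        with open("data.txt", "w") as f:
            f.write("scratch")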
| 16,063 | Python | 32.466667 | 86 | 0.557057 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/globals.py | import typing as t
from threading import local
if t.TYPE_CHECKING:
import typing_extensions as te
from .core import Context
_local = local()
@t.overload
def get_current_context(silent: "te.Literal[False]" = False) -> "Context":
...
@t.overload
def get_current_context(silent: bool = ...) -> t.Optional["Context"]:
...
def get_current_context(silent: bool = False) -> t.Optional["Context"]:
"""Returns the current click context. This can be used as a way to
access the current context object from anywhere. This is a more implicit
alternative to the :func:`pass_context` decorator. This function is
    primarily useful for helpers such as :func:`echo`, which might be
    interested in changing their behavior based on the current context.
To push the current context, :meth:`Context.scope` can be used.
.. versionadded:: 5.0
:param silent: if set to `True` the return value is `None` if no context
is available. The default behavior is to raise a
:exc:`RuntimeError`.
"""
try:
return t.cast("Context", _local.stack[-1])
except (AttributeError, IndexError) as e:
if not silent:
raise RuntimeError("There is no active click context.") from e
return None
def push_context(ctx: "Context") -> None:
"""Pushes a new context to the current stack."""
_local.__dict__.setdefault("stack", []).append(ctx)
def pop_context() -> None:
"""Removes the top level from the stack."""
_local.stack.pop()
def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]:
"""Internal helper to get the default value of the color flag. If a
value is passed it's returned unchanged, otherwise it's looked up from
the current context.
"""
if color is not None:
return color
ctx = get_current_context(silent=True)
if ctx is not None:
return ctx.color
return None
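# Illustrative example (not part of Click): the context stack managed by
# the helpers above. Click normally pushes and pops contexts for you
# during command invocation; doing it manually is rarely needed.
if __name__ == "__main__":
    import click
    ctx = click.Context(click.Command("demo"), color=True)
    push_context(ctx)
    try:
        assert get_current_context() is ctx
        # resolve_color_default falls back to the active context's color.
        assert resolve_color_default(None) is True
        assert resolve_color_default(False) is False
    finally:
        pop_context()
    # Without an active context, a silent lookup returns None.
    assert get_current_context(silent=True) is None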
| 1,961 | Python | 27.434782 | 78 | 0.654768 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/shell_completion.py | import os
import re
import typing as t
from gettext import gettext as _
from .core import Argument
from .core import BaseCommand
from .core import Context
from .core import MultiCommand
from .core import Option
from .core import Parameter
from .core import ParameterSource
from .parser import split_arg_string
from .utils import echo
def shell_complete(
cli: BaseCommand,
ctx_args: t.Dict[str, t.Any],
prog_name: str,
complete_var: str,
instruction: str,
) -> int:
"""Perform shell completion for the given CLI program.
:param cli: Command being called.
:param ctx_args: Extra arguments to pass to
``cli.make_context``.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
:param instruction: Value of ``complete_var`` with the completion
instruction and shell, in the form ``instruction_shell``.
:return: Status code to exit with.
"""
shell, _, instruction = instruction.partition("_")
comp_cls = get_completion_class(shell)
if comp_cls is None:
return 1
comp = comp_cls(cli, ctx_args, prog_name, complete_var)
if instruction == "source":
echo(comp.source())
return 0
if instruction == "complete":
echo(comp.complete())
return 0
return 1
class CompletionItem:
"""Represents a completion value and metadata about the value. The
default metadata is ``type`` to indicate special shell handling,
and ``help`` if a shell supports showing a help string next to the
value.
Arbitrary parameters can be passed when creating the object, and
accessed using ``item.attr``. If an attribute wasn't passed,
accessing it returns ``None``.
:param value: The completion suggestion.
:param type: Tells the shell script to provide special completion
support for the type. Click uses ``"dir"`` and ``"file"``.
:param help: String shown next to the value if supported.
:param kwargs: Arbitrary metadata. The built-in implementations
don't use this, but custom type completions paired with custom
shell support could use it.
"""
__slots__ = ("value", "type", "help", "_info")
def __init__(
self,
value: t.Any,
type: str = "plain",
help: t.Optional[str] = None,
**kwargs: t.Any,
) -> None:
self.value = value
self.type = type
self.help = help
self._info = kwargs
def __getattr__(self, name: str) -> t.Any:
return self._info.get(name)
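def _demo_completion_item() -> None:
    # Illustrative sketch, not part of Click: CompletionItem keeps arbitrary
    # keyword metadata in ``_info`` and exposes it via attribute access.
    item = CompletionItem("src/", type="dir", help="Source directory", tag="demo")
    assert (item.value, item.type, item.help) == ("src/", "dir", "Source directory")
    assert item.tag == "demo"  # arbitrary metadata, looked up via __getattr__
    assert item.missing is None  # unset metadata falls back to None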
# Only Bash >= 4.4 has the nosort option.
_SOURCE_BASH = """\
%(complete_func)s() {
local IFS=$'\\n'
local response
response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
%(complete_var)s=bash_complete $1)
for completion in $response; do
IFS=',' read type value <<< "$completion"
if [[ $type == 'dir' ]]; then
COMPREPLY=()
compopt -o dirnames
elif [[ $type == 'file' ]]; then
COMPREPLY=()
compopt -o default
elif [[ $type == 'plain' ]]; then
COMPREPLY+=($value)
fi
done
return 0
}
%(complete_func)s_setup() {
complete -o nosort -F %(complete_func)s %(prog_name)s
}
%(complete_func)s_setup;
"""
_SOURCE_ZSH = """\
#compdef %(prog_name)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(prog_name)s] )) && return 1
response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \
%(complete_var)s=zsh_complete %(prog_name)s)}")
for type key descr in ${response}; do
if [[ "$type" == "plain" ]]; then
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
elif [[ "$type" == "dir" ]]; then
_path_files -/
elif [[ "$type" == "file" ]]; then
_path_files -f
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
}
compdef %(complete_func)s %(prog_name)s;
"""
_SOURCE_FISH = """\
function %(complete_func)s;
set -l response;
for value in (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \
COMP_CWORD=(commandline -t) %(prog_name)s);
set response $response $value;
end;
for completion in $response;
set -l metadata (string split "," $completion);
if test $metadata[1] = "dir";
__fish_complete_directories $metadata[2];
else if test $metadata[1] = "file";
__fish_complete_path $metadata[2];
else if test $metadata[1] = "plain";
echo $metadata[2];
end;
end;
end;
complete --no-files --command %(prog_name)s --arguments \
"(%(complete_func)s)";
"""
class ShellComplete:
"""Base class for providing shell completion support. A subclass for
a given shell will override attributes and methods to implement the
completion instructions (``source`` and ``complete``).
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
.. versionadded:: 8.0
"""
name: t.ClassVar[str]
"""Name to register the shell as with :func:`add_completion_class`.
This is used in completion instructions (``{name}_source`` and
``{name}_complete``).
"""
source_template: t.ClassVar[str]
"""Completion script template formatted by :meth:`source`. This must
be provided by subclasses.
"""
def __init__(
self,
cli: BaseCommand,
ctx_args: t.Dict[str, t.Any],
prog_name: str,
complete_var: str,
) -> None:
self.cli = cli
self.ctx_args = ctx_args
self.prog_name = prog_name
self.complete_var = complete_var
@property
def func_name(self) -> str:
"""The name of the shell function defined by the completion
script.
"""
safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), re.ASCII)
return f"_{safe_name}_completion"
def source_vars(self) -> t.Dict[str, t.Any]:
"""Vars for formatting :attr:`source_template`.
By default this provides ``complete_func``, ``complete_var``,
and ``prog_name``.
"""
return {
"complete_func": self.func_name,
"complete_var": self.complete_var,
"prog_name": self.prog_name,
}
def source(self) -> str:
"""Produce the shell script that defines the completion
function. By default this ``%``-style formats
:attr:`source_template` with the dict returned by
:meth:`source_vars`.
"""
return self.source_template % self.source_vars()
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
"""Use the env vars defined by the shell script to return a
tuple of ``args, incomplete``. This must be implemented by
subclasses.
"""
raise NotImplementedError
def get_completions(
self, args: t.List[str], incomplete: str
) -> t.List[CompletionItem]:
"""Determine the context and last complete command or parameter
from the complete args. Call that object's ``shell_complete``
method to get the completions for the incomplete value.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
return obj.shell_complete(ctx, incomplete)
def format_completion(self, item: CompletionItem) -> str:
"""Format a completion item into the form recognized by the
shell script. This must be implemented by subclasses.
:param item: Completion item to format.
"""
raise NotImplementedError
def complete(self) -> str:
"""Produce the completion data to send back to the shell.
By default this calls :meth:`get_completion_args`, gets the
completions, then calls :meth:`format_completion` for each
completion.
"""
args, incomplete = self.get_completion_args()
completions = self.get_completions(args, incomplete)
out = [self.format_completion(item) for item in completions]
return "\n".join(out)
class BashComplete(ShellComplete):
"""Shell completion for Bash."""
name = "bash"
source_template = _SOURCE_BASH
def _check_version(self) -> None:
import subprocess
output = subprocess.run(
["bash", "-c", "echo ${BASH_VERSION}"], stdout=subprocess.PIPE
)
match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode())
if match is not None:
            # Compare numerically so that e.g. Bash 10.x is not treated
            # as older than 4.4 by string comparison.
            major, minor = (int(g) for g in match.groups())
            if (major, minor) < (4, 4):
raise RuntimeError(
_(
"Shell completion is not supported for Bash"
" versions older than 4.4."
)
)
else:
raise RuntimeError(
_("Couldn't detect Bash version, shell completion is not supported.")
)
def source(self) -> str:
self._check_version()
return super().source()
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
return args, incomplete
def format_completion(self, item: CompletionItem) -> str:
return f"{item.type},{item.value}"
class ZshComplete(ShellComplete):
"""Shell completion for Zsh."""
name = "zsh"
source_template = _SOURCE_ZSH
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
return args, incomplete
def format_completion(self, item: CompletionItem) -> str:
return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}"
class FishComplete(ShellComplete):
"""Shell completion for Fish."""
name = "fish"
source_template = _SOURCE_FISH
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
cwords = split_arg_string(os.environ["COMP_WORDS"])
incomplete = os.environ["COMP_CWORD"]
args = cwords[1:]
        # Fish stores the partial word in both COMP_WORDS and
        # COMP_CWORD; remove it from the complete args.
if incomplete and args and args[-1] == incomplete:
args.pop()
return args, incomplete
def format_completion(self, item: CompletionItem) -> str:
if item.help:
return f"{item.type},{item.value}\t{item.help}"
return f"{item.type},{item.value}"
_available_shells: t.Dict[str, t.Type[ShellComplete]] = {
"bash": BashComplete,
"fish": FishComplete,
"zsh": ZshComplete,
}
def add_completion_class(
cls: t.Type[ShellComplete], name: t.Optional[str] = None
) -> None:
"""Register a :class:`ShellComplete` subclass under the given name.
The name will be provided by the completion instruction environment
variable during completion.
:param cls: The completion class that will handle completion for the
shell.
:param name: Name to register the class under. Defaults to the
class's ``name`` attribute.
"""
if name is None:
name = cls.name
_available_shells[name] = cls
def get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]:
"""Look up a registered :class:`ShellComplete` subclass by the name
provided by the completion instruction environment variable. If the
name isn't registered, returns ``None``.
:param shell: Name the class is registered under.
"""
return _available_shells.get(shell)
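def _demo_register_completion_class() -> None:
    # Illustrative sketch, not part of Click: adding support for a
    # hypothetical shell named "mysh". A real implementation would provide
    # a working completion script in ``source_template``.
    class MyShellComplete(ShellComplete):
        name = "mysh"
        source_template = "# completion script for %(prog_name)s"
        def get_completion_args(self) -> t.Tuple[t.List[str], str]:
            cwords = split_arg_string(os.environ.get("COMP_WORDS", ""))
            return cwords[1:], os.environ.get("COMP_CWORD", "")
        def format_completion(self, item: CompletionItem) -> str:
            return f"{item.type},{item.value}"
    add_completion_class(MyShellComplete)
    assert get_completion_class("mysh") is MyShellComplete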
def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:
"""Determine if the given parameter is an argument that can still
accept values.
:param ctx: Invocation context for the command represented by the
parsed complete args.
:param param: Argument object being checked.
"""
if not isinstance(param, Argument):
return False
assert param.name is not None
value = ctx.params[param.name]
return (
param.nargs == -1
or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE
or (
param.nargs > 1
and isinstance(value, (tuple, list))
and len(value) < param.nargs
)
)
def _start_of_option(ctx: Context, value: str) -> bool:
"""Check if the value looks like the start of an option."""
if not value:
return False
c = value[0]
return c in ctx._opt_prefixes
def _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool:
"""Determine if the given parameter is an option that needs a value.
:param args: List of complete args before the incomplete value.
:param param: Option object being checked.
"""
if not isinstance(param, Option):
return False
if param.is_flag or param.count:
return False
last_option = None
for index, arg in enumerate(reversed(args)):
if index + 1 > param.nargs:
break
if _start_of_option(ctx, arg):
last_option = arg
return last_option is not None and last_option in param.opts
def _resolve_context(
cli: BaseCommand, ctx_args: t.Dict[str, t.Any], prog_name: str, args: t.List[str]
) -> Context:
"""Produce the context hierarchy starting with the command and
    traversing the complete arguments. This only follows the commands;
    it doesn't trigger input prompts or callbacks.
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param args: List of complete args before the incomplete value.
"""
ctx_args["resilient_parsing"] = True
ctx = cli.make_context(prog_name, args.copy(), **ctx_args)
args = ctx.protected_args + ctx.args
while args:
command = ctx.command
if isinstance(command, MultiCommand):
if not command.chain:
name, cmd, args = command.resolve_command(ctx, args)
if cmd is None:
return ctx
ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True)
args = ctx.protected_args + ctx.args
else:
while args:
name, cmd, args = command.resolve_command(ctx, args)
if cmd is None:
return ctx
sub_ctx = cmd.make_context(
name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
resilient_parsing=True,
)
args = sub_ctx.args
ctx = sub_ctx
args = [*sub_ctx.protected_args, *sub_ctx.args]
else:
break
return ctx
def _resolve_incomplete(
ctx: Context, args: t.List[str], incomplete: str
) -> t.Tuple[t.Union[BaseCommand, Parameter], str]:
"""Find the Click object that will handle the completion of the
incomplete value. Return the object and the incomplete value.
:param ctx: Invocation context for the command represented by
the parsed complete args.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
# Different shells treat an "=" between a long option name and
# value differently. Might keep the value joined, return the "="
# as a separate item, or return the split name and value. Always
# split and discard the "=" to make completion easier.
if incomplete == "=":
incomplete = ""
elif "=" in incomplete and _start_of_option(ctx, incomplete):
name, _, incomplete = incomplete.partition("=")
args.append(name)
# The "--" marker tells Click to stop treating values as options
# even if they start with the option character. If it hasn't been
# given and the incomplete arg looks like an option, the current
# command will provide option name completions.
if "--" not in args and _start_of_option(ctx, incomplete):
return ctx.command, incomplete
params = ctx.command.get_params(ctx)
# If the last complete arg is an option name with an incomplete
# value, the option will provide value completions.
for param in params:
if _is_incomplete_option(ctx, args, param):
return param, incomplete
# It's not an option name or value. The first argument without a
# parsed value will provide value completions.
for param in params:
if _is_incomplete_argument(ctx, param):
return param, incomplete
# There were no unparsed arguments, the command may be a group that
# will provide command name completions.
return ctx.command, incomplete
| 18,018 | Python | 30.013769 | 86 | 0.605506 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/exceptions.py | import os
import typing as t
from gettext import gettext as _
from gettext import ngettext
from ._compat import get_text_stderr
from .utils import echo
if t.TYPE_CHECKING:
from .core import Context
from .core import Parameter
def _join_param_hints(
param_hint: t.Optional[t.Union[t.Sequence[str], str]]
) -> t.Optional[str]:
if param_hint is not None and not isinstance(param_hint, str):
return " / ".join(repr(x) for x in param_hint)
return param_hint
class ClickException(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception.
exit_code = 1
def __init__(self, message: str) -> None:
super().__init__(message)
self.message = message
def format_message(self) -> str:
return self.message
def __str__(self) -> str:
return self.message
def show(self, file: t.Optional[t.IO] = None) -> None:
if file is None:
file = get_text_stderr()
echo(_("Error: {message}").format(message=self.format_message()), file=file)
class UsageError(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None:
super().__init__(message)
self.ctx = ctx
self.cmd = self.ctx.command if self.ctx else None
def show(self, file: t.Optional[t.IO] = None) -> None:
if file is None:
file = get_text_stderr()
color = None
hint = ""
if (
self.ctx is not None
and self.ctx.command.get_help_option(self.ctx) is not None
):
hint = _("Try '{command} {option}' for help.").format(
command=self.ctx.command_path, option=self.ctx.help_option_names[0]
)
hint = f"{hint}\n"
if self.ctx is not None:
color = self.ctx.color
echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
echo(
_("Error: {message}").format(message=self.format_message()),
file=file,
color=color,
)
class BadParameter(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(
self,
message: str,
ctx: t.Optional["Context"] = None,
param: t.Optional["Parameter"] = None,
param_hint: t.Optional[str] = None,
) -> None:
super().__init__(message, ctx)
self.param = param
self.param_hint = param_hint
def format_message(self) -> str:
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx) # type: ignore
else:
return _("Invalid value: {message}").format(message=self.message)
return _("Invalid value for {param_hint}: {message}").format(
param_hint=_join_param_hints(param_hint), message=self.message
)
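def _demo_bad_parameter() -> None:
    # Illustrative sketch, not part of this module: raising BadParameter
    # from a parameter callback. Click fills in the context and parameter,
    # producing e.g. "Invalid value for '--count': must be even".
    import click
    def validate_even(ctx, param, value):
        if value % 2 != 0:
            raise click.BadParameter("must be even")
        return value
    @click.command()
    @click.option("--count", type=int, default=0, callback=validate_even)
    def demo(count):
        click.echo(count)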
class MissingParameter(BadParameter):
"""Raised if click required an option or argument but it was not
provided when invoking the script.
.. versionadded:: 4.0
:param param_type: a string that indicates the type of the parameter.
The default is to inherit the parameter type from
the given `param`. Valid values are ``'parameter'``,
``'option'`` or ``'argument'``.
"""
def __init__(
self,
message: t.Optional[str] = None,
ctx: t.Optional["Context"] = None,
param: t.Optional["Parameter"] = None,
param_hint: t.Optional[str] = None,
param_type: t.Optional[str] = None,
) -> None:
super().__init__(message or "", ctx, param, param_hint)
self.param_type = param_type
def format_message(self) -> str:
if self.param_hint is not None:
param_hint: t.Optional[str] = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx) # type: ignore
else:
param_hint = None
param_hint = _join_param_hints(param_hint)
param_hint = f" {param_hint}" if param_hint else ""
param_type = self.param_type
if param_type is None and self.param is not None:
param_type = self.param.param_type_name
msg = self.message
if self.param is not None:
msg_extra = self.param.type.get_missing_message(self.param)
if msg_extra:
if msg:
msg += f". {msg_extra}"
else:
msg = msg_extra
msg = f" {msg}" if msg else ""
# Translate param_type for known types.
if param_type == "argument":
missing = _("Missing argument")
elif param_type == "option":
missing = _("Missing option")
elif param_type == "parameter":
missing = _("Missing parameter")
else:
missing = _("Missing {param_type}").format(param_type=param_type)
return f"{missing}{param_hint}.{msg}"
def __str__(self) -> str:
if not self.message:
param_name = self.param.name if self.param else None
return _("Missing parameter: {param_name}").format(param_name=param_name)
else:
return self.message
class NoSuchOption(UsageError):
"""Raised if click attempted to handle an option that does not
exist.
.. versionadded:: 4.0
"""
def __init__(
self,
option_name: str,
message: t.Optional[str] = None,
possibilities: t.Optional[t.Sequence[str]] = None,
ctx: t.Optional["Context"] = None,
) -> None:
if message is None:
message = _("No such option: {name}").format(name=option_name)
super().__init__(message, ctx)
self.option_name = option_name
self.possibilities = possibilities
def format_message(self) -> str:
if not self.possibilities:
return self.message
possibility_str = ", ".join(sorted(self.possibilities))
suggest = ngettext(
"Did you mean {possibility}?",
"(Possible options: {possibilities})",
len(self.possibilities),
).format(possibility=possibility_str, possibilities=possibility_str)
return f"{self.message} {suggest}"
class BadOptionUsage(UsageError):
"""Raised if an option is generally supplied but the use of the option
was incorrect. This is for instance raised if the number of arguments
for an option is not correct.
.. versionadded:: 4.0
:param option_name: the name of the option being used incorrectly.
"""
def __init__(
self, option_name: str, message: str, ctx: t.Optional["Context"] = None
) -> None:
super().__init__(message, ctx)
self.option_name = option_name
class BadArgumentUsage(UsageError):
"""Raised if an argument is generally supplied but the use of the argument
was incorrect. This is for instance raised if the number of values
for an argument is not correct.
.. versionadded:: 6.0
"""
class FileError(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename: str, hint: t.Optional[str] = None) -> None:
if hint is None:
hint = _("unknown error")
super().__init__(hint)
self.ui_filename = os.fsdecode(filename)
self.filename = filename
def format_message(self) -> str:
return _("Could not open file {filename!r}: {message}").format(
filename=self.ui_filename, message=self.message
)
class Abort(RuntimeError):
"""An internal signalling exception that signals Click to abort."""
class Exit(RuntimeError):
"""An exception that indicates that the application should exit with some
status code.
:param code: the status code to exit with.
"""
__slots__ = ("exit_code",)
def __init__(self, code: int = 0) -> None:
self.exit_code = code
| 9,167 | Python | 30.833333 | 85 | 0.586997 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/parser.py | """
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a separate module rather than the stdlib's optparse
is that Python 2.x and 3.x generate different error messages, and the
stdlib's optparse uses gettext for no good reason, which might cause us
issues.
Click uses parts of optparse written by Gregory P. Ward and maintained
by the Python Software Foundation. This is limited to code in parser.py.
Copyright 2001-2006 Gregory P. Ward. All rights reserved.
Copyright 2002-2006 Python Software Foundation. All rights reserved.
"""
# This code uses parts of optparse written by Gregory P. Ward and
# maintained by the Python Software Foundation.
# Copyright 2001-2006 Gregory P. Ward
# Copyright 2002-2006 Python Software Foundation
import typing as t
from collections import deque
from gettext import gettext as _
from gettext import ngettext
from .exceptions import BadArgumentUsage
from .exceptions import BadOptionUsage
from .exceptions import NoSuchOption
from .exceptions import UsageError
if t.TYPE_CHECKING:
import typing_extensions as te
from .core import Argument as CoreArgument
from .core import Context
from .core import Option as CoreOption
from .core import Parameter as CoreParameter
V = t.TypeVar("V")
# Sentinel value that indicates an option was passed as a flag without a
# value but is not a flag option. Option.consume_value uses this to
# prompt or use the flag_value.
_flag_needs_value = object()
def _unpack_args(
args: t.Sequence[str], nargs_spec: t.Sequence[int]
) -> t.Tuple[t.Sequence[t.Union[str, t.Sequence[t.Optional[str]], None]], t.List[str]]:
"""Given an iterable of arguments and an iterable of nargs specifications,
it returns a tuple with all the unpacked arguments at the first index
and all remaining arguments as the second.
The nargs specification is the number of arguments that should be consumed
or `-1` to indicate that this position should eat up all the remainders.
Missing items are filled with `None`.
"""
args = deque(args)
nargs_spec = deque(nargs_spec)
rv: t.List[t.Union[str, t.Tuple[t.Optional[str], ...], None]] = []
spos: t.Optional[int] = None
def _fetch(c: "te.Deque[V]") -> t.Optional[V]:
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs is None:
continue
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
# If we're reversed, we're pulling in the arguments in reverse,
# so we need to turn them around.
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError("Cannot have two nargs < 0")
spos = len(rv)
rv.append(None)
# spos is the position of the wildcard (star). If it's not `None`,
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1 :] = reversed(rv[spos + 1 :])
return tuple(rv), list(args)
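def _demo_unpack_args() -> None:
    # Illustrative sketch: how nargs specs consume arguments. -1 marks the
    # wildcard position that eats everything not claimed by the others.
    unpacked, leftover = _unpack_args(["a", "b", "c", "d", "e"], [1, -1, 2])
    assert unpacked == ("a", ("b", "c"), ("d", "e"))
    assert leftover == []
    # Missing items are filled with None.
    unpacked, leftover = _unpack_args(["a"], [2])
    assert unpacked == (("a", None),)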
def split_opt(opt: str) -> t.Tuple[str, str]:
first = opt[:1]
if first.isalnum():
return "", opt
if opt[1:2] == first:
return opt[:2], opt[2:]
return first, opt[1:]
def normalize_opt(opt: str, ctx: t.Optional["Context"]) -> str:
if ctx is None or ctx.token_normalize_func is None:
return opt
prefix, opt = split_opt(opt)
return f"{prefix}{ctx.token_normalize_func(opt)}"
def split_arg_string(string: str) -> t.List[str]:
"""Split an argument string as with :func:`shlex.split`, but don't
fail if the string is incomplete. Ignores a missing closing quote or
incomplete escape sequence and uses the partial token as-is.
.. code-block:: python
split_arg_string("example 'my file")
["example", "my file"]
split_arg_string("example my\\")
["example", "my"]
:param string: String to split.
"""
import shlex
lex = shlex.shlex(string, posix=True)
lex.whitespace_split = True
lex.commenters = ""
out = []
try:
for token in lex:
out.append(token)
except ValueError:
# Raised when end-of-string is reached in an invalid state. Use
# the partial token as-is. The quote or escape character is in
# lex.state, not lex.token.
out.append(lex.token)
return out
class Option:
def __init__(
self,
obj: "CoreOption",
opts: t.Sequence[str],
dest: t.Optional[str],
action: t.Optional[str] = None,
nargs: int = 1,
const: t.Optional[t.Any] = None,
):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError(f"Invalid start character for option ({opt})")
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = "store"
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self) -> bool:
return self.action in ("store", "append")
def process(self, value: str, state: "ParsingState") -> None:
if self.action == "store":
state.opts[self.dest] = value # type: ignore
elif self.action == "store_const":
state.opts[self.dest] = self.const # type: ignore
elif self.action == "append":
state.opts.setdefault(self.dest, []).append(value) # type: ignore
elif self.action == "append_const":
state.opts.setdefault(self.dest, []).append(self.const) # type: ignore
elif self.action == "count":
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore
else:
raise ValueError(f"unknown action '{self.action}'")
state.order.append(self.obj)
class Argument:
def __init__(self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(
self,
value: t.Union[t.Optional[str], t.Sequence[t.Optional[str]]],
state: "ParsingState",
) -> None:
if self.nargs > 1:
assert value is not None
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage(
_("Argument {name!r} takes {nargs} values.").format(
name=self.dest, nargs=self.nargs
)
)
if self.nargs == -1 and self.obj.envvar is not None and value == ():
# Replace empty tuple with None so that a value from the
# environment may be tried.
value = None
state.opts[self.dest] = value # type: ignore
state.order.append(self.obj)
class ParsingState:
def __init__(self, rargs: t.List[str]) -> None:
self.opts: t.Dict[str, t.Any] = {}
self.largs: t.List[str] = []
self.rargs = rargs
self.order: t.List["CoreParameter"] = []
class OptionParser:
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
    :param ctx: optionally the :class:`~click.Context` that this parser
        belongs to.
"""
def __init__(self, ctx: t.Optional["Context"] = None) -> None:
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt: t.Dict[str, Option] = {}
self._long_opt: t.Dict[str, Option] = {}
self._opt_prefixes = {"-", "--"}
self._args: t.List[Argument] = []
def add_option(
self,
obj: "CoreOption",
opts: t.Sequence[str],
dest: t.Optional[str],
action: t.Optional[str] = None,
nargs: int = 1,
const: t.Optional[t.Any] = None,
) -> None:
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
provided. Action can be any of ``store``, ``store_const``,
``append``, ``append_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(obj, opts, dest, action=action, nargs=nargs, const=const)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(
self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1
) -> None:
"""Adds a positional argument named `dest` to the parser.
        The `obj` can be used to identify the argument in the order list
that is returned from the parser.
"""
self._args.append(Argument(obj, dest=dest, nargs=nargs))
def parse_args(
self, args: t.List[str]
) -> t.Tuple[t.Dict[str, t.Any], t.List[str], t.List["CoreParameter"]]:
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state: ParsingState) -> None:
pargs, args = _unpack_args(
state.largs + state.rargs, [x.nargs for x in self._args]
)
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state: ParsingState) -> None:
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == "--":
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(
self, opt: str, explicit_value: t.Optional[str], state: ParsingState
) -> None:
if opt not in self._long_opt:
from difflib import get_close_matches
possibilities = get_close_matches(opt, self._long_opt)
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
value = self._get_value_from_state(opt, option, state)
elif explicit_value is not None:
raise BadOptionUsage(
opt, _("Option {name!r} does not take a value.").format(name=opt)
)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg: str, state: ParsingState) -> None:
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(f"{prefix}{ch}", self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt, ctx=self.ctx)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
value = self._get_value_from_state(opt, option, state)
else:
value = None
option.process(value, state)
if stop:
break
        # If we got any unknown options, we recombine the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as a new larg. This way basic option combining
        # still works while unknown options are ignored.
if self.ignore_unknown_options and unknown_options:
state.largs.append(f"{prefix}{''.join(unknown_options)}")
def _get_value_from_state(
self, option_name: str, option: Option, state: ParsingState
) -> t.Any:
nargs = option.nargs
if len(state.rargs) < nargs:
if option.obj._flag_needs_value:
# Option allows omitting the value.
value = _flag_needs_value
else:
raise BadOptionUsage(
option_name,
ngettext(
"Option {name!r} requires an argument.",
"Option {name!r} requires {nargs} arguments.",
nargs,
).format(name=option_name, nargs=nargs),
)
elif nargs == 1:
next_rarg = state.rargs[0]
if (
option.obj._flag_needs_value
and isinstance(next_rarg, str)
and next_rarg[:1] in self._opt_prefixes
and len(next_rarg) > 1
):
# The next arg looks like the start of an option, don't
# use it as the value if omitting the value is allowed.
value = _flag_needs_value
else:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
return value
def _process_opts(self, arg: str, state: ParsingState) -> None:
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if "=" in arg:
long_opt, explicit_value = arg.split("=", 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options. However, there is a special rule:
            # if we have a two-character option prefix (as with "--foo",
            # for instance), we do not dispatch to the short option code
            # and instead raise the no-such-option error.
if arg[:2] not in self._opt_prefixes:
self._match_short_opt(arg, state)
return
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
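# Illustrative example (not part of Click): driving the low-level parser
# directly. ``_Obj`` is a hypothetical stand-in for a core parameter; the
# parser only records it in the returned order list and, for options,
# checks ``_flag_needs_value``.
if __name__ == "__main__":
    class _Obj:
        _flag_needs_value = False
        envvar = None
    parser = OptionParser()
    parser.add_option(_Obj(), ["-n", "--name"], dest="name")
    parser.add_option(_Obj(), ["-v", "--verbose"], dest="verbose", action="count")
    parser.add_argument(_Obj(), dest="src", nargs=1)
    values, leftover, order = parser.parse_args(
        ["-vv", "--name", "demo", "input.txt"]
    )
    assert values == {"name": "demo", "verbose": 2, "src": "input.txt"}
    assert leftover == []
    assert len(order) == 4  # -v, -v, --name, and the positional argument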
| 19,044 | Python | 34.933962 | 87 | 0.5795 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/_compat.py | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
CYGWIN = sys.platform.startswith("cygwin")
MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version)
# Determine local App Engine environment, per Google's own suggestion
APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get(
"SERVER_SOFTWARE", ""
)
WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
auto_wrap_for_ansi: t.Optional[t.Callable[[t.TextIO], t.TextIO]] = None
_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
def get_filesystem_encoding() -> str:
return sys.getfilesystemencoding() or sys.getdefaultencoding()
def _make_text_stream(
stream: t.BinaryIO,
encoding: t.Optional[str],
errors: t.Optional[str],
force_readable: bool = False,
force_writable: bool = False,
) -> t.TextIO:
if encoding is None:
encoding = get_best_encoding(stream)
if errors is None:
errors = "replace"
return _NonClosingTextIOWrapper(
stream,
encoding,
errors,
line_buffering=True,
force_readable=force_readable,
force_writable=force_writable,
)
def is_ascii_encoding(encoding: str) -> bool:
"""Checks if a given encoding is ascii."""
try:
return codecs.lookup(encoding).name == "ascii"
except LookupError:
return False
def get_best_encoding(stream: t.IO) -> str:
"""Returns the default stream encoding if not found."""
rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return "utf-8"
return rv
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __init__(
self,
stream: t.BinaryIO,
encoding: t.Optional[str],
errors: t.Optional[str],
force_readable: bool = False,
force_writable: bool = False,
**extra: t.Any,
) -> None:
self._stream = stream = t.cast(
t.BinaryIO, _FixupStream(stream, force_readable, force_writable)
)
super().__init__(stream, encoding, errors, **extra)
def __del__(self) -> None:
try:
self.detach()
except Exception:
pass
def isatty(self) -> bool:
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
class _FixupStream:
"""The new io interface needs more from streams than streams
traditionally implement. As such, this fix-up code is necessary in
some circumstances.
    The forcing of the readable and writable flags is there because some
    tools put badly patched objects on sys (one such offender is certain
    versions of the Jupyter notebook).
"""
def __init__(
self,
stream: t.BinaryIO,
force_readable: bool = False,
force_writable: bool = False,
):
self._stream = stream
self._force_readable = force_readable
self._force_writable = force_writable
def __getattr__(self, name: str) -> t.Any:
return getattr(self._stream, name)
def read1(self, size: int) -> bytes:
f = getattr(self._stream, "read1", None)
if f is not None:
return t.cast(bytes, f(size))
return self._stream.read(size)
def readable(self) -> bool:
if self._force_readable:
return True
x = getattr(self._stream, "readable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self) -> bool:
if self._force_writable:
return True
x = getattr(self._stream, "writable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.write("") # type: ignore
except Exception:
try:
self._stream.write(b"")
except Exception:
return False
return True
def seekable(self) -> bool:
x = getattr(self._stream, "seekable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
def _is_binary_reader(stream: t.IO, default: bool = False) -> bool:
try:
return isinstance(stream.read(0), bytes)
    except Exception:
        # This happens in some cases where the stream was already
        # closed. In this case, we assume the default.
        return default
def _is_binary_writer(stream: t.IO, default: bool = False) -> bool:
try:
stream.write(b"")
except Exception:
try:
stream.write("")
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream: t.IO) -> t.Optional[t.BinaryIO]:
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return t.cast(t.BinaryIO, stream)
buf = getattr(stream, "buffer", None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return t.cast(t.BinaryIO, buf)
return None
def _find_binary_writer(stream: t.IO) -> t.Optional[t.BinaryIO]:
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return t.cast(t.BinaryIO, stream)
buf = getattr(stream, "buffer", None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return t.cast(t.BinaryIO, buf)
return None
def _stream_is_misconfigured(stream: t.TextIO) -> bool:
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: t.Optional[str]) -> bool:
"""A stream attribute is compatible if it is equal to the
desired value or the desired value is unset and the attribute
has a value.
"""
stream_value = getattr(stream, attr, None)
return stream_value == value or (value is None and stream_value is not None)
def _is_compatible_text_stream(
stream: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> bool:
"""Check if a stream's encoding and errors attributes are
compatible with the desired values.
"""
return _is_compat_stream_attr(
stream, "encoding", encoding
) and _is_compat_stream_attr(stream, "errors", errors)
def _force_correct_text_stream(
text_stream: t.IO,
encoding: t.Optional[str],
errors: t.Optional[str],
is_binary: t.Callable[[t.IO, bool], bool],
find_binary: t.Callable[[t.IO], t.Optional[t.BinaryIO]],
force_readable: bool = False,
force_writable: bool = False,
) -> t.TextIO:
if is_binary(text_stream, False):
binary_reader = t.cast(t.BinaryIO, text_stream)
else:
text_stream = t.cast(t.TextIO, text_stream)
# If the stream looks compatible, and won't default to a
# misconfigured ascii encoding, return it as-is.
if _is_compatible_text_stream(text_stream, encoding, errors) and not (
encoding is None and _stream_is_misconfigured(text_stream)
):
return text_stream
# Otherwise, get the underlying binary reader.
possible_binary_reader = find_binary(text_stream)
# If that's not possible, silently use the original reader
# and get mojibake instead of exceptions.
if possible_binary_reader is None:
return text_stream
binary_reader = possible_binary_reader
# Default errors to replace instead of strict in order to get
# something that works.
if errors is None:
errors = "replace"
# Wrap the binary stream in a text stream with the correct
# encoding parameters.
return _make_text_stream(
binary_reader,
encoding,
errors,
force_readable=force_readable,
force_writable=force_writable,
)
def _force_correct_text_reader(
text_reader: t.IO,
encoding: t.Optional[str],
errors: t.Optional[str],
force_readable: bool = False,
) -> t.TextIO:
return _force_correct_text_stream(
text_reader,
encoding,
errors,
_is_binary_reader,
_find_binary_reader,
force_readable=force_readable,
)
def _force_correct_text_writer(
text_writer: t.IO,
encoding: t.Optional[str],
errors: t.Optional[str],
force_writable: bool = False,
) -> t.TextIO:
return _force_correct_text_stream(
text_writer,
encoding,
errors,
_is_binary_writer,
_find_binary_writer,
force_writable=force_writable,
)
def get_binary_stdin() -> t.BinaryIO:
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
return reader
def get_binary_stdout() -> t.BinaryIO:
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError("Was not able to determine binary stream for sys.stdout.")
return writer
def get_binary_stderr() -> t.BinaryIO:
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
return writer
def get_text_stdin(
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
) -> t.TextIO:
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True)
def get_text_stdout(
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
) -> t.TextIO:
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True)
def get_text_stderr(
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
) -> t.TextIO:
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True)
def _wrap_io_open(
file: t.Union[str, os.PathLike, int],
mode: str,
encoding: t.Optional[str],
errors: t.Optional[str],
) -> t.IO:
"""Handles not passing ``encoding`` and ``errors`` in binary mode."""
if "b" in mode:
return open(file, mode)
return open(file, mode, encoding=encoding, errors=errors)
def open_stream(
filename: str,
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
atomic: bool = False,
) -> t.Tuple[t.IO, bool]:
binary = "b" in mode
# Standard streams first. These are simple because they ignore the
# atomic flag. Use fsdecode to handle Path("-").
if os.fsdecode(filename) == "-":
if any(m in mode for m in ["w", "a", "x"]):
if binary:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if binary:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
return _wrap_io_open(filename, mode, encoding, errors), True
# Some usability stuff for atomic writes
if "a" in mode:
raise ValueError(
"Appending to an existing file is not supported, because that"
" would involve an expensive `copy`-operation to a temporary"
" file. Open the file in normal `w`-mode and copy explicitly"
" if that's what you're after."
)
if "x" in mode:
raise ValueError("Use the `overwrite`-parameter instead.")
if "w" not in mode:
raise ValueError("Atomic writes only make sense with `w`-mode.")
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import errno
import random
try:
perm: t.Optional[int] = os.stat(filename).st_mode
except OSError:
perm = None
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
if binary:
flags |= getattr(os, "O_BINARY", 0)
while True:
tmp_filename = os.path.join(
os.path.dirname(filename),
f".__atomic-write{random.randrange(1 << 32):08x}",
)
try:
fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
break
except OSError as e:
if e.errno == errno.EEXIST or (
os.name == "nt"
and e.errno == errno.EACCES
and os.path.isdir(e.filename)
and os.access(e.filename, os.W_OK)
):
continue
raise
if perm is not None:
os.chmod(tmp_filename, perm) # in case perm includes bits in umask
f = _wrap_io_open(fd, mode, encoding, errors)
af = _AtomicFile(f, tmp_filename, os.path.realpath(filename))
return t.cast(t.IO, af), True
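def _demo_atomic_write() -> None:
    # Illustrative sketch (hypothetical filename): with atomic=True the data
    # goes to a temporary sibling file that is moved into place on close, so
    # readers never observe a partially written file.
    f, should_close = open_stream("demo-output.txt", "w", atomic=True)
    try:
        f.write("all or nothing\n")
    finally:
        if should_close:
            f.close()  # the rename to the real filename happens here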
class _AtomicFile:
def __init__(self, f: t.IO, tmp_filename: str, real_filename: str) -> None:
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self) -> str:
return self._real_filename
def close(self, delete: bool = False) -> None:
if self.closed:
return
self._f.close()
os.replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name: str) -> t.Any:
return getattr(self._f, name)
def __enter__(self) -> "_AtomicFile":
return self
def __exit__(self, exc_type, exc_value, tb): # type: ignore
self.close(delete=exc_type is not None)
def __repr__(self) -> str:
return repr(self._f)
def strip_ansi(value: str) -> str:
return _ansi_re.sub("", value)
def _is_jupyter_kernel_output(stream: t.IO) -> bool:
while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
stream = stream._stream
return stream.__class__.__module__.startswith("ipykernel.")
def should_strip_ansi(
stream: t.Optional[t.IO] = None, color: t.Optional[bool] = None
) -> bool:
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream) and not _is_jupyter_kernel_output(stream)
return not color
# On Windows, wrap the output streams with colorama to support ANSI
# color codes.
# NOTE: double check is needed so mypy does not analyze this on Linux
if sys.platform.startswith("win") and WIN:
from ._winconsole import _get_windows_console_stream
def _get_argv_encoding() -> str:
import locale
return locale.getpreferredencoding()
_ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
def auto_wrap_for_ansi(
stream: t.TextIO, color: t.Optional[bool] = None
) -> t.TextIO:
"""Support ANSI color and style codes on Windows by wrapping a
stream with colorama.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
import colorama
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = t.cast(t.TextIO, ansi_wrapper.stream)
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except BaseException:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
else:
def _get_argv_encoding() -> str:
return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
def _get_windows_console_stream(
f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
return None
def term_len(x: str) -> int:
return len(strip_ansi(x))
def isatty(stream: t.IO) -> bool:
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(
src_func: t.Callable[[], t.TextIO], wrapper_func: t.Callable[[], t.TextIO]
) -> t.Callable[[], t.TextIO]:
cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
def func() -> t.TextIO:
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
binary_streams: t.Mapping[str, t.Callable[[], t.BinaryIO]] = {
"stdin": get_binary_stdin,
"stdout": get_binary_stdout,
"stderr": get_binary_stderr,
}
text_streams: t.Mapping[
str, t.Callable[[t.Optional[str], t.Optional[str]], t.TextIO]
] = {
"stdin": get_text_stdin,
"stdout": get_text_stdout,
"stderr": get_text_stderr,
}
| 18,810 | Python | 29.001595 | 88 | 0.616108 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/termui.py | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
V = t.TypeVar("V")
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func: t.Callable[[str], str] = input
_ansi_colors = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"reset": 39,
"bright_black": 90,
"bright_red": 91,
"bright_green": 92,
"bright_yellow": 93,
"bright_blue": 94,
"bright_magenta": 95,
"bright_cyan": 96,
"bright_white": 97,
}
_ansi_reset_all = "\033[0m"
def hidden_prompt_func(prompt: str) -> str:
import getpass
return getpass.getpass(prompt)
def _build_prompt(
text: str,
suffix: str,
show_default: bool = False,
default: t.Optional[t.Any] = None,
show_choices: bool = True,
type: t.Optional[ParamType] = None,
) -> str:
prompt = text
if type is not None and show_choices and isinstance(type, Choice):
prompt += f" ({', '.join(map(str, type.choices))})"
if default is not None and show_default:
prompt = f"{prompt} [{_format_default(default)}]"
return f"{prompt}{suffix}"
def _format_default(default: t.Any) -> t.Any:
if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
return default.name # type: ignore
return default
def prompt(
text: str,
default: t.Optional[t.Any] = None,
hide_input: bool = False,
confirmation_prompt: t.Union[bool, str] = False,
type: t.Optional[t.Union[ParamType, t.Any]] = None,
value_proc: t.Optional[t.Callable[[str], t.Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
) -> t.Any:
"""Prompts a user for input. This is a convenience function that can
be used to prompt a user for input later.
If the user aborts the input by sending an interrupt signal, this
    function will catch it and raise an :exc:`Abort` exception.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this
is not given it will prompt until it's aborted.
:param hide_input: if this is set to true then the input value will
be hidden.
:param confirmation_prompt: Prompt a second time to confirm the
value. Can be set to a string instead of ``True`` to customize
the message.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that
is invoked instead of the type conversion to
convert a value.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param show_choices: Show or hide choices if the passed type is a Choice.
For example if type is a Choice of either day or week,
show_choices is true and text is "Group by" then the
prompt will be "Group by (day, week): ".
.. versionadded:: 8.0
``confirmation_prompt`` can be a custom string.
.. versionadded:: 7.0
Added the ``show_choices`` parameter.
.. versionadded:: 6.0
Added unicode support for cmd.exe on Windows.
.. versionadded:: 4.0
Added the `err` parameter.
"""
def prompt_func(text: str) -> str:
f = hidden_prompt_func if hide_input else visible_prompt_func
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(text.rstrip(" "), nl=False, err=err)
# Echo a space to stdout to work around an issue where
# readline causes backspace to clear the whole line.
return f(" ")
except (KeyboardInterrupt, EOFError):
# getpass doesn't print a newline if the user aborts input with ^C.
# Allegedly this behavior is inherited from getpass(3).
# A doc bug has been filed at https://bugs.python.org/issue24711
if hide_input:
echo(None, err=err)
raise Abort() from None
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(
text, prompt_suffix, show_default, default, show_choices, type
)
if confirmation_prompt:
if confirmation_prompt is True:
confirmation_prompt = _("Repeat for confirmation")
confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
value = default
break
try:
result = value_proc(value)
except UsageError as e:
if hide_input:
echo(_("Error: The value you entered was invalid."), err=err)
else:
echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306
continue
if not confirmation_prompt:
return result
while True:
value2 = prompt_func(confirmation_prompt)
is_empty = not value and not value2
if value2 or is_empty:
break
if value == value2:
return result
echo(_("Error: The two entered values do not match."), err=err)
def confirm(
text: str,
default: t.Optional[bool] = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
) -> bool:
"""Prompts for confirmation (yes/no question).
    If the user aborts the input by sending an interrupt signal, this
    function will catch it and raise an :exc:`Abort` exception.
:param text: the question to ask.
:param default: The default value to use when no input is given. If
``None``, repeat until input is given.
    :param abort: if this is set to `True` a negative answer aborts the
                  execution by raising :exc:`Abort`.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
.. versionchanged:: 8.0
Repeat until input is given if ``default`` is ``None``.
.. versionadded:: 4.0
Added the ``err`` parameter.
"""
prompt = _build_prompt(
text,
prompt_suffix,
show_default,
"y/n" if default is None else ("Y/n" if default else "y/N"),
)
while True:
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(prompt.rstrip(" "), nl=False, err=err)
# Echo a space to stdout to work around an issue where
# readline causes backspace to clear the whole line.
value = visible_prompt_func(" ").lower().strip()
except (KeyboardInterrupt, EOFError):
raise Abort() from None
if value in ("y", "yes"):
rv = True
elif value in ("n", "no"):
rv = False
elif default is not None and value == "":
rv = default
else:
echo(_("Error: invalid input"), err=err)
continue
break
if abort and not rv:
raise Abort()
return rv
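# Illustrative sketch (not part of the original module): a guard built on
# confirm(). The message is hypothetical.
def _example_confirm_usage() -> None:
    # Default "Y/n": plain Enter returns True; abort=True would raise
    # Abort on a negative answer instead of returning False.
    if confirm("Overwrite existing file?", default=True):
        echo("overwriting")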
def echo_via_pager(
text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
color: t.Optional[bool] = None,
) -> None:
"""This function takes a text and shows it via an environment specific
pager on stdout.
.. versionchanged:: 3.0
Added the `color` flag.
:param text_or_generator: the text to page, or alternatively, a
generator emitting the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
"""
color = resolve_color_default(color)
if inspect.isgeneratorfunction(text_or_generator):
i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
elif isinstance(text_or_generator, str):
i = [text_or_generator]
else:
i = iter(t.cast(t.Iterable[str], text_or_generator))
# convert every element of i to a text type if necessary
text_generator = (el if isinstance(el, str) else str(el) for el in i)
from ._termui_impl import pager
return pager(itertools.chain(text_generator, "\n"), color)
def progressbar(
iterable: t.Optional[t.Iterable[V]] = None,
length: t.Optional[int] = None,
label: t.Optional[str] = None,
show_eta: bool = True,
show_percent: t.Optional[bool] = None,
show_pos: bool = False,
item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
fill_char: str = "#",
empty_char: str = "-",
bar_template: str = "%(label)s [%(bar)s] %(info)s",
info_sep: str = " ",
width: int = 36,
file: t.Optional[t.TextIO] = None,
color: t.Optional[bool] = None,
update_min_steps: int = 1,
) -> "ProgressBar[V]":
"""This function creates an iterable context manager that can be used
to iterate over something while showing a progress bar. It will
either iterate over the `iterable` or `length` items (that are counted
up). While iteration happens, this function will print a rendered
progress bar to the given `file` (defaults to stdout) and will attempt
to calculate remaining time and more. By default, this progress bar
will not be rendered if the file is not a terminal.
The context manager creates the progress bar. When the context
manager is entered the progress bar is already created. With every
iteration over the progress bar, the iterable passed to the bar is
advanced and the bar is updated. When the context manager exits,
a newline is printed and the progress bar is finalized on screen.
Note: The progress bar is currently designed for use cases where the
total progress can be expected to take at least several seconds.
    Because of this, the ProgressBar class object won't display
    progress that is considered too fast, or progress where the time
    between steps is less than a second.
    No other output may be printed while the progress bar is active,
    or the progress bar will be unintentionally destroyed.
Example usage::
with progressbar(items) as bar:
for item in bar:
do_something_with(item)
Alternatively, if no iterable is specified, one can manually update the
progress bar through the `update()` method instead of directly
iterating over the progress bar. The update method accepts the number
of steps to increment the bar with::
with progressbar(length=chunks.total_bytes) as bar:
for chunk in chunks:
process_chunk(chunk)
bar.update(chunks.bytes)
The ``update()`` method also takes an optional value specifying the
``current_item`` at the new position. This is useful when used
together with ``item_show_func`` to customize the output for each
manual step::
with click.progressbar(
length=total_size,
label='Unzipping archive',
item_show_func=lambda a: a.filename
) as bar:
for archive in zip_file:
archive.extract()
bar.update(archive.size, archive)
:param iterable: an iterable to iterate over. If not provided the length
is required.
:param length: the number of items to iterate over. By default the
progressbar will attempt to ask the iterator about its
length, which might or might not work. If an iterable is
also provided this parameter can be used to override the
length. If an iterable is not provided the progress bar
will iterate over a range of that length.
:param label: the label to show next to the progress bar.
:param show_eta: enables or disables the estimated time display. This is
automatically disabled if the length cannot be
determined.
:param show_percent: enables or disables the percentage display. The
default is `True` if the iterable has a length or
`False` if not.
:param show_pos: enables or disables the absolute position display. The
default is `False`.
:param item_show_func: A function called with the current item which
can return a string to show next to the progress bar. If the
function returns ``None`` nothing is shown. The current item can
be ``None``, such as when entering and exiting the bar.
:param fill_char: the character to use to show the filled part of the
progress bar.
:param empty_char: the character to use to show the non-filled part of
the progress bar.
:param bar_template: the format string to use as template for the bar.
The parameters in it are ``label`` for the label,
``bar`` for the progress bar and ``info`` for the
info section.
:param info_sep: the separator between multiple info items (eta etc.)
:param width: the width of the progress bar in characters, 0 means full
terminal width
:param file: The file to write to. If this is not a terminal then
only the label is printed.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are included anywhere in the progress bar output
which is not the case by default.
:param update_min_steps: Render only when this many updates have
completed. This allows tuning for very fast iterators.
.. versionchanged:: 8.0
Output is shown even if execution time is less than 0.5 seconds.
.. versionchanged:: 8.0
``item_show_func`` shows the current item, not the previous one.
.. versionchanged:: 8.0
Labels are echoed if the output is not a TTY. Reverts a change
in 7.0 that removed all output.
.. versionadded:: 8.0
Added the ``update_min_steps`` parameter.
.. versionchanged:: 4.0
Added the ``color`` parameter. Added the ``update`` method to
the object.
.. versionadded:: 2.0
"""
from ._termui_impl import ProgressBar
color = resolve_color_default(color)
return ProgressBar(
iterable=iterable,
length=length,
show_eta=show_eta,
show_percent=show_percent,
show_pos=show_pos,
item_show_func=item_show_func,
fill_char=fill_char,
empty_char=empty_char,
bar_template=bar_template,
info_sep=info_sep,
file=file,
label=label,
width=width,
color=color,
update_min_steps=update_min_steps,
)
def clear() -> None:
"""Clears the terminal screen. This will have the effect of clearing
the whole visible space of the terminal and moving the cursor to the
top left. This does not do anything if not connected to a terminal.
.. versionadded:: 2.0
"""
if not isatty(sys.stdout):
return
if WIN:
os.system("cls")
else:
sys.stdout.write("\033[2J\033[1;1H")
def _interpret_color(
color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0
) -> str:
if isinstance(color, int):
return f"{38 + offset};5;{color:d}"
if isinstance(color, (tuple, list)):
r, g, b = color
return f"{38 + offset};2;{r:d};{g:d};{b:d}"
return str(_ansi_colors[color] + offset)
def style(
text: t.Any,
fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bold: t.Optional[bool] = None,
dim: t.Optional[bool] = None,
underline: t.Optional[bool] = None,
overline: t.Optional[bool] = None,
italic: t.Optional[bool] = None,
blink: t.Optional[bool] = None,
reverse: t.Optional[bool] = None,
strikethrough: t.Optional[bool] = None,
reset: bool = True,
) -> str:
"""Styles a text with ANSI styles and returns the new string. By
default the styling is self contained which means that at the end
of the string a reset code is issued. This can be prevented by
passing ``reset=False``.
Examples::
click.echo(click.style('Hello World!', fg='green'))
click.echo(click.style('ATTENTION!', blink=True))
click.echo(click.style('Some things', reverse=True, fg='cyan'))
click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))
Supported color names:
* ``black`` (might be a gray)
* ``red``
* ``green``
* ``yellow`` (might be an orange)
* ``blue``
* ``magenta``
* ``cyan``
* ``white`` (might be light gray)
* ``bright_black``
* ``bright_red``
* ``bright_green``
* ``bright_yellow``
* ``bright_blue``
* ``bright_magenta``
* ``bright_cyan``
* ``bright_white``
* ``reset`` (reset the color code only)
If the terminal supports it, color may also be specified as:
- An integer in the interval [0, 255]. The terminal must support
8-bit/256-color mode.
- An RGB tuple of three integers in [0, 255]. The terminal must
support 24-bit/true-color mode.
See https://en.wikipedia.org/wiki/ANSI_color and
https://gist.github.com/XVilka/8346728 for more information.
:param text: the string to style with ansi codes.
:param fg: if provided this will become the foreground color.
:param bg: if provided this will become the background color.
:param bold: if provided this will enable or disable bold mode.
:param dim: if provided this will enable or disable dim mode. This is
badly supported.
:param underline: if provided this will enable or disable underline.
:param overline: if provided this will enable or disable overline.
:param italic: if provided this will enable or disable italic.
:param blink: if provided this will enable or disable blinking.
:param reverse: if provided this will enable or disable inverse
rendering (foreground becomes background and the
other way round).
:param strikethrough: if provided this will enable or disable
striking through text.
:param reset: by default a reset-all code is added at the end of the
string which means that styles do not carry over. This
can be disabled to compose styles.
.. versionchanged:: 8.0
A non-string ``message`` is converted to a string.
.. versionchanged:: 8.0
Added support for 256 and RGB color codes.
.. versionchanged:: 8.0
Added the ``strikethrough``, ``italic``, and ``overline``
parameters.
.. versionchanged:: 7.0
Added support for bright colors.
.. versionadded:: 2.0
"""
if not isinstance(text, str):
text = str(text)
bits = []
if fg:
try:
bits.append(f"\033[{_interpret_color(fg)}m")
except KeyError:
raise TypeError(f"Unknown color {fg!r}") from None
if bg:
try:
bits.append(f"\033[{_interpret_color(bg, 10)}m")
except KeyError:
raise TypeError(f"Unknown color {bg!r}") from None
if bold is not None:
bits.append(f"\033[{1 if bold else 22}m")
if dim is not None:
bits.append(f"\033[{2 if dim else 22}m")
if underline is not None:
bits.append(f"\033[{4 if underline else 24}m")
if overline is not None:
bits.append(f"\033[{53 if overline else 55}m")
if italic is not None:
bits.append(f"\033[{3 if italic else 23}m")
if blink is not None:
bits.append(f"\033[{5 if blink else 25}m")
if reverse is not None:
bits.append(f"\033[{7 if reverse else 27}m")
if strikethrough is not None:
bits.append(f"\033[{9 if strikethrough else 29}m")
bits.append(text)
if reset:
bits.append(_ansi_reset_all)
return "".join(bits)
def unstyle(text: str) -> str:
"""Removes ANSI styling information from a string. Usually it's not
necessary to use this function as Click's echo function will
automatically remove styling if necessary.
.. versionadded:: 2.0
:param text: the text to remove style information from.
"""
return strip_ansi(text)
def secho(
message: t.Optional[t.Any] = None,
file: t.Optional[t.IO[t.AnyStr]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
**styles: t.Any,
) -> None:
"""This function combines :func:`echo` and :func:`style` into one
call. As such the following two calls are the same::
click.secho('Hello World!', fg='green')
click.echo(click.style('Hello World!', fg='green'))
All keyword arguments are forwarded to the underlying functions
depending on which one they go with.
Non-string types will be converted to :class:`str`. However,
:class:`bytes` are passed directly to :meth:`echo` without applying
style. If you want to style bytes that represent text, call
:meth:`bytes.decode` first.
.. versionchanged:: 8.0
A non-string ``message`` is converted to a string. Bytes are
passed through without style applied.
.. versionadded:: 2.0
"""
if message is not None and not isinstance(message, (bytes, bytearray)):
message = style(message, **styles)
return echo(message, file=file, nl=nl, err=err, color=color)
def edit(
text: t.Optional[t.AnyStr] = None,
editor: t.Optional[str] = None,
env: t.Optional[t.Mapping[str, str]] = None,
require_save: bool = True,
extension: str = ".txt",
filename: t.Optional[str] = None,
) -> t.Optional[t.AnyStr]:
r"""Edits the given text in the defined editor. If an editor is given
(should be the full path to the executable but the regular operating
system search path is used for finding the executable) it overrides
the detected editor. Optionally, some environment variables can be
used. If the editor is closed without changes, `None` is returned. In
case a file is edited directly the return value is always `None` and
`require_save` and `extension` are ignored.
If the editor cannot be opened a :exc:`UsageError` is raised.
Note for Windows: to simplify cross-platform usage, the newlines are
automatically converted from POSIX to Windows and vice versa. As such,
the message here will have ``\n`` as newline markers.
:param text: the text to edit.
:param editor: optionally the editor to use. Defaults to automatic
detection.
:param env: environment variables to forward to the editor.
:param require_save: if this is true, then not saving in the editor
will make the return value become `None`.
:param extension: the extension to tell the editor about. This defaults
to `.txt` but changing this might change syntax
highlighting.
:param filename: if provided it will edit this file instead of the
provided text contents. It will not use a temporary
file as an indirection in that case.
"""
from ._termui_impl import Editor
ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)
if filename is None:
return ed.edit(text)
ed.edit_file(filename)
return None
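# Illustrative sketch (not part of the original module): round-tripping text
# through the user's editor. The seed text is hypothetical.
def _example_edit_usage() -> None:
    new_text = edit("# Lines starting with '#' are ignored\n")
    # None means the editor was closed without saving (require_save=True).
    if new_text is not None:
        echo(new_text)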
def launch(url: str, wait: bool = False, locate: bool = False) -> int:
"""This function launches the given URL (or filename) in the default
viewer application for this file type. If this is an executable, it
might launch the executable in a new session. The return value is
the exit code of the launched application. Usually, ``0`` indicates
success.
Examples::
click.launch('https://click.palletsprojects.com/')
click.launch('/my/downloaded/file', locate=True)
.. versionadded:: 2.0
:param url: URL or filename of the thing to launch.
:param wait: Wait for the program to exit before returning. This
only works if the launched program blocks. In particular,
``xdg-open`` on Linux does not block.
:param locate: if this is set to `True` then instead of launching the
application associated with the URL it will attempt to
launch a file manager with the file located. This
might have weird effects if the URL does not point to
the filesystem.
"""
from ._termui_impl import open_url
return open_url(url, wait=wait, locate=locate)
# If this is provided, getchar() calls into this instead. This is used
# for unittesting purposes.
_getchar: t.Optional[t.Callable[[bool], str]] = None
def getchar(echo: bool = False) -> str:
"""Fetches a single character from the terminal and returns it. This
will always return a unicode character and under certain rare
    circumstances this might return more than one character. More than
    one character is returned when, for whatever reason, multiple
    characters end up in the terminal buffer or when standard input is
    not actually a terminal.
Note that this will always read from the terminal, even if something
is piped into the standard input.
Note for Windows: in rare cases when typing non-ASCII characters, this
function might wait for a second character and then return both at once.
This is because certain Unicode characters look like special-key markers.
.. versionadded:: 2.0
:param echo: if set to `True`, the character read will also show up on
the terminal. The default is to not show it.
"""
global _getchar
if _getchar is None:
from ._termui_impl import getchar as f
_getchar = f
return _getchar(echo)
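# Illustrative sketch (not part of the original module): a key loop built on
# getchar(). The quit key is hypothetical.
def _example_getchar_usage() -> None:
    while True:
        ch = getchar(echo=True)  # echoes the pressed key back
        if ch == "q":
            break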
def raw_terminal() -> t.ContextManager[int]:
from ._termui_impl import raw_terminal as f
return f()
def pause(info: t.Optional[str] = None, err: bool = False) -> None:
"""This command stops execution and waits for the user to press any
key to continue. This is similar to the Windows batch "pause"
command. If the program is not run through a terminal, this command
will instead do nothing.
.. versionadded:: 2.0
.. versionadded:: 4.0
Added the `err` parameter.
:param info: The message to print before pausing. Defaults to
``"Press any key to continue..."``.
    :param err: if set to true the message goes to ``stderr`` instead of
        ``stdout``, the same as with echo.
"""
if not isatty(sys.stdin) or not isatty(sys.stdout):
return
if info is None:
info = _("Press any key to continue...")
try:
if info:
echo(info, nl=False, err=err)
try:
getchar()
except (KeyboardInterrupt, EOFError):
pass
finally:
if info:
echo(err=err)
| 28,355 | Python | 34.984772 | 87 | 0.626944 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/__init__.py | """
Click is a simple Python module inspired by the stdlib optparse to make
writing command line scripts fun. Unlike other modules, it's based
around a simple API that does not come with too much magic and is
composable.
"""
from .core import Argument as Argument
from .core import BaseCommand as BaseCommand
from .core import Command as Command
from .core import CommandCollection as CommandCollection
from .core import Context as Context
from .core import Group as Group
from .core import MultiCommand as MultiCommand
from .core import Option as Option
from .core import Parameter as Parameter
from .decorators import argument as argument
from .decorators import command as command
from .decorators import confirmation_option as confirmation_option
from .decorators import group as group
from .decorators import help_option as help_option
from .decorators import make_pass_decorator as make_pass_decorator
from .decorators import option as option
from .decorators import pass_context as pass_context
from .decorators import pass_obj as pass_obj
from .decorators import password_option as password_option
from .decorators import version_option as version_option
from .exceptions import Abort as Abort
from .exceptions import BadArgumentUsage as BadArgumentUsage
from .exceptions import BadOptionUsage as BadOptionUsage
from .exceptions import BadParameter as BadParameter
from .exceptions import ClickException as ClickException
from .exceptions import FileError as FileError
from .exceptions import MissingParameter as MissingParameter
from .exceptions import NoSuchOption as NoSuchOption
from .exceptions import UsageError as UsageError
from .formatting import HelpFormatter as HelpFormatter
from .formatting import wrap_text as wrap_text
from .globals import get_current_context as get_current_context
from .parser import OptionParser as OptionParser
from .termui import clear as clear
from .termui import confirm as confirm
from .termui import echo_via_pager as echo_via_pager
from .termui import edit as edit
from .termui import getchar as getchar
from .termui import launch as launch
from .termui import pause as pause
from .termui import progressbar as progressbar
from .termui import prompt as prompt
from .termui import secho as secho
from .termui import style as style
from .termui import unstyle as unstyle
from .types import BOOL as BOOL
from .types import Choice as Choice
from .types import DateTime as DateTime
from .types import File as File
from .types import FLOAT as FLOAT
from .types import FloatRange as FloatRange
from .types import INT as INT
from .types import IntRange as IntRange
from .types import ParamType as ParamType
from .types import Path as Path
from .types import STRING as STRING
from .types import Tuple as Tuple
from .types import UNPROCESSED as UNPROCESSED
from .types import UUID as UUID
from .utils import echo as echo
from .utils import format_filename as format_filename
from .utils import get_app_dir as get_app_dir
from .utils import get_binary_stream as get_binary_stream
from .utils import get_text_stream as get_text_stream
from .utils import open_file as open_file
__version__ = "8.1.3"
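# Illustrative sketch (not part of the original file): a minimal CLI wired
# from the names re-exported above. The command, option, and argument are
# hypothetical.
#
#     import click
#
#     @click.command()
#     @click.option("--count", default=1, show_default=True, type=click.INT)
#     @click.argument("name")
#     def hello(count, name):
#         for _ in range(count):
#             click.echo(click.style(f"Hello, {name}!", fg="green"))
#
#     if __name__ == "__main__":
#         hello()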
| 3,138 | Python | 41.418918 | 71 | 0.821542 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/core.py | import enum
import errno
import inspect
import os
import sys
import typing as t
from collections import abc
from contextlib import contextmanager
from contextlib import ExitStack
from functools import partial
from functools import update_wrapper
from gettext import gettext as _
from gettext import ngettext
from itertools import repeat
from . import types
from .exceptions import Abort
from .exceptions import BadParameter
from .exceptions import ClickException
from .exceptions import Exit
from .exceptions import MissingParameter
from .exceptions import UsageError
from .formatting import HelpFormatter
from .formatting import join_options
from .globals import pop_context
from .globals import push_context
from .parser import _flag_needs_value
from .parser import OptionParser
from .parser import split_opt
from .termui import confirm
from .termui import prompt
from .termui import style
from .utils import _detect_program_name
from .utils import _expand_args
from .utils import echo
from .utils import make_default_short_help
from .utils import make_str
from .utils import PacifyFlushWrapper
if t.TYPE_CHECKING:
import typing_extensions as te
from .shell_completion import CompletionItem
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
V = t.TypeVar("V")
def _complete_visible_commands(
ctx: "Context", incomplete: str
) -> t.Iterator[t.Tuple[str, "Command"]]:
"""List all the subcommands of a group that start with the
incomplete value and aren't hidden.
:param ctx: Invocation context for the group.
:param incomplete: Value being completed. May be empty.
"""
multi = t.cast(MultiCommand, ctx.command)
for name in multi.list_commands(ctx):
if name.startswith(incomplete):
command = multi.get_command(ctx, name)
if command is not None and not command.hidden:
yield name, command
def _check_multicommand(
base_command: "MultiCommand", cmd_name: str, cmd: "Command", register: bool = False
) -> None:
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = (
"It is not possible to add multi commands as children to"
" another multi command that is in chain mode."
)
else:
hint = (
"Found a multi command as subcommand to a multi command"
" that is in chain mode. This is not supported."
)
raise RuntimeError(
f"{hint}. Command {base_command.name!r} is set to chain and"
f" {cmd_name!r} was added as a subcommand but it in itself is a"
f" multi command. ({cmd_name!r} is a {type(cmd).__name__}"
f" within a chained {type(base_command).__name__} named"
f" {base_command.name!r})."
)
def batch(iterable: t.Iterable[V], batch_size: int) -> t.List[t.Tuple[V, ...]]:
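    # zip() truncates, so trailing items that do not fill a complete
    # batch are silently dropped.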
return list(zip(*repeat(iter(iterable), batch_size)))
@contextmanager
def augment_usage_errors(
ctx: "Context", param: t.Optional["Parameter"] = None
) -> t.Iterator[None]:
"""Context manager that attaches extra information to exceptions."""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(
invocation_order: t.Sequence["Parameter"],
declaration_order: t.Sequence["Parameter"],
) -> t.List["Parameter"]:
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item: "Parameter") -> t.Tuple[bool, float]:
try:
idx: float = invocation_order.index(item)
except ValueError:
idx = float("inf")
return not item.is_eager, idx
return sorted(declaration_order, key=sort_key)
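# Illustrative sketch (not part of the original module): eager parameters
# sort first, then by command-line position. Given hypothetical params
# A (eager, never typed), B and C, with C typed before B:
#
#     iter_params_for_processing([C, B], [A, B, C])  # -> [A, C, B]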
class ParameterSource(enum.Enum):
"""This is an :class:`~enum.Enum` that indicates the source of a
parameter's value.
Use :meth:`click.Context.get_parameter_source` to get the
source for a parameter by name.
.. versionchanged:: 8.0
Use :class:`~enum.Enum` and drop the ``validate`` method.
.. versionchanged:: 8.0
Added the ``PROMPT`` value.
"""
COMMANDLINE = enum.auto()
"""The value was provided by the command line args."""
ENVIRONMENT = enum.auto()
"""The value was provided with an environment variable."""
DEFAULT = enum.auto()
"""Used the default specified by the parameter."""
DEFAULT_MAP = enum.auto()
"""Used a default provided by :attr:`Context.default_map`."""
PROMPT = enum.auto()
"""Used a prompt to confirm a default or provide a value."""
class Context:
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
                      the name of the script, for commands below it it's
                      the name of the command.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. Default values will also be
ignored. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
:param show_default: Show the default value for commands. If this
value is not set, it defaults to the value from the parent
context. ``Command.show_default`` overrides this default for the
specific command.
.. versionchanged:: 8.1
The ``show_default`` parameter is overridden by
``Command.show_default``, instead of the other way around.
.. versionchanged:: 8.0
The ``show_default`` parameter defaults to the value from the
parent context.
.. versionchanged:: 7.1
Added the ``show_default`` parameter.
.. versionchanged:: 4.0
Added the ``color``, ``ignore_unknown_options``, and
``max_content_width`` parameters.
.. versionchanged:: 3.0
Added the ``allow_extra_args`` and ``allow_interspersed_args``
parameters.
.. versionchanged:: 2.0
Added the ``resilient_parsing``, ``help_option_names``, and
``token_normalize_func`` parameters.
"""
#: The formatter class to create with :meth:`make_formatter`.
#:
#: .. versionadded:: 8.0
formatter_class: t.Type["HelpFormatter"] = HelpFormatter
def __init__(
self,
command: "Command",
parent: t.Optional["Context"] = None,
info_name: t.Optional[str] = None,
obj: t.Optional[t.Any] = None,
auto_envvar_prefix: t.Optional[str] = None,
default_map: t.Optional[t.Dict[str, t.Any]] = None,
terminal_width: t.Optional[int] = None,
max_content_width: t.Optional[int] = None,
resilient_parsing: bool = False,
allow_extra_args: t.Optional[bool] = None,
allow_interspersed_args: t.Optional[bool] = None,
ignore_unknown_options: t.Optional[bool] = None,
help_option_names: t.Optional[t.List[str]] = None,
token_normalize_func: t.Optional[t.Callable[[str], str]] = None,
color: t.Optional[bool] = None,
show_default: t.Optional[bool] = None,
) -> None:
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: Map of parameter names to their parsed values. Parameters
#: with ``expose_value=False`` are not stored.
self.params: t.Dict[str, t.Any] = {}
#: the leftover arguments.
self.args: t.List[str] = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
        #: must never be propagated to other arguments. This is used
#: to implement nested parsing.
self.protected_args: t.List[str] = []
#: the collected prefixes of the command's options.
self._opt_prefixes: t.Set[str] = set(parent._opt_prefixes) if parent else set()
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj: t.Any = obj
self._meta: t.Dict[str, t.Any] = getattr(parent, "meta", {})
#: A dictionary (-like object) with defaults for parameters.
if (
default_map is None
and info_name is not None
and parent is not None
and parent.default_map is not None
):
default_map = parent.default_map.get(info_name)
self.default_map: t.Optional[t.Dict[str, t.Any]] = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`result_callback`.
self.invoked_subcommand: t.Optional[str] = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width: t.Optional[int] = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width: t.Optional[int] = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args: bool = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
        #: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options: bool = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ["--help"]
#: The names for the help options.
self.help_option_names: t.List[str] = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func: t.Optional[
t.Callable[[str], str]
] = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures and default values
#: will be ignored. Useful for completion.
self.resilient_parsing: bool = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if (
parent is not None
and parent.auto_envvar_prefix is not None
and self.info_name is not None
):
auto_envvar_prefix = (
f"{parent.auto_envvar_prefix}_{self.info_name.upper()}"
)
else:
auto_envvar_prefix = auto_envvar_prefix.upper()
if auto_envvar_prefix is not None:
auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
self.auto_envvar_prefix: t.Optional[str] = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color: t.Optional[bool] = color
if show_default is None and parent is not None:
show_default = parent.show_default
#: Show option default values when formatting help text.
self.show_default: t.Optional[bool] = show_default
self._close_callbacks: t.List[t.Callable[[], t.Any]] = []
self._depth = 0
self._parameter_source: t.Dict[str, ParameterSource] = {}
self._exit_stack = ExitStack()
def to_info_dict(self) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation. This traverses the entire CLI
structure.
.. code-block:: python
with Context(cli) as ctx:
info = ctx.to_info_dict()
.. versionadded:: 8.0
"""
return {
"command": self.command.to_info_dict(self),
"info_name": self.info_name,
"allow_extra_args": self.allow_extra_args,
"allow_interspersed_args": self.allow_interspersed_args,
"ignore_unknown_options": self.ignore_unknown_options,
"auto_envvar_prefix": self.auto_envvar_prefix,
}
def __enter__(self) -> "Context":
self._depth += 1
push_context(self)
return self
def __exit__(self, exc_type, exc_value, tb): # type: ignore
self._depth -= 1
if self._depth == 0:
self.close()
pop_context()
@contextmanager
def scope(self, cleanup: bool = True) -> t.Iterator["Context"]:
"""This helper method can be used with the context object to promote
it to the current thread local (see :func:`get_current_context`).
The default behavior of this is to invoke the cleanup functions which
can be disabled by setting `cleanup` to `False`. The cleanup
functions are typically used for things such as closing file handles.
If the cleanup is intended the context object can also be directly
used as a context manager.
Example usage::
with ctx.scope():
assert get_current_context() is ctx
This is equivalent::
with ctx:
assert get_current_context() is ctx
.. versionadded:: 5.0
:param cleanup: controls if the cleanup functions should be run or
not. The default is to run these functions. In
some situations the context only wants to be
temporarily pushed in which case this can be disabled.
Nested pushes automatically defer the cleanup.
"""
if not cleanup:
self._depth += 1
try:
with self as rv:
yield rv
finally:
if not cleanup:
self._depth -= 1
@property
def meta(self) -> t.Dict[str, t.Any]:
"""This is a dictionary which is shared with all the contexts
that are nested. It exists so that click utilities can store some
state here if they need to. It is however the responsibility of
that code to manage this dictionary well.
The keys are supposed to be unique dotted strings. For instance
module paths are a good choice for it. What is stored in there is
irrelevant for the operation of click. However what is important is
that code that places data here adheres to the general semantics of
the system.
Example usage::
LANG_KEY = f'{__name__}.lang'
def set_language(value):
ctx = get_current_context()
ctx.meta[LANG_KEY] = value
def get_language():
return get_current_context().meta.get(LANG_KEY, 'en_US')
.. versionadded:: 5.0
"""
return self._meta
def make_formatter(self) -> HelpFormatter:
"""Creates the :class:`~click.HelpFormatter` for the help and
usage output.
To quickly customize the formatter class used without overriding
this method, set the :attr:`formatter_class` attribute.
.. versionchanged:: 8.0
Added the :attr:`formatter_class` attribute.
"""
return self.formatter_class(
width=self.terminal_width, max_width=self.max_content_width
)
def with_resource(self, context_manager: t.ContextManager[V]) -> V:
"""Register a resource as if it were used in a ``with``
statement. The resource will be cleaned up when the context is
popped.
Uses :meth:`contextlib.ExitStack.enter_context`. It calls the
resource's ``__enter__()`` method and returns the result. When
the context is popped, it closes the stack, which calls the
resource's ``__exit__()`` method.
To register a cleanup function for something that isn't a
context manager, use :meth:`call_on_close`. Or use something
from :mod:`contextlib` to turn it into a context manager first.
.. code-block:: python
@click.group()
@click.option("--name")
@click.pass_context
def cli(ctx):
ctx.obj = ctx.with_resource(connect_db(name))
:param context_manager: The context manager to enter.
:return: Whatever ``context_manager.__enter__()`` returns.
.. versionadded:: 8.0
"""
return self._exit_stack.enter_context(context_manager)
def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
"""Register a function to be called when the context tears down.
This can be used to close resources opened during the script
execution. Resources that support Python's context manager
protocol which would be used in a ``with`` statement should be
registered with :meth:`with_resource` instead.
:param f: The function to execute on teardown.
"""
return self._exit_stack.callback(f)
def close(self) -> None:
"""Invoke all close callbacks registered with
:meth:`call_on_close`, and exit all context managers entered
with :meth:`with_resource`.
"""
self._exit_stack.close()
# In case the context is reused, create a new exit stack.
self._exit_stack = ExitStack()
@property
def command_path(self) -> str:
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ""
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
parent_command_path = [self.parent.command_path]
if isinstance(self.parent.command, Command):
for param in self.parent.command.get_params(self):
parent_command_path.extend(param.get_usage_pieces(self))
rv = f"{' '.join(parent_command_path)} {rv}"
return rv.lstrip()
def find_root(self) -> "Context":
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type: t.Type[V]) -> t.Optional[V]:
"""Finds the closest object of a given type."""
node: t.Optional["Context"] = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
return None
def ensure_object(self, object_type: t.Type[V]) -> V:
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
@t.overload
def lookup_default(
self, name: str, call: "te.Literal[True]" = True
) -> t.Optional[t.Any]:
...
@t.overload
def lookup_default(
self, name: str, call: "te.Literal[False]" = ...
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
...
def lookup_default(self, name: str, call: bool = True) -> t.Optional[t.Any]:
"""Get the default for a parameter from :attr:`default_map`.
:param name: Name of the parameter.
:param call: If the default is a callable, call it. Disable to
return the callable instead.
.. versionchanged:: 8.0
Added the ``call`` parameter.
"""
if self.default_map is not None:
value = self.default_map.get(name)
if call and callable(value):
return value()
return value
return None
def fail(self, message: str) -> "te.NoReturn":
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self) -> "te.NoReturn":
"""Aborts the script."""
raise Abort()
def exit(self, code: int = 0) -> "te.NoReturn":
"""Exits the application with a given exit code."""
raise Exit(code)
def get_usage(self) -> str:
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self) -> str:
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def _make_sub_context(self, command: "Command") -> "Context":
"""Create a new context of the same type as this context, but
for a new command.
:meta private:
"""
return type(self)(command, info_name=command.name, parent=self)
def invoke(
__self, # noqa: B902
__callback: t.Union["Command", t.Callable[..., t.Any]],
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
        Note that before Click 3.2, contrary to the intention of this
        code, keyword arguments were not properly filled in and no
        context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
.. versionchanged:: 8.0
All ``kwargs`` are tracked in :attr:`params` so they will be
passed if :meth:`forward` is called at multiple levels.
"""
if isinstance(__callback, Command):
other_cmd = __callback
if other_cmd.callback is None:
raise TypeError(
"The given command does not have a callback that can be invoked."
)
else:
__callback = other_cmd.callback
ctx = __self._make_sub_context(other_cmd)
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.type_cast_value( # type: ignore
ctx, param.get_default(ctx)
)
# Track all kwargs as params, so that forward() will pass
# them on in subsequent calls.
ctx.params.update(kwargs)
else:
ctx = __self
with augment_usage_errors(__self):
with ctx:
return __callback(*args, **kwargs)
def forward(
__self, __cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902
) -> t.Any:
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
.. versionchanged:: 8.0
All ``kwargs`` are tracked in :attr:`params` so they will be
passed if ``forward`` is called at multiple levels.
"""
# Can only forward to other commands, not direct callbacks.
if not isinstance(__cmd, Command):
raise TypeError("Callback is not a command.")
for param in __self.params:
if param not in kwargs:
kwargs[param] = __self.params[param]
return __self.invoke(__cmd, *args, **kwargs)
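    # Illustrative sketch (not part of the original module): forwarding from
    # one command to another inside a group. All names are hypothetical.
    #
    #     @click.group()
    #     def cli():
    #         pass
    #
    #     @cli.command()
    #     @click.option("--count", default=1)
    #     def build(count):
    #         click.echo(f"building x{count}")
    #
    #     @cli.command()
    #     @click.option("--count", default=1)
    #     @click.pass_context
    #     def rebuild(ctx, count):
    #         ctx.forward(build)          # reuses rebuild's own --count value
    #         ctx.invoke(build, count=2)  # passes explicit keyword arguments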
def set_parameter_source(self, name: str, source: ParameterSource) -> None:
"""Set the source of a parameter. This indicates the location
from which the value of the parameter was obtained.
:param name: The name of the parameter.
:param source: A member of :class:`~click.core.ParameterSource`.
"""
self._parameter_source[name] = source
def get_parameter_source(self, name: str) -> t.Optional[ParameterSource]:
"""Get the source of a parameter. This indicates the location
from which the value of the parameter was obtained.
This can be useful for determining when a user specified a value
on the command line that is the same as the default value. It
will be :attr:`~click.core.ParameterSource.DEFAULT` only if the
value was actually taken from the default.
:param name: The name of the parameter.
:rtype: ParameterSource
.. versionchanged:: 8.0
Returns ``None`` if the parameter was not provided from any
source.
"""
return self._parameter_source.get(name)
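    # Illustrative sketch (not part of the original module): telling an
    # explicit "--port 8080" apart from the default 8080. Names are
    # hypothetical.
    #
    #     @click.command()
    #     @click.option("--port", default=8080)
    #     @click.pass_context
    #     def serve(ctx, port):
    #         if ctx.get_parameter_source("port") is ParameterSource.DEFAULT:
    #             click.echo("using the default port")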
class BaseCommand:
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
    operations. For instance, they usually cannot be used with the
    decorators and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: The context class to create with :meth:`make_context`.
#:
#: .. versionadded:: 8.0
context_class: t.Type[Context] = Context
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(
self,
name: t.Optional[str],
context_settings: t.Optional[t.Dict[str, t.Any]] = None,
) -> None:
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings: t.Dict[str, t.Any] = context_settings
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation. This traverses the entire structure
below this command.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
:param ctx: A :class:`Context` representing this command.
.. versionadded:: 8.0
"""
return {"name": self.name}
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.name}>"
def get_usage(self, ctx: Context) -> str:
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx: Context) -> str:
raise NotImplementedError("Base commands cannot get help")
def make_context(
self,
info_name: t.Optional[str],
args: t.List[str],
parent: t.Optional[Context] = None,
**extra: t.Any,
) -> Context:
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
To quickly customize the context class used without overriding
this method, set the :attr:`context_class` attribute.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
                          command. For the top-level script it's usually
                          the name of the script; for commands below it,
                          it's the name of the command.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
.. versionchanged:: 8.0
Added the :attr:`context_class` attribute.
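
        A minimal sketch of driving parsing manually (``cli`` is a
        hypothetical command and ``--verbose`` a hypothetical option)::

            with cli.make_context('cli', ['--verbose']) as ctx:
                rv = cli.invoke(ctx)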
"""
for key, value in self.context_settings.items():
if key not in extra:
extra[key] = value
ctx = self.context_class(
self, info_name=info_name, parent=parent, **extra # type: ignore
)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError("Base commands do not know how to parse arguments.")
def invoke(self, ctx: Context) -> t.Any:
"""Given a context, this invokes the command. The default
        implementation raises a :exc:`NotImplementedError`.
"""
raise NotImplementedError("Base commands are not invokable by default")
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of chained multi-commands.
Any command could be part of a chained multi-command, so sibling
commands are valid at any point during command completion. Other
command classes will return more completions.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
results: t.List["CompletionItem"] = []
while ctx.parent is not None:
ctx = ctx.parent
if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
results.extend(
CompletionItem(name, help=command.get_short_help_str())
for name, command in _complete_visible_commands(ctx, incomplete)
if name not in ctx.protected_args
)
return results
@t.overload
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: "te.Literal[True]" = True,
**extra: t.Any,
) -> "te.NoReturn":
...
@t.overload
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: bool = ...,
**extra: t.Any,
) -> t.Any:
...
def main(
self,
args: t.Optional[t.Sequence[str]] = None,
prog_name: t.Optional[str] = None,
complete_var: t.Optional[str] = None,
standalone_mode: bool = True,
windows_expand_args: bool = True,
**extra: t.Any,
) -> t.Any:
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param windows_expand_args: Expand glob patterns, user dir, and
env vars in command line args on Windows.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
.. versionchanged:: 8.0.1
Added the ``windows_expand_args`` parameter to allow
disabling command line arg expansion on Windows.
.. versionchanged:: 8.0
When taking arguments from ``sys.argv`` on Windows, glob
patterns, user dir, and env vars are expanded.
.. versionchanged:: 3.0
Added the ``standalone_mode`` parameter.
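
        A minimal sketch of invoking without standalone mode (``cli`` and
        ``--count`` are hypothetical)::

            rv = cli.main(['--count', '3'], standalone_mode=False)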
"""
if args is None:
args = sys.argv[1:]
if os.name == "nt" and windows_expand_args:
args = _expand_args(args)
else:
args = list(args)
if prog_name is None:
prog_name = _detect_program_name()
# Process shell completion requests and exit early.
self._main_shell_completion(extra, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort() from None
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except OSError as e:
if e.errno == errno.EPIPE:
sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout))
sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr))
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo(_("Aborted!"), file=sys.stderr)
sys.exit(1)
def _main_shell_completion(
self,
ctx_args: t.Dict[str, t.Any],
prog_name: str,
complete_var: t.Optional[str] = None,
) -> None:
"""Check if the shell is asking for tab completion, process
that, then exit early. Called from :meth:`main` before the
program is invoked.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction. Defaults to
``_{PROG_NAME}_COMPLETE``.
"""
if complete_var is None:
complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()
instruction = os.environ.get(complete_var)
if not instruction:
return
from .shell_completion import shell_complete
rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction)
sys.exit(rv)
def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is disabled by default.
                            If enabled, this will add ``--help`` as the
                            argument if no arguments are passed.
:param hidden: hide this command from help outputs.
:param deprecated: issues a message indicating that
the command is deprecated.
.. versionchanged:: 8.1
``help``, ``epilog``, and ``short_help`` are stored unprocessed,
all formatting is done when outputting help text, not at init,
and is done even if not using the ``@command`` decorator.
.. versionchanged:: 8.0
Added a ``repr`` showing the command name.
.. versionchanged:: 7.1
Added the ``no_args_is_help`` parameter.
.. versionchanged:: 2.0
Added the ``context_settings`` parameter.
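
    A minimal sketch of building a command without the decorator
    (``greet`` is hypothetical)::

        def greet(name):
            click.echo(f'Hello, {name}!')

        cmd = click.Command(
            'greet', callback=greet, params=[click.Argument(['name'])]
        )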
"""
def __init__(
self,
name: t.Optional[str],
context_settings: t.Optional[t.Dict[str, t.Any]] = None,
callback: t.Optional[t.Callable[..., t.Any]] = None,
params: t.Optional[t.List["Parameter"]] = None,
help: t.Optional[str] = None,
epilog: t.Optional[str] = None,
short_help: t.Optional[str] = None,
options_metavar: t.Optional[str] = "[OPTIONS]",
add_help_option: bool = True,
no_args_is_help: bool = False,
hidden: bool = False,
deprecated: bool = False,
) -> None:
super().__init__(name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params: t.List["Parameter"] = params or []
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
self.short_help = short_help
self.add_help_option = add_help_option
self.no_args_is_help = no_args_is_help
self.hidden = hidden
self.deprecated = deprecated
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict(ctx)
info_dict.update(
params=[param.to_info_dict() for param in self.get_params(ctx)],
help=self.help,
epilog=self.epilog,
short_help=self.short_help,
hidden=self.hidden,
deprecated=self.deprecated,
)
return info_dict
def get_usage(self, ctx: Context) -> str:
"""Formats the usage line into a string and returns it.
Calls :meth:`format_usage` internally.
"""
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_params(self, ctx: Context) -> t.List["Parameter"]:
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = [*rv, help_option]
return rv
def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the usage line into the formatter.
This is a low-level method called by :meth:`get_usage`.
"""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, " ".join(pieces))
def collect_usage_pieces(self, ctx: Context) -> t.List[str]:
"""Returns all the pieces that go into the usage line and returns
it as a list of strings.
"""
rv = [self.options_metavar] if self.options_metavar else []
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx: Context) -> t.List[str]:
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return list(all_names)
def get_help_option(self, ctx: Context) -> t.Optional["Option"]:
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return None
def show_help(ctx: Context, param: "Parameter", value: str) -> None:
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help=_("Show this message and exit."),
)
def make_parser(self, ctx: Context) -> OptionParser:
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx: Context) -> str:
"""Formats the help into a string and returns it.
Calls :meth:`format_help` internally.
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_short_help_str(self, limit: int = 45) -> str:
"""Gets short help for the command or makes it by shortening the
long help string.
"""
if self.short_help:
text = inspect.cleandoc(self.short_help)
elif self.help:
text = make_default_short_help(self.help, limit)
else:
text = ""
if self.deprecated:
text = _("(Deprecated) {text}").format(text=text)
return text.strip()
def format_help(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the help into the formatter if it exists.
This is a low-level method called by :meth:`get_help`.
This calls the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the help text to the formatter if it exists."""
text = self.help if self.help is not None else ""
if self.deprecated:
text = _("(Deprecated) {text}").format(text=text)
if text:
text = inspect.cleandoc(text).partition("\f")[0]
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(text)
def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section(_("Options")):
formatter.write_dl(opts)
def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
epilog = inspect.cleandoc(self.epilog)
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(epilog)
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail(
ngettext(
"Got unexpected extra argument ({args})",
"Got unexpected extra arguments ({args})",
len(args),
).format(args=" ".join(map(str, args)))
)
ctx.args = args
ctx._opt_prefixes.update(parser._opt_prefixes)
return args
def invoke(self, ctx: Context) -> t.Any:
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.deprecated:
message = _(
"DeprecationWarning: The command {name!r} is deprecated."
).format(name=self.name)
echo(style(message, fg="red"), err=True)
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of options and chained multi-commands.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
results: t.List["CompletionItem"] = []
if incomplete and not incomplete[0].isalnum():
for param in self.get_params(ctx):
if (
not isinstance(param, Option)
or param.hidden
or (
not param.multiple
and ctx.get_parameter_source(param.name) # type: ignore
is ParameterSource.COMMANDLINE
)
):
continue
results.extend(
CompletionItem(name, help=param.help)
for name in [*param.opts, *param.secondary_opts]
if name.startswith(incomplete)
)
results.extend(super().shell_complete(ctx, incomplete))
return results
class MultiCommand(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
                            provided. By default this is enabled when
                            `invoke_without_command` is disabled and
                            disabled when it is enabled. If enabled,
                            this will add ``--help`` as the argument
                            if no arguments are passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: The result callback to attach to this multi
command. This can be set or changed later with the
:meth:`result_callback` decorator.
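
    A minimal sketch of a chained multi command (names are hypothetical)::

        @click.group(chain=True)
        def process():
            pass

        @process.command()
        def resize():
            click.echo('resize')

        @process.command()
        def rotate():
            click.echo('rotate')

    Invoking ``process resize rotate`` then runs both subcommands in
    order.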
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name: t.Optional[str] = None,
invoke_without_command: bool = False,
no_args_is_help: t.Optional[bool] = None,
subcommand_metavar: t.Optional[str] = None,
chain: bool = False,
result_callback: t.Optional[t.Callable[..., t.Any]] = None,
**attrs: t.Any,
) -> None:
super().__init__(name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
else:
subcommand_metavar = "COMMAND [ARGS]..."
self.subcommand_metavar = subcommand_metavar
self.chain = chain
# The result callback that is stored. This can be set or
# overridden with the :func:`result_callback` decorator.
self._result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot have"
" optional arguments."
)
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict(ctx)
commands = {}
for name in self.list_commands(ctx):
command = self.get_command(ctx, name)
if command is None:
continue
sub_ctx = ctx._make_sub_context(command)
with sub_ctx.scope(cleanup=False):
commands[name] = command.to_info_dict(sub_ctx)
info_dict.update(commands=commands, chain=self.chain)
return info_dict
def collect_usage_pieces(self, ctx: Context) -> t.List[str]:
rv = super().collect_usage_pieces(ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
super().format_options(ctx, formatter)
self.format_commands(ctx, formatter)
def result_callback(self, replace: bool = False) -> t.Callable[[F], F]:
"""Adds a result callback to the command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.result_callback()
def process_result(result, input):
return result + input
:param replace: if set to `True` an already existing result
callback will be removed.
.. versionchanged:: 8.0
Renamed from ``resultcallback``.
.. versionadded:: 3.0
"""
def decorator(f: F) -> F:
old_callback = self._result_callback
if old_callback is None or replace:
self._result_callback = f
return f
def function(__value, *args, **kwargs): # type: ignore
inner = old_callback(__value, *args, **kwargs) # type: ignore
return f(inner, *args, **kwargs)
self._result_callback = rv = update_wrapper(t.cast(F, function), f)
return rv
return decorator
def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
            # The command was listed but could not be loaded; ignore it.
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section(_("Commands")):
formatter.write_dl(rows)
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = super().parse_args(ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx: Context) -> t.Any:
def _process_result(value: t.Any) -> t.Any:
if self._result_callback is not None:
value = ctx.invoke(self._result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
if self.invoke_without_command:
# No subcommand was invoked, so the result callback is
# invoked with the group return value for regular
# groups, or an empty list for chained groups.
with ctx:
rv = super().invoke(ctx)
return _process_result([] if self.chain else rv)
ctx.fail(_("Missing command."))
# Fetch args back out
args = [*ctx.protected_args, *ctx.args]
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
assert cmd is not None
ctx.invoked_subcommand = cmd_name
super().invoke(ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = "*" if args else None
super().invoke(ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
assert cmd is not None
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(
self, ctx: Context, args: t.List[str]
) -> t.Tuple[t.Optional[str], t.Optional[Command], t.List[str]]:
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name))
return cmd_name if cmd else None, cmd, args[1:]
def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError
def list_commands(self, ctx: Context) -> t.List[str]:
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of options, subcommands, and chained
multi-commands.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
results = [
CompletionItem(name, help=command.get_short_help_str())
for name, command in _complete_visible_commands(ctx, incomplete)
]
results.extend(super().shell_complete(ctx, incomplete))
return results
class Group(MultiCommand):
"""A group allows a command to have subcommands attached. This is
the most common way to implement nesting in Click.
:param name: The name of the group command.
:param commands: A dict mapping names to :class:`Command` objects.
Can also be a list of :class:`Command`, which will use
:attr:`Command.name` to create the dict.
:param attrs: Other command arguments described in
:class:`MultiCommand`, :class:`Command`, and
:class:`BaseCommand`.
.. versionchanged:: 8.0
        The ``commands`` argument can be a list of command objects.
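
    A minimal sketch of both construction styles (``build`` and
    ``deploy`` are hypothetical commands)::

        cli = click.Group(commands={'build': build, 'deploy': deploy})
        cli = click.Group(commands=[build, deploy])  # names from commands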
"""
#: If set, this is used by the group's :meth:`command` decorator
#: as the default :class:`Command` class. This is useful to make all
#: subcommands use a custom command class.
#:
#: .. versionadded:: 8.0
command_class: t.Optional[t.Type[Command]] = None
#: If set, this is used by the group's :meth:`group` decorator
#: as the default :class:`Group` class. This is useful to make all
#: subgroups use a custom group class.
#:
#: If set to the special value :class:`type` (literally
#: ``group_class = type``), this group's class will be used as the
#: default class. This makes a custom group class continue to make
#: custom groups.
#:
#: .. versionadded:: 8.0
group_class: t.Optional[t.Union[t.Type["Group"], t.Type[type]]] = None
# Literal[type] isn't valid, so use Type[type]
def __init__(
self,
name: t.Optional[str] = None,
commands: t.Optional[t.Union[t.Dict[str, Command], t.Sequence[Command]]] = None,
**attrs: t.Any,
) -> None:
super().__init__(name, **attrs)
if commands is None:
commands = {}
elif isinstance(commands, abc.Sequence):
commands = {c.name: c for c in commands if c.name is not None}
#: The registered subcommands by their exported names.
self.commands: t.Dict[str, Command] = commands
def add_command(self, cmd: Command, name: t.Optional[str] = None) -> None:
"""Registers another :class:`Command` with this group. If the name
is not provided, the name of the command is used.
"""
name = name or cmd.name
if name is None:
raise TypeError("Command has no name.")
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
@t.overload
def command(self, __func: t.Callable[..., t.Any]) -> Command:
...
@t.overload
def command(
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], Command]:
...
def command(
self, *args: t.Any, **kwargs: t.Any
) -> t.Union[t.Callable[[t.Callable[..., t.Any]], Command], Command]:
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` and
immediately registers the created command with this group by
calling :meth:`add_command`.
To customize the command class used, set the
:attr:`command_class` attribute.
.. versionchanged:: 8.1
This decorator can be applied without parentheses.
.. versionchanged:: 8.0
Added the :attr:`command_class` attribute.
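
        A minimal sketch (``cli`` and ``sync`` are hypothetical)::

            @click.group()
            def cli():
                pass

            @cli.command()
            def sync():
                click.echo('syncing')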
"""
from .decorators import command
if self.command_class and kwargs.get("cls") is None:
kwargs["cls"] = self.command_class
func: t.Optional[t.Callable] = None
if args and callable(args[0]):
assert (
len(args) == 1 and not kwargs
), "Use 'command(**kwargs)(callable)' to provide arguments."
(func,) = args
args = ()
def decorator(f: t.Callable[..., t.Any]) -> Command:
cmd: Command = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
if func is not None:
return decorator(func)
return decorator
@t.overload
def group(self, __func: t.Callable[..., t.Any]) -> "Group":
...
@t.overload
def group(
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], "Group"]:
...
def group(
self, *args: t.Any, **kwargs: t.Any
) -> t.Union[t.Callable[[t.Callable[..., t.Any]], "Group"], "Group"]:
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` and
immediately registers the created group with this group by
calling :meth:`add_command`.
To customize the group class used, set the :attr:`group_class`
attribute.
.. versionchanged:: 8.1
This decorator can be applied without parentheses.
.. versionchanged:: 8.0
Added the :attr:`group_class` attribute.
"""
from .decorators import group
func: t.Optional[t.Callable] = None
if args and callable(args[0]):
assert (
len(args) == 1 and not kwargs
), "Use 'group(**kwargs)(callable)' to provide arguments."
(func,) = args
args = ()
if self.group_class is not None and kwargs.get("cls") is None:
if self.group_class is type:
kwargs["cls"] = type(self)
else:
kwargs["cls"] = self.group_class
def decorator(f: t.Callable[..., t.Any]) -> "Group":
cmd: Group = group(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
if func is not None:
return decorator(func)
return decorator
def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
return self.commands.get(cmd_name)
def list_commands(self, ctx: Context) -> t.List[str]:
return sorted(self.commands)
class CommandCollection(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
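
    A minimal sketch of merging two groups (``base_cli`` and
    ``plugin_cli`` are hypothetical groups)::

        cli = click.CommandCollection(sources=[base_cli, plugin_cli])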
"""
def __init__(
self,
name: t.Optional[str] = None,
sources: t.Optional[t.List[MultiCommand]] = None,
**attrs: t.Any,
) -> None:
super().__init__(name, **attrs)
#: The list of registered multi commands.
self.sources: t.List[MultiCommand] = sources or []
def add_source(self, multi_cmd: MultiCommand) -> None:
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
return None
def list_commands(self, ctx: Context) -> t.List[str]:
rv: t.Set[str] = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
def _check_iter(value: t.Any) -> t.Iterator[t.Any]:
"""Check if the value is iterable but not a string. Raises a type
error, or return an iterator over the value.
"""
if isinstance(value, str):
raise TypeError
return iter(value)
class Parameter:
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
                 or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: A function to further process or validate the value
after type conversion. It is called as ``f(ctx, param, value)``
and must return the value. It is called for all sources,
including prompts.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple). If ``nargs=-1``, all remaining
parameters are collected.
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
    :param is_eager: eager values are processed before non-eager ones. This
                     should not be set for arguments or it will invert the
                     order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
:param shell_complete: A function that returns custom shell
completions. Used instead of the param's type completion if
given. Takes ``ctx, param, incomplete`` and must return a list
of :class:`~click.shell_completion.CompletionItem` or a list of
strings.
.. versionchanged:: 8.0
``process_value`` validates required parameters and bounded
``nargs``, and invokes the parameter callback before returning
the value. This allows the callback to validate prompts.
``full_process_value`` is removed.
.. versionchanged:: 8.0
``autocompletion`` is renamed to ``shell_complete`` and has new
semantics described above. The old name is deprecated and will
be removed in 8.1, until then it will be wrapped to match the
new requirements.
.. versionchanged:: 8.0
For ``multiple=True, nargs>1``, the default must be a list of
tuples.
.. versionchanged:: 8.0
Setting a default is no longer required for ``nargs>1``, it will
default to ``None``. ``multiple=True`` or ``nargs=-1`` will
default to ``()``.
.. versionchanged:: 7.1
Empty environment variables are ignored rather than taking the
empty string value. This makes it possible for scripts to clear
variables if they can't unset them.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
        parameter. The old callback format will still work, but it will
        raise a warning to give you a chance to migrate the code more
        easily.
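
    A minimal sketch of a validating ``callback`` (names are
    hypothetical)::

        def validate_count(ctx, param, value):
            if value < 0:
                raise click.BadParameter('count must be non-negative')
            return value

        @click.command()
        @click.option('--count', type=int, default=0,
                      callback=validate_count)
        def run(count):
            click.echo(count)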
"""
param_type_name = "parameter"
def __init__(
self,
param_decls: t.Optional[t.Sequence[str]] = None,
type: t.Optional[t.Union[types.ParamType, t.Any]] = None,
required: bool = False,
default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None,
callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None,
nargs: t.Optional[int] = None,
multiple: bool = False,
metavar: t.Optional[str] = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None,
shell_complete: t.Optional[
t.Callable[
[Context, "Parameter", str],
t.Union[t.List["CompletionItem"], t.List[str]],
]
] = None,
) -> None:
self.name, self.opts, self.secondary_opts = self._parse_decls(
param_decls or (), expose_value
)
self.type = types.convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = multiple
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self._custom_shell_complete = shell_complete
if __debug__:
if self.type.is_composite and nargs != self.type.arity:
raise ValueError(
f"'nargs' must be {self.type.arity} (or None) for"
f" type {self.type!r}, but it was {nargs}."
)
# Skip no default or callable default.
check_default = default if not callable(default) else None
if check_default is not None:
if multiple:
try:
# Only check the first value against nargs.
check_default = next(_check_iter(check_default), None)
except TypeError:
raise ValueError(
"'default' must be a list when 'multiple' is true."
) from None
# Can be None for multiple with empty default.
if nargs != 1 and check_default is not None:
try:
_check_iter(check_default)
except TypeError:
if multiple:
message = (
"'default' must be a list of lists when 'multiple' is"
" true and 'nargs' != 1."
)
else:
message = "'default' must be a list when 'nargs' != 1."
raise ValueError(message) from None
if nargs > 1 and len(check_default) != nargs:
subject = "item length" if multiple else "length"
raise ValueError(
f"'default' {subject} must match nargs={nargs}."
)
def to_info_dict(self) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
return {
"name": self.name,
"param_type_name": self.param_type_name,
"opts": self.opts,
"secondary_opts": self.secondary_opts,
"type": self.type.to_info_dict(),
"required": self.required,
"nargs": self.nargs,
"multiple": self.multiple,
"default": self.default,
"envvar": self.envvar,
}
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.name}>"
def _parse_decls(
self, decls: t.Sequence[str], expose_value: bool
) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]:
raise NotImplementedError()
@property
def human_readable_name(self) -> str:
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name # type: ignore
def make_metavar(self) -> str:
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
@t.overload
def get_default(
self, ctx: Context, call: "te.Literal[True]" = True
) -> t.Optional[t.Any]:
...
@t.overload
def get_default(
self, ctx: Context, call: bool = ...
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
...
def get_default(
self, ctx: Context, call: bool = True
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
"""Get the default for the parameter. Tries
:meth:`Context.lookup_default` first, then the local default.
:param ctx: Current context.
:param call: If the default is a callable, call it. Disable to
return the callable instead.
.. versionchanged:: 8.0.2
Type casting is no longer performed when getting a default.
.. versionchanged:: 8.0.1
Type casting can fail in resilient parsing mode. Invalid
defaults will not prevent showing help text.
.. versionchanged:: 8.0
Looks at ``ctx.default_map`` first.
.. versionchanged:: 8.0
Added the ``call`` parameter.
"""
value = ctx.lookup_default(self.name, call=False) # type: ignore
if value is None:
value = self.default
if call and callable(value):
value = value()
return value
def add_to_parser(self, parser: OptionParser, ctx: Context) -> None:
raise NotImplementedError()
def consume_value(
self, ctx: Context, opts: t.Mapping[str, t.Any]
) -> t.Tuple[t.Any, ParameterSource]:
value = opts.get(self.name) # type: ignore
source = ParameterSource.COMMANDLINE
if value is None:
value = self.value_from_envvar(ctx)
source = ParameterSource.ENVIRONMENT
if value is None:
value = ctx.lookup_default(self.name) # type: ignore
source = ParameterSource.DEFAULT_MAP
if value is None:
value = self.get_default(ctx)
source = ParameterSource.DEFAULT
return value, source
def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
"""Convert and validate a value against the option's
:attr:`type`, :attr:`multiple`, and :attr:`nargs`.
"""
if value is None:
return () if self.multiple or self.nargs == -1 else None
def check_iter(value: t.Any) -> t.Iterator:
try:
return _check_iter(value)
except TypeError:
# This should only happen when passing in args manually,
# the parser should construct an iterable when parsing
# the command line.
raise BadParameter(
_("Value must be an iterable."), ctx=ctx, param=self
) from None
if self.nargs == 1 or self.type.is_composite:
convert: t.Callable[[t.Any], t.Any] = partial(
self.type, param=self, ctx=ctx
)
elif self.nargs == -1:
def convert(value: t.Any) -> t.Tuple:
return tuple(self.type(x, self, ctx) for x in check_iter(value))
else: # nargs > 1
def convert(value: t.Any) -> t.Tuple:
value = tuple(check_iter(value))
if len(value) != self.nargs:
raise BadParameter(
ngettext(
"Takes {nargs} values but 1 was given.",
"Takes {nargs} values but {len} were given.",
len(value),
).format(nargs=self.nargs, len=len(value)),
ctx=ctx,
param=self,
)
return tuple(self.type(x, self, ctx) for x in value)
if self.multiple:
return tuple(convert(x) for x in check_iter(value))
return convert(value)
def value_is_missing(self, value: t.Any) -> bool:
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def process_value(self, ctx: Context, value: t.Any) -> t.Any:
value = self.type_cast_value(ctx, value)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
if self.callback is not None:
value = self.callback(ctx, self, value)
return value
def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]:
if self.envvar is None:
return None
if isinstance(self.envvar, str):
rv = os.environ.get(self.envvar)
if rv:
return rv
else:
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv:
return rv
return None
def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]:
rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(
self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str]
) -> t.Tuple[t.Any, t.List[str]]:
with augment_usage_errors(ctx, param=self):
value, source = self.consume_value(ctx, opts)
ctx.set_parameter_source(self.name, source) # type: ignore
try:
value = self.process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.expose_value:
ctx.params[self.name] = value # type: ignore
return value, args
def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]:
pass
def get_usage_pieces(self, ctx: Context) -> t.List[str]:
return []
def get_error_hint(self, ctx: Context) -> str:
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join(f"'{x}'" for x in hint_list)
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. If a
``shell_complete`` function was given during init, it is used.
Otherwise, the :attr:`type`
:meth:`~click.types.ParamType.shell_complete` function is used.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
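
        A minimal sketch of a custom completion function (names are
        hypothetical)::

            import os

            def complete_env(ctx, param, incomplete):
                return [k for k in os.environ if k.startswith(incomplete)]

            @click.command()
            @click.option('--env', shell_complete=complete_env)
            def run(env):
                click.echo(env)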
"""
if self._custom_shell_complete is not None:
results = self._custom_shell_complete(ctx, self, incomplete)
if results and isinstance(results[0], str):
from click.shell_completion import CompletionItem
results = [CompletionItem(c) for c in results]
return t.cast(t.List["CompletionItem"], results)
return self.type.shell_complete(ctx, self, incomplete)
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: Show the default value for this option in its
help text. Values are not shown by default, unless
:attr:`Context.show_default` is ``True``. If this value is a
string, it shows that string in parentheses instead of the
actual value. This is particularly useful for dynamic options.
For single option boolean flags, the default remains hidden if
its value is ``False``.
:param show_envvar: Controls if an environment variable should be
shown on the help page. Normally, environment variables are not
shown.
    :param prompt: If set to ``True`` or a non-empty string then the
user will be prompted for input. If set to ``True`` the prompt
will be the option name capitalized.
:param confirmation_prompt: Prompt a second time to confirm the
value if it was prompted for. Can be set to a string instead of
``True`` to customize the message.
:param prompt_required: If set to ``False``, the user will be
prompted for input only when the option was specified as a flag
without a value.
:param hide_input: If this is ``True`` then the input on the prompt
will be hidden from the user. This is useful for password input.
:param is_flag: forces this option to act as a flag. The default is
                    auto-detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
                     in how it works but supports an arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
.. versionchanged:: 8.1.0
Help text indentation is cleaned here instead of only in the
``@option`` decorator.
.. versionchanged:: 8.1.0
The ``show_default`` parameter overrides
``Context.show_default``.
.. versionchanged:: 8.1.0
The default of a single option boolean flag is not shown if the
default value is ``False``.
.. versionchanged:: 8.0.1
``type`` is detected from ``flag_value`` if given.
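
    A minimal sketch of common option styles (names are hypothetical)::

        @click.command()
        @click.option('--verbose', is_flag=True)
        @click.option('--name', prompt=True)
        @click.option('-v', '--level', count=True)
        def run(verbose, name, level):
            click.echo(f'{name} {verbose} {level}')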
"""
param_type_name = "option"
def __init__(
self,
param_decls: t.Optional[t.Sequence[str]] = None,
show_default: t.Union[bool, str, None] = None,
prompt: t.Union[bool, str] = False,
confirmation_prompt: t.Union[bool, str] = False,
prompt_required: bool = True,
hide_input: bool = False,
is_flag: t.Optional[bool] = None,
flag_value: t.Optional[t.Any] = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
type: t.Optional[t.Union[types.ParamType, t.Any]] = None,
help: t.Optional[str] = None,
hidden: bool = False,
show_choices: bool = True,
show_envvar: bool = False,
**attrs: t.Any,
) -> None:
if help:
help = inspect.cleandoc(help)
default_is_missing = "default" not in attrs
super().__init__(param_decls, type=type, multiple=multiple, **attrs)
if prompt is True:
if self.name is None:
raise TypeError("'name' is required with 'prompt=True'.")
prompt_text: t.Optional[str] = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.prompt_required = prompt_required
self.hide_input = hide_input
self.hidden = hidden
# If prompt is enabled but not required, then the option can be
# used as a flag to indicate using prompt or flag_value.
self._flag_needs_value = self.prompt is not None and not self.prompt_required
if is_flag is None:
if flag_value is not None:
# Implicitly a flag because flag_value was set.
is_flag = True
elif self._flag_needs_value:
# Not a flag, but when used as a flag it shows a prompt.
is_flag = False
else:
# Implicitly a flag because flag options were given.
is_flag = bool(self.secondary_opts)
elif is_flag is False and not self._flag_needs_value:
# Not a flag, and prompt is not enabled, can be used as a
# flag if flag_value is set.
self._flag_needs_value = flag_value is not None
if is_flag and default_is_missing and not self.required:
self.default: t.Union[t.Any, t.Callable[[], t.Any]] = False
if flag_value is None:
flag_value = not self.default
if is_flag and type is None:
# Re-guess the type from the flag value instead of the
# default.
self.type = types.convert_type(None, flag_value)
self.is_flag: bool = is_flag
self.is_bool_flag = is_flag and isinstance(self.type, types.BoolParamType)
self.flag_value: t.Any = flag_value
# Counting
self.count = count
if count:
if type is None:
self.type = types.IntRange(min=0)
if default_is_missing:
self.default = 0
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
if __debug__:
if self.nargs == -1:
raise TypeError("nargs=-1 is not supported for options.")
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError("'prompt' is not valid for non-boolean flag.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Secondary flag is not valid for non-boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError(
"'prompt' with 'hide_input' is not valid for boolean flag."
)
if self.count:
if self.multiple:
raise TypeError("'count' is not valid with 'multiple'.")
if self.is_flag:
raise TypeError("'count' is not valid with 'is_flag'.")
if self.multiple and self.is_flag:
raise TypeError("'multiple' is not valid with 'is_flag', use 'count'.")
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(
help=self.help,
prompt=self.prompt,
is_flag=self.is_flag,
flag_value=self.flag_value,
count=self.count,
hidden=self.hidden,
)
return info_dict
def _parse_decls(
self, decls: t.Sequence[str], expose_value: bool
) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]:
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if decl.isidentifier():
if name is not None:
raise TypeError(f"Name '{name}' defined twice")
name = decl
else:
split_char = ";" if decl[:1] == "/" else "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
if first == second:
raise ValueError(
f"Boolean option {decl!r} cannot use the"
" same flag for true/false."
)
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not name.isidentifier():
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError("Could not determine name for option")
if not opts and not secondary_opts:
raise TypeError(
f"No options defined but a name was passed ({name})."
" Did you mean to declare an argument instead? Did"
f" you mean to pass '--{name}'?"
)
return name, opts, secondary_opts
def add_to_parser(self, parser: OptionParser, ctx: Context) -> None:
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
action = f"{action}_const"
if self.is_bool_flag and self.secondary_opts:
parser.add_option(
obj=self, opts=self.opts, dest=self.name, action=action, const=True
)
parser.add_option(
obj=self,
opts=self.secondary_opts,
dest=self.name,
action=action,
const=False,
)
else:
parser.add_option(
obj=self,
opts=self.opts,
dest=self.name,
action=action,
const=self.flag_value,
)
else:
parser.add_option(
obj=self,
opts=self.opts,
dest=self.name,
action=action,
nargs=self.nargs,
)
def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]:
if self.hidden:
return None
any_prefix_is_slash = False
def _write_opts(opts: t.Sequence[str]) -> str:
nonlocal any_prefix_is_slash
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash = True
if not self.is_flag and not self.count:
rv += f" {self.make_metavar()}"
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if (
self.allow_from_autoenv
and ctx.auto_envvar_prefix is not None
and self.name is not None
):
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
if envvar is not None:
var_str = (
envvar
if isinstance(envvar, str)
else ", ".join(str(d) for d in envvar)
)
extra.append(_("env var: {var}").format(var=var_str))
# Temporarily enable resilient parsing to avoid type casting
# failing for the default. Might be possible to extend this to
# help formatting in general.
resilient = ctx.resilient_parsing
ctx.resilient_parsing = True
try:
default_value = self.get_default(ctx, call=False)
finally:
ctx.resilient_parsing = resilient
show_default = False
show_default_is_str = False
if self.show_default is not None:
if isinstance(self.show_default, str):
show_default_is_str = show_default = True
else:
show_default = self.show_default
elif ctx.show_default is not None:
show_default = ctx.show_default
if show_default_is_str or (show_default and (default_value is not None)):
if show_default_is_str:
default_string = f"({self.show_default})"
elif isinstance(default_value, (list, tuple)):
default_string = ", ".join(str(d) for d in default_value)
elif inspect.isfunction(default_value):
default_string = _("(dynamic)")
elif self.is_bool_flag and self.secondary_opts:
# For boolean flags that have distinct True/False opts,
# use the opt without prefix instead of the value.
default_string = split_opt(
(self.opts if self.default else self.secondary_opts)[0]
)[1]
elif self.is_bool_flag and not self.secondary_opts and not default_value:
default_string = ""
else:
default_string = str(default_value)
if default_string:
extra.append(_("default: {default}").format(default=default_string))
if (
isinstance(self.type, types._NumberRangeBase)
# skip count with default range type
and not (self.count and self.type.min == 0 and self.type.max is None)
):
range_str = self.type._describe_range()
if range_str:
extra.append(range_str)
if self.required:
extra.append(_("required"))
if extra:
extra_str = "; ".join(extra)
help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
return ("; " if any_prefix_is_slash else " / ").join(rv), help
@t.overload
def get_default(
self, ctx: Context, call: "te.Literal[True]" = True
) -> t.Optional[t.Any]:
...
@t.overload
def get_default(
self, ctx: Context, call: bool = ...
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
...
def get_default(
self, ctx: Context, call: bool = True
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
        # If we're a non-boolean flag, our default is more complex because
        # we need to look at all flags in the same group to figure out
        # if we're the default one, in which case we return the flag
        # value as the default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value # type: ignore
return None
return super().get_default(ctx, call=call)
def prompt_for_value(self, ctx: Context) -> t.Any:
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
        value as the result.
"""
assert self.prompt is not None
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(
self.prompt,
default=default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
)
def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]:
rv = super().resolve_envvar_value(ctx)
if rv is not None:
return rv
if (
self.allow_from_autoenv
and ctx.auto_envvar_prefix is not None
and self.name is not None
):
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
rv = os.environ.get(envvar)
if rv:
return rv
return None
def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]:
rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
def consume_value(
self, ctx: Context, opts: t.Mapping[str, "Parameter"]
) -> t.Tuple[t.Any, ParameterSource]:
value, source = super().consume_value(ctx, opts)
# The parser will emit a sentinel value if the option can be
# given as a flag without a value. This is different from None
# to distinguish from the flag not being given at all.
if value is _flag_needs_value:
if self.prompt is not None and not ctx.resilient_parsing:
value = self.prompt_for_value(ctx)
source = ParameterSource.PROMPT
else:
value = self.flag_value
source = ParameterSource.COMMANDLINE
elif (
self.multiple
and value is not None
and any(v is _flag_needs_value for v in value)
):
value = [self.flag_value if v is _flag_needs_value else v for v in value]
source = ParameterSource.COMMANDLINE
        # The value wasn't set, or used the param's default; prompt if
        # prompting is enabled.
elif (
source in {None, ParameterSource.DEFAULT}
and self.prompt is not None
and (self.required or self.prompt_required)
and not ctx.resilient_parsing
):
value = self.prompt_for_value(ctx)
source = ParameterSource.PROMPT
return value, source
class Argument(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the parameter constructor.
"""
param_type_name = "argument"
def __init__(
self,
param_decls: t.Sequence[str],
required: t.Optional[bool] = None,
**attrs: t.Any,
) -> None:
if required is None:
if attrs.get("default") is not None:
required = False
else:
required = attrs.get("nargs", 1) > 0
if "multiple" in attrs:
raise TypeError("__init__() got an unexpected keyword argument 'multiple'.")
super().__init__(param_decls, required=required, **attrs)
if __debug__:
if self.default is not None and self.nargs == -1:
raise TypeError("'default' is not supported for nargs=-1.")
@property
def human_readable_name(self) -> str:
if self.metavar is not None:
return self.metavar
return self.name.upper() # type: ignore
def make_metavar(self) -> str:
if self.metavar is not None:
return self.metavar
var = self.type.get_metavar(self)
if not var:
var = self.name.upper() # type: ignore
if not self.required:
var = f"[{var}]"
if self.nargs != 1:
var += "..."
return var
def _parse_decls(
self, decls: t.Sequence[str], expose_value: bool
) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]:
if not decls:
if not expose_value:
return None, [], []
raise TypeError("Could not determine name for argument")
if len(decls) == 1:
name = arg = decls[0]
name = name.replace("-", "_").lower()
else:
raise TypeError(
"Arguments take exactly one parameter declaration, got"
f" {len(decls)}."
)
return name, [arg], []
def get_usage_pieces(self, ctx: Context) -> t.List[str]:
return [self.make_metavar()]
def get_error_hint(self, ctx: Context) -> str:
return f"'{self.make_metavar()}'"
def add_to_parser(self, parser: OptionParser, ctx: Context) -> None:
parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
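# --- Illustrative usage sketch (an addition, not part of upstream core.py).
# It shows how the Option and Argument classes above are normally created
# through the public decorators; the command, option, and argument names
# here are hypothetical.
def _example_greet_command():  # hypothetical helper, for illustration only
    import click

    @click.command()
    @click.option("--shout/--no-shout", default=False, show_default=True)
    @click.argument("name")
    def greet(shout: bool, name: str) -> None:
        message = f"Hello, {name}!"
        click.echo(message.upper() if shout else message)

    return greet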
| 112,782 | Python | 36.606869 | 88 | 0.574826 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/utils.py | import os
import re
import sys
import typing as t
from functools import update_wrapper
from types import ModuleType
from ._compat import _default_text_stderr
from ._compat import _default_text_stdout
from ._compat import _find_binary_writer
from ._compat import auto_wrap_for_ansi
from ._compat import binary_streams
from ._compat import get_filesystem_encoding
from ._compat import open_stream
from ._compat import should_strip_ansi
from ._compat import strip_ansi
from ._compat import text_streams
from ._compat import WIN
from .globals import resolve_color_default
if t.TYPE_CHECKING:
import typing_extensions as te
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def _posixify(name: str) -> str:
return "-".join(name.split()).lower()
def safecall(func: F) -> F:
"""Wraps a function so that it swallows exceptions."""
def wrapper(*args, **kwargs): # type: ignore
try:
return func(*args, **kwargs)
except Exception:
pass
return update_wrapper(t.cast(F, wrapper), func)
def make_str(value: t.Any) -> str:
"""Converts a value into a valid string."""
if isinstance(value, bytes):
try:
return value.decode(get_filesystem_encoding())
except UnicodeError:
return value.decode("utf-8", "replace")
return str(value)
def make_default_short_help(help: str, max_length: int = 45) -> str:
"""Returns a condensed version of help string."""
# Consider only the first paragraph.
paragraph_end = help.find("\n\n")
if paragraph_end != -1:
help = help[:paragraph_end]
# Collapse newlines, tabs, and spaces.
words = help.split()
if not words:
return ""
# The first paragraph started with a "no rewrap" marker, ignore it.
if words[0] == "\b":
words = words[1:]
total_length = 0
last_index = len(words) - 1
for i, word in enumerate(words):
total_length += len(word) + (i > 0)
if total_length > max_length: # too long, truncate
break
if word[-1] == ".": # sentence end, truncate without "..."
return " ".join(words[: i + 1])
if total_length == max_length and i != last_index:
break # not at sentence end, truncate with "..."
else:
return " ".join(words) # no truncation needed
# Account for the length of the suffix.
total_length += len("...")
# remove words until the length is short enough
while i > 0:
total_length -= len(words[i]) + (i > 0)
if total_length <= max_length:
break
i -= 1
return " ".join(words[:i]) + "..."
class LazyFile:
"""A lazy file works like a regular file but it does not fully open
the file but it does perform some basic checks early to see if the
filename parameter does make sense. This is useful for safely opening
files for writing.
"""
def __init__(
self,
filename: str,
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
atomic: bool = False,
):
self.name = filename
self.mode = mode
self.encoding = encoding
self.errors = errors
self.atomic = atomic
self._f: t.Optional[t.IO]
if filename == "-":
self._f, self.should_close = open_stream(filename, mode, encoding, errors)
else:
if "r" in mode:
# Open and close the file in case we're opening it for
# reading so that we can catch at least some errors in
# some cases early.
open(filename, mode).close()
self._f = None
self.should_close = True
def __getattr__(self, name: str) -> t.Any:
return getattr(self.open(), name)
def __repr__(self) -> str:
if self._f is not None:
return repr(self._f)
return f"<unopened file '{self.name}' {self.mode}>"
def open(self) -> t.IO:
"""Opens the file if it's not yet open. This call might fail with
a :exc:`FileError`. Not handling this error will produce an error
that Click shows.
"""
if self._f is not None:
return self._f
try:
rv, self.should_close = open_stream(
self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
)
except OSError as e: # noqa: E402
from .exceptions import FileError
raise FileError(self.name, hint=e.strerror) from e
self._f = rv
return rv
def close(self) -> None:
"""Closes the underlying file, no matter what."""
if self._f is not None:
self._f.close()
def close_intelligently(self) -> None:
"""This function only closes the file if it was opened by the lazy
file wrapper. For instance this will never close stdin.
"""
if self.should_close:
self.close()
def __enter__(self) -> "LazyFile":
return self
def __exit__(self, exc_type, exc_value, tb): # type: ignore
self.close_intelligently()
def __iter__(self) -> t.Iterator[t.AnyStr]:
self.open()
return iter(self._f) # type: ignore
class KeepOpenFile:
def __init__(self, file: t.IO) -> None:
self._file = file
def __getattr__(self, name: str) -> t.Any:
return getattr(self._file, name)
def __enter__(self) -> "KeepOpenFile":
return self
def __exit__(self, exc_type, exc_value, tb): # type: ignore
pass
def __repr__(self) -> str:
return repr(self._file)
def __iter__(self) -> t.Iterator[t.AnyStr]:
return iter(self._file)
def echo(
message: t.Optional[t.Any] = None,
file: t.Optional[t.IO[t.Any]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
) -> None:
"""Print a message and newline to stdout or a file. This should be
used instead of :func:`print` because it provides better support
for different data, files, and environments.
Compared to :func:`print`, this does the following:
- Ensures that the output encoding is not misconfigured on Linux.
- Supports Unicode in the Windows console.
- Supports writing to binary outputs, and supports writing bytes
to text outputs.
- Supports colors and styles on Windows.
- Removes ANSI color and style codes if the output does not look
like an interactive terminal.
- Always flushes the output.
:param message: The string or bytes to output. Other objects are
converted to strings.
:param file: The file to write to. Defaults to ``stdout``.
:param err: Write to ``stderr`` instead of ``stdout``.
:param nl: Print a newline after the message. Enabled by default.
:param color: Force showing or hiding colors and other styles. By
default Click will remove color if the output does not look like
an interactive terminal.
.. versionchanged:: 6.0
Support Unicode output on the Windows console. Click does not
modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
will still not support Unicode.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionadded:: 3.0
Added the ``err`` parameter.
.. versionchanged:: 2.0
Support colors on Windows if colorama is installed.
"""
if file is None:
if err:
file = _default_text_stderr()
else:
file = _default_text_stdout()
# Convert non bytes/text into the native string type.
if message is not None and not isinstance(message, (str, bytes, bytearray)):
out: t.Optional[t.Union[str, bytes]] = str(message)
else:
out = message
if nl:
out = out or ""
if isinstance(out, str):
out += "\n"
else:
out += b"\n"
if not out:
file.flush()
return
# If there is a message and the value looks like bytes, we manually
# need to find the binary stream and write the message in there.
# This is done separately so that most stream types will work as you
    # would expect. E.g., you can write to StringIO for other cases.
if isinstance(out, (bytes, bytearray)):
binary_file = _find_binary_writer(file)
if binary_file is not None:
file.flush()
binary_file.write(out)
binary_file.flush()
return
# ANSI style code support. For no message or bytes, nothing happens.
# When outputting to a file instead of a terminal, strip codes.
else:
color = resolve_color_default(color)
if should_strip_ansi(file, color):
out = strip_ansi(out)
elif WIN:
if auto_wrap_for_ansi is not None:
file = auto_wrap_for_ansi(file) # type: ignore
elif not color:
out = strip_ansi(out)
file.write(out) # type: ignore
file.flush()
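# Illustrative sketch (an addition): echo() accepts str and bytes alike and
# can target stderr; these calls rely only on the behavior documented above.
def _demo_echo() -> None:  # hypothetical helper, for illustration only
    echo("plain text")                      # newline appended by default
    echo(b"raw bytes")                      # routed to the binary stream
    echo("something went wrong", err=True)  # written to stderr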
def get_binary_stream(name: "te.Literal['stdin', 'stdout', 'stderr']") -> t.BinaryIO:
"""Returns a system stream for byte processing.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener()
def get_text_stream(
name: "te.Literal['stdin', 'stdout', 'stderr']",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
) -> t.TextIO:
"""Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts for already
correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
"""
opener = text_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener(encoding, errors)
def open_file(
filename: str,
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
lazy: bool = False,
atomic: bool = False,
) -> t.IO:
"""Open a file, with extra behavior to handle ``'-'`` to indicate
a standard stream, lazy open on write, and atomic write. Similar to
the behavior of the :class:`~click.File` param type.
If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is
wrapped so that using it in a context manager will not close it.
This makes it possible to use the function without accidentally
closing a standard stream:
.. code-block:: python
with open_file(filename) as f:
...
:param filename: The name of the file to open, or ``'-'`` for
``stdin``/``stdout``.
:param mode: The mode in which to open the file.
:param encoding: The encoding to decode or encode a file opened in
text mode.
:param errors: The error handling mode.
:param lazy: Wait to open the file until it is accessed. For read
mode, the file is temporarily opened to raise access errors
early, then closed until it is read again.
:param atomic: Write to a temporary file and replace the given file
on close.
.. versionadded:: 3.0
"""
if lazy:
return t.cast(t.IO, LazyFile(filename, mode, encoding, errors, atomic=atomic))
f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
if not should_close:
f = t.cast(t.IO, KeepOpenFile(f))
return f
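# Illustrative sketch (an addition): passing "-" to open_file() yields a
# wrapped standard stream that the with-block will not close.
def _demo_open_file() -> None:  # hypothetical helper, for illustration only
    with open_file("-", "w") as f:  # stdout, kept open on exit
        f.write("hello\n")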
def format_filename(
filename: t.Union[str, bytes, os.PathLike], shorten: bool = False
) -> str:
"""Formats a filename for user display. The main purpose of this
function is to ensure that the filename can be displayed at all. This
    will decode the filename to unicode if necessary in a way that will
not fail. Optionally, it can shorten the filename to not include the
full path to the filename.
:param filename: formats a filename for UI display. This will also convert
the filename into unicode without failing.
    :param shorten: this optionally shortens the filename to strip off the
path that leads up to it.
"""
if shorten:
filename = os.path.basename(filename)
return os.fsdecode(filename)
def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
To give you an idea, for an app called ``"Foo Bar"``, something like
the following folders could be returned:
Mac OS X:
``~/Library/Application Support/Foo Bar``
Mac OS X (POSIX):
``~/.foo-bar``
Unix:
``~/.config/foo-bar``
Unix (POSIX):
``~/.foo-bar``
Windows (roaming):
``C:\Users\<user>\AppData\Roaming\Foo Bar``
Windows (not roaming):
``C:\Users\<user>\AppData\Local\Foo Bar``
.. versionadded:: 2.0
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
        Has no effect otherwise.
:param force_posix: if this is set to `True` then on any POSIX system the
folder will be stored in the home folder with a leading
dot instead of the XDG config home or darwin's
application support folder.
"""
if WIN:
key = "APPDATA" if roaming else "LOCALAPPDATA"
folder = os.environ.get(key)
if folder is None:
folder = os.path.expanduser("~")
return os.path.join(folder, app_name)
if force_posix:
return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
if sys.platform == "darwin":
return os.path.join(
os.path.expanduser("~/Library/Application Support"), app_name
)
return os.path.join(
os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
_posixify(app_name),
)
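# Illustrative sketch (an addition): resolving a per-user config file path
# with get_app_dir(). The application name "Foo Bar" is made up.
def _demo_config_path() -> str:  # hypothetical helper, for illustration only
    cfg_dir = get_app_dir("Foo Bar")  # e.g. ~/.config/foo-bar on Unix
    return os.path.join(cfg_dir, "config.ini")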
class PacifyFlushWrapper:
"""This wrapper is used to catch and suppress BrokenPipeErrors resulting
from ``.flush()`` being called on broken pipe during the shutdown/final-GC
of the Python interpreter. Notably ``.flush()`` is always called on
``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
other cleanup code, and the case where the underlying file is not a broken
pipe, all calls and attributes are proxied.
"""
def __init__(self, wrapped: t.IO) -> None:
self.wrapped = wrapped
def flush(self) -> None:
try:
self.wrapped.flush()
except OSError as e:
import errno
if e.errno != errno.EPIPE:
raise
def __getattr__(self, attr: str) -> t.Any:
return getattr(self.wrapped, attr)
def _detect_program_name(
path: t.Optional[str] = None, _main: t.Optional[ModuleType] = None
) -> str:
"""Determine the command used to run the program, for use in help
text. If a file or entry point was executed, the file name is
returned. If ``python -m`` was used to execute a module or package,
``python -m name`` is returned.
    This doesn't try to be too precise; the goal is to give a concise
name for help text. Files are only shown as their name without the
path. ``python`` is only shown for modules, and the full path to
``sys.executable`` is not shown.
:param path: The Python file being executed. Python puts this in
``sys.argv[0]``, which is used by default.
:param _main: The ``__main__`` module. This should only be passed
during internal testing.
.. versionadded:: 8.0
Based on command args detection in the Werkzeug reloader.
:meta private:
"""
if _main is None:
_main = sys.modules["__main__"]
if not path:
path = sys.argv[0]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(_main, "__package__", None) is None or (
os.name == "nt"
and _main.__package__ == ""
and not os.path.exists(path)
and os.path.exists(f"{path}.exe")
):
# Executed a file, like "python app.py".
return os.path.basename(path)
# Executed a module, like "python -m example".
# Rewritten by Python from "-m script" to "/path/to/script.py".
# Need to look at main module to determine how it was executed.
py_module = t.cast(str, _main.__package__)
name = os.path.splitext(os.path.basename(path))[0]
# A submodule like "example.cli".
if name != "__main__":
py_module = f"{py_module}.{name}"
return f"python -m {py_module.lstrip('.')}"
def _expand_args(
args: t.Iterable[str],
*,
user: bool = True,
env: bool = True,
glob_recursive: bool = True,
) -> t.List[str]:
"""Simulate Unix shell expansion with Python functions.
See :func:`glob.glob`, :func:`os.path.expanduser`, and
:func:`os.path.expandvars`.
This is intended for use on Windows, where the shell does not do any
expansion. It may not exactly match what a Unix shell would do.
:param args: List of command line arguments to expand.
:param user: Expand user home directory.
:param env: Expand environment variables.
:param glob_recursive: ``**`` matches directories recursively.
.. versionchanged:: 8.1
Invalid glob patterns are treated as empty expansions rather
than raising an error.
.. versionadded:: 8.0
:meta private:
"""
from glob import glob
out = []
for arg in args:
if user:
arg = os.path.expanduser(arg)
if env:
arg = os.path.expandvars(arg)
try:
matches = glob(arg, recursive=glob_recursive)
except re.error:
matches = []
if not matches:
out.append(arg)
else:
out.extend(matches)
return out
| 18,682 | Python | 31.156626 | 87 | 0.609357 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/decorators.py | import inspect
import types
import typing as t
from functools import update_wrapper
from gettext import gettext as _
from .core import Argument
from .core import Command
from .core import Context
from .core import Group
from .core import Option
from .core import Parameter
from .globals import get_current_context
from .utils import echo
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
FC = t.TypeVar("FC", bound=t.Union[t.Callable[..., t.Any], Command])
def pass_context(f: F) -> F:
"""Marks a callback as wanting to receive the current context
object as first argument.
"""
def new_func(*args, **kwargs): # type: ignore
return f(get_current_context(), *args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
def pass_obj(f: F) -> F:
"""Similar to :func:`pass_context`, but only pass the object on the
context onwards (:attr:`Context.obj`). This is useful if that object
represents the state of a nested system.
"""
def new_func(*args, **kwargs): # type: ignore
return f(get_current_context().obj, *args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
def make_pass_decorator(
object_type: t.Type, ensure: bool = False
) -> "t.Callable[[F], F]":
"""Given an object type this creates a decorator that will work
similar to :func:`pass_obj` but instead of passing the object of the
current context, it will find the innermost context of type
    ``object_type``.
This generates a decorator that works roughly like this::
from functools import update_wrapper
def decorator(f):
@pass_context
def new_func(ctx, *args, **kwargs):
obj = ctx.find_object(object_type)
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(new_func, f)
return decorator
:param object_type: the type of the object to pass.
:param ensure: if set to `True`, a new object will be created and
remembered on the context if it's not there yet.
"""
def decorator(f: F) -> F:
def new_func(*args, **kwargs): # type: ignore
ctx = get_current_context()
if ensure:
obj = ctx.ensure_object(object_type)
else:
obj = ctx.find_object(object_type)
if obj is None:
raise RuntimeError(
"Managed to invoke callback without a context"
f" object of type {object_type.__name__!r}"
" existing."
)
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
return decorator
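# Illustrative sketch (an addition, not part of upstream decorators.py):
# make_pass_decorator() builds a decorator that injects the innermost
# context object of the given type. `_ExampleRepo` is a hypothetical
# state class.
class _ExampleRepo:  # hypothetical, for illustration only
    def __init__(self) -> None:
        self.verbose = False


_pass_example_repo = make_pass_decorator(_ExampleRepo, ensure=True)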
def pass_meta_key(
key: str, *, doc_description: t.Optional[str] = None
) -> "t.Callable[[F], F]":
"""Create a decorator that passes a key from
:attr:`click.Context.meta` as the first argument to the decorated
function.
:param key: Key in ``Context.meta`` to pass.
:param doc_description: Description of the object being passed,
inserted into the decorator's docstring. Defaults to "the 'key'
key from Context.meta".
.. versionadded:: 8.0
"""
def decorator(f: F) -> F:
def new_func(*args, **kwargs): # type: ignore
ctx = get_current_context()
obj = ctx.meta[key]
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
if doc_description is None:
doc_description = f"the {key!r} key from :attr:`click.Context.meta`"
decorator.__doc__ = (
f"Decorator that passes {doc_description} as the first argument"
" to the decorated function."
)
return decorator
CmdType = t.TypeVar("CmdType", bound=Command)
@t.overload
def command(
__func: t.Callable[..., t.Any],
) -> Command:
...
@t.overload
def command(
name: t.Optional[str] = None,
**attrs: t.Any,
) -> t.Callable[..., Command]:
...
@t.overload
def command(
name: t.Optional[str] = None,
cls: t.Type[CmdType] = ...,
**attrs: t.Any,
) -> t.Callable[..., CmdType]:
...
def command(
name: t.Union[str, t.Callable[..., t.Any], None] = None,
cls: t.Optional[t.Type[Command]] = None,
**attrs: t.Any,
) -> t.Union[Command, t.Callable[..., Command]]:
r"""Creates a new :class:`Command` and uses the decorated function as
callback. This will also automatically attach all decorated
:func:`option`\s and :func:`argument`\s as parameters to the command.
The name of the command defaults to the name of the function with
underscores replaced by dashes. If you want to change that, you can
pass the intended name as the first argument.
All keyword arguments are forwarded to the underlying command class.
For the ``params`` argument, any decorated params are appended to
the end of the list.
Once decorated the function turns into a :class:`Command` instance
that can be invoked as a command line utility or be attached to a
command :class:`Group`.
:param name: the name of the command. This defaults to the function
name with underscores replaced by dashes.
:param cls: the command class to instantiate. This defaults to
:class:`Command`.
.. versionchanged:: 8.1
This decorator can be applied without parentheses.
.. versionchanged:: 8.1
The ``params`` argument can be used. Decorated params are
appended to the end of the list.
"""
func: t.Optional[t.Callable[..., t.Any]] = None
if callable(name):
func = name
name = None
assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class."
assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments."
if cls is None:
cls = Command
def decorator(f: t.Callable[..., t.Any]) -> Command:
if isinstance(f, Command):
raise TypeError("Attempted to convert a callback into a command twice.")
attr_params = attrs.pop("params", None)
params = attr_params if attr_params is not None else []
try:
decorator_params = f.__click_params__ # type: ignore
except AttributeError:
pass
else:
del f.__click_params__ # type: ignore
params.extend(reversed(decorator_params))
if attrs.get("help") is None:
attrs["help"] = f.__doc__
cmd = cls( # type: ignore[misc]
name=name or f.__name__.lower().replace("_", "-"), # type: ignore[arg-type]
callback=f,
params=params,
**attrs,
)
cmd.__doc__ = f.__doc__
return cmd
if func is not None:
return decorator(func)
return decorator
@t.overload
def group(
__func: t.Callable[..., t.Any],
) -> Group:
...
@t.overload
def group(
name: t.Optional[str] = None,
**attrs: t.Any,
) -> t.Callable[[F], Group]:
...
def group(
name: t.Union[str, t.Callable[..., t.Any], None] = None, **attrs: t.Any
) -> t.Union[Group, t.Callable[[F], Group]]:
"""Creates a new :class:`Group` with a function as callback. This
    works otherwise the same as :func:`command`, except that the `cls`
parameter is set to :class:`Group`.
.. versionchanged:: 8.1
This decorator can be applied without parentheses.
"""
if attrs.get("cls") is None:
attrs["cls"] = Group
if callable(name):
grp: t.Callable[[F], Group] = t.cast(Group, command(**attrs))
return grp(name)
return t.cast(Group, command(name, **attrs))
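# Illustrative sketch (an addition): a Group with one subcommand built from
# the decorators defined above. The command names are made up.
@group(name="cli")
def _example_cli():
    """Hypothetical root command."""


@_example_cli.command(name="sync")
def _example_sync():
    """Hypothetical subcommand."""
    echo("syncing")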
def _param_memo(f: FC, param: Parameter) -> None:
if isinstance(f, Command):
f.params.append(param)
else:
if not hasattr(f, "__click_params__"):
f.__click_params__ = [] # type: ignore
f.__click_params__.append(param) # type: ignore
def argument(*param_decls: str, **attrs: t.Any) -> t.Callable[[FC], FC]:
"""Attaches an argument to the command. All positional arguments are
passed as parameter declarations to :class:`Argument`; all keyword
arguments are forwarded unchanged (except ``cls``).
This is equivalent to creating an :class:`Argument` instance manually
and attaching it to the :attr:`Command.params` list.
:param cls: the argument class to instantiate. This defaults to
:class:`Argument`.
"""
def decorator(f: FC) -> FC:
ArgumentClass = attrs.pop("cls", None) or Argument
_param_memo(f, ArgumentClass(param_decls, **attrs))
return f
return decorator
def option(*param_decls: str, **attrs: t.Any) -> t.Callable[[FC], FC]:
"""Attaches an option to the command. All positional arguments are
passed as parameter declarations to :class:`Option`; all keyword
arguments are forwarded unchanged (except ``cls``).
This is equivalent to creating an :class:`Option` instance manually
and attaching it to the :attr:`Command.params` list.
:param cls: the option class to instantiate. This defaults to
:class:`Option`.
"""
def decorator(f: FC) -> FC:
# Issue 926, copy attrs, so pre-defined options can re-use the same cls=
option_attrs = attrs.copy()
OptionClass = option_attrs.pop("cls", None) or Option
_param_memo(f, OptionClass(param_decls, **option_attrs))
return f
return decorator
def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
"""Add a ``--yes`` option which shows a prompt before continuing if
not passed. If the prompt is declined, the program will exit.
:param param_decls: One or more option names. Defaults to the single
value ``"--yes"``.
:param kwargs: Extra arguments are passed to :func:`option`.
"""
def callback(ctx: Context, param: Parameter, value: bool) -> None:
if not value:
ctx.abort()
if not param_decls:
param_decls = ("--yes",)
kwargs.setdefault("is_flag", True)
kwargs.setdefault("callback", callback)
kwargs.setdefault("expose_value", False)
kwargs.setdefault("prompt", "Do you want to continue?")
kwargs.setdefault("help", "Confirm the action without prompting.")
return option(*param_decls, **kwargs)
def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
"""Add a ``--password`` option which prompts for a password, hiding
input and asking to enter the value again for confirmation.
:param param_decls: One or more option names. Defaults to the single
value ``"--password"``.
:param kwargs: Extra arguments are passed to :func:`option`.
"""
if not param_decls:
param_decls = ("--password",)
kwargs.setdefault("prompt", True)
kwargs.setdefault("confirmation_prompt", True)
kwargs.setdefault("hide_input", True)
return option(*param_decls, **kwargs)
def version_option(
version: t.Optional[str] = None,
*param_decls: str,
package_name: t.Optional[str] = None,
prog_name: t.Optional[str] = None,
message: t.Optional[str] = None,
**kwargs: t.Any,
) -> t.Callable[[FC], FC]:
"""Add a ``--version`` option which immediately prints the version
number and exits the program.
If ``version`` is not provided, Click will try to detect it using
:func:`importlib.metadata.version` to get the version for the
``package_name``. On Python < 3.8, the ``importlib_metadata``
backport must be installed.
If ``package_name`` is not provided, Click will try to detect it by
inspecting the stack frames. This will be used to detect the
version, so it must match the name of the installed package.
:param version: The version number to show. If not provided, Click
will try to detect it.
:param param_decls: One or more option names. Defaults to the single
value ``"--version"``.
:param package_name: The package name to detect the version from. If
not provided, Click will try to detect it.
:param prog_name: The name of the CLI to show in the message. If not
provided, it will be detected from the command.
:param message: The message to show. The values ``%(prog)s``,
``%(package)s``, and ``%(version)s`` are available. Defaults to
``"%(prog)s, version %(version)s"``.
:param kwargs: Extra arguments are passed to :func:`option`.
:raise RuntimeError: ``version`` could not be detected.
.. versionchanged:: 8.0
Add the ``package_name`` parameter, and the ``%(package)s``
value for messages.
.. versionchanged:: 8.0
Use :mod:`importlib.metadata` instead of ``pkg_resources``. The
version is detected based on the package name, not the entry
point name. The Python package name must match the installed
package name, or be passed with ``package_name=``.
"""
if message is None:
message = _("%(prog)s, version %(version)s")
if version is None and package_name is None:
frame = inspect.currentframe()
f_back = frame.f_back if frame is not None else None
f_globals = f_back.f_globals if f_back is not None else None
# break reference cycle
# https://docs.python.org/3/library/inspect.html#the-interpreter-stack
del frame
if f_globals is not None:
package_name = f_globals.get("__name__")
if package_name == "__main__":
package_name = f_globals.get("__package__")
if package_name:
package_name = package_name.partition(".")[0]
def callback(ctx: Context, param: Parameter, value: bool) -> None:
if not value or ctx.resilient_parsing:
return
nonlocal prog_name
nonlocal version
if prog_name is None:
prog_name = ctx.find_root().info_name
if version is None and package_name is not None:
metadata: t.Optional[types.ModuleType]
try:
from importlib import metadata # type: ignore
except ImportError:
# Python < 3.8
import importlib_metadata as metadata # type: ignore
try:
version = metadata.version(package_name) # type: ignore
except metadata.PackageNotFoundError: # type: ignore
raise RuntimeError(
f"{package_name!r} is not installed. Try passing"
" 'package_name' instead."
) from None
if version is None:
raise RuntimeError(
f"Could not determine the version for {package_name!r} automatically."
)
echo(
t.cast(str, message)
% {"prog": prog_name, "package": package_name, "version": version},
color=ctx.color,
)
ctx.exit()
if not param_decls:
param_decls = ("--version",)
kwargs.setdefault("is_flag", True)
kwargs.setdefault("expose_value", False)
kwargs.setdefault("is_eager", True)
kwargs.setdefault("help", _("Show the version and exit."))
kwargs["callback"] = callback
return option(*param_decls, **kwargs)
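# Illustrative sketch (an addition): supplying a static version string so no
# package metadata lookup is attempted. "example" and "1.2.3" are made up.
@command(name="example")
@version_option(version="1.2.3", prog_name="example-cli")
def _example_versioned():
    """Hypothetical command exposing a --version flag."""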
def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
"""Add a ``--help`` option which immediately prints the help page
and exits the program.
This is usually unnecessary, as the ``--help`` option is added to
each command automatically unless ``add_help_option=False`` is
passed.
:param param_decls: One or more option names. Defaults to the single
value ``"--help"``.
:param kwargs: Extra arguments are passed to :func:`option`.
"""
def callback(ctx: Context, param: Parameter, value: bool) -> None:
if not value or ctx.resilient_parsing:
return
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
if not param_decls:
param_decls = ("--help",)
kwargs.setdefault("is_flag", True)
kwargs.setdefault("expose_value", False)
kwargs.setdefault("is_eager", True)
kwargs.setdefault("help", _("Show this message and exit."))
kwargs["callback"] = callback
return option(*param_decls, **kwargs)
| 16,350 | Python | 31.833333 | 88 | 0.613639 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/_termui_impl.py | """
This module contains implementations for the termui module. To keep the
import time of Click down, some infrequently used functionality is
placed in this module and only imported as needed.
"""
import contextlib
import math
import os
import sys
import time
import typing as t
from gettext import gettext as _
from ._compat import _default_text_stdout
from ._compat import CYGWIN
from ._compat import get_best_encoding
from ._compat import isatty
from ._compat import open_stream
from ._compat import strip_ansi
from ._compat import term_len
from ._compat import WIN
from .exceptions import ClickException
from .utils import echo
V = t.TypeVar("V")
if os.name == "nt":
BEFORE_BAR = "\r"
AFTER_BAR = "\n"
else:
BEFORE_BAR = "\r\033[?25l"
AFTER_BAR = "\033[?25h\n"
class ProgressBar(t.Generic[V]):
def __init__(
self,
iterable: t.Optional[t.Iterable[V]],
length: t.Optional[int] = None,
fill_char: str = "#",
empty_char: str = " ",
bar_template: str = "%(bar)s",
info_sep: str = " ",
show_eta: bool = True,
show_percent: t.Optional[bool] = None,
show_pos: bool = False,
item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
label: t.Optional[str] = None,
file: t.Optional[t.TextIO] = None,
color: t.Optional[bool] = None,
update_min_steps: int = 1,
width: int = 30,
) -> None:
self.fill_char = fill_char
self.empty_char = empty_char
self.bar_template = bar_template
self.info_sep = info_sep
self.show_eta = show_eta
self.show_percent = show_percent
self.show_pos = show_pos
self.item_show_func = item_show_func
self.label = label or ""
if file is None:
file = _default_text_stdout()
self.file = file
self.color = color
self.update_min_steps = update_min_steps
self._completed_intervals = 0
self.width = width
self.autowidth = width == 0
if length is None:
from operator import length_hint
length = length_hint(iterable, -1)
if length == -1:
length = None
if iterable is None:
if length is None:
raise TypeError("iterable or length is required")
iterable = t.cast(t.Iterable[V], range(length))
self.iter = iter(iterable)
self.length = length
self.pos = 0
self.avg: t.List[float] = []
self.start = self.last_eta = time.time()
self.eta_known = False
self.finished = False
self.max_width: t.Optional[int] = None
self.entered = False
self.current_item: t.Optional[V] = None
self.is_hidden = not isatty(self.file)
self._last_line: t.Optional[str] = None
def __enter__(self) -> "ProgressBar":
self.entered = True
self.render_progress()
return self
def __exit__(self, exc_type, exc_value, tb): # type: ignore
self.render_finish()
def __iter__(self) -> t.Iterator[V]:
if not self.entered:
raise RuntimeError("You need to use progress bars in a with block.")
self.render_progress()
return self.generator()
def __next__(self) -> V:
# Iteration is defined in terms of a generator function,
# returned by iter(self); use that to define next(). This works
# because `self.iter` is an iterable consumed by that generator,
# so it is re-entry safe. Calling `next(self.generator())`
# twice works and does "what you want".
return next(iter(self))
def render_finish(self) -> None:
if self.is_hidden:
return
self.file.write(AFTER_BAR)
self.file.flush()
@property
def pct(self) -> float:
if self.finished:
return 1.0
return min(self.pos / (float(self.length or 1) or 1), 1.0)
@property
def time_per_iteration(self) -> float:
if not self.avg:
return 0.0
return sum(self.avg) / float(len(self.avg))
@property
def eta(self) -> float:
if self.length is not None and not self.finished:
return self.time_per_iteration * (self.length - self.pos)
return 0.0
def format_eta(self) -> str:
if self.eta_known:
t = int(self.eta)
seconds = t % 60
t //= 60
minutes = t % 60
t //= 60
hours = t % 24
t //= 24
if t > 0:
return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
else:
return f"{hours:02}:{minutes:02}:{seconds:02}"
return ""
def format_pos(self) -> str:
pos = str(self.pos)
if self.length is not None:
pos += f"/{self.length}"
return pos
def format_pct(self) -> str:
return f"{int(self.pct * 100): 4}%"[1:]
def format_bar(self) -> str:
if self.length is not None:
bar_length = int(self.pct * self.width)
bar = self.fill_char * bar_length
bar += self.empty_char * (self.width - bar_length)
elif self.finished:
bar = self.fill_char * self.width
else:
chars = list(self.empty_char * (self.width or 1))
if self.time_per_iteration != 0:
chars[
int(
(math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
* self.width
)
] = self.fill_char
bar = "".join(chars)
return bar
def format_progress_line(self) -> str:
show_percent = self.show_percent
info_bits = []
if self.length is not None and show_percent is None:
show_percent = not self.show_pos
if self.show_pos:
info_bits.append(self.format_pos())
if show_percent:
info_bits.append(self.format_pct())
if self.show_eta and self.eta_known and not self.finished:
info_bits.append(self.format_eta())
if self.item_show_func is not None:
item_info = self.item_show_func(self.current_item)
if item_info is not None:
info_bits.append(item_info)
return (
self.bar_template
% {
"label": self.label,
"bar": self.format_bar(),
"info": self.info_sep.join(info_bits),
}
).rstrip()
def render_progress(self) -> None:
import shutil
if self.is_hidden:
# Only output the label as it changes if the output is not a
# TTY. Use file=stderr if you expect to be piping stdout.
if self._last_line != self.label:
self._last_line = self.label
echo(self.label, file=self.file, color=self.color)
return
buf = []
# Update width in case the terminal has been resized
if self.autowidth:
old_width = self.width
self.width = 0
clutter_length = term_len(self.format_progress_line())
new_width = max(0, shutil.get_terminal_size().columns - clutter_length)
if new_width < old_width:
buf.append(BEFORE_BAR)
buf.append(" " * self.max_width) # type: ignore
self.max_width = new_width
self.width = new_width
clear_width = self.width
if self.max_width is not None:
clear_width = self.max_width
buf.append(BEFORE_BAR)
line = self.format_progress_line()
line_len = term_len(line)
if self.max_width is None or self.max_width < line_len:
self.max_width = line_len
buf.append(line)
buf.append(" " * (clear_width - line_len))
line = "".join(buf)
# Render the line only if it changed.
if line != self._last_line:
self._last_line = line
echo(line, file=self.file, color=self.color, nl=False)
self.file.flush()
def make_step(self, n_steps: int) -> None:
self.pos += n_steps
if self.length is not None and self.pos >= self.length:
self.finished = True
if (time.time() - self.last_eta) < 1.0:
return
self.last_eta = time.time()
# self.avg is a rolling list of length <= 7 of steps where steps are
# defined as time elapsed divided by the total progress through
# self.length.
if self.pos:
step = (time.time() - self.start) / self.pos
else:
step = time.time() - self.start
self.avg = self.avg[-6:] + [step]
self.eta_known = self.length is not None
def update(self, n_steps: int, current_item: t.Optional[V] = None) -> None:
"""Update the progress bar by advancing a specified number of
steps, and optionally set the ``current_item`` for this new
position.
:param n_steps: Number of steps to advance.
:param current_item: Optional item to set as ``current_item``
for the updated position.
.. versionchanged:: 8.0
Added the ``current_item`` optional parameter.
.. versionchanged:: 8.0
Only render when the number of steps meets the
``update_min_steps`` threshold.
"""
if current_item is not None:
self.current_item = current_item
self._completed_intervals += n_steps
if self._completed_intervals >= self.update_min_steps:
self.make_step(self._completed_intervals)
self.render_progress()
self._completed_intervals = 0
def finish(self) -> None:
self.eta_known = False
self.current_item = None
self.finished = True
def generator(self) -> t.Iterator[V]:
"""Return a generator which yields the items added to the bar
during construction, and updates the progress bar *after* the
yielded block returns.
"""
# WARNING: the iterator interface for `ProgressBar` relies on
# this and only works because this is a simple generator which
# doesn't create or manage additional state. If this function
# changes, the impact should be evaluated both against
# `iter(bar)` and `next(bar)`. `next()` in particular may call
# `self.generator()` repeatedly, and this must remain safe in
# order for that interface to work.
if not self.entered:
raise RuntimeError("You need to use progress bars in a with block.")
if self.is_hidden:
yield from self.iter
else:
for rv in self.iter:
self.current_item = rv
# This allows show_item_func to be updated before the
# item is processed. Only trigger at the beginning of
# the update interval.
if self._completed_intervals == 0:
self.render_progress()
yield rv
self.update(1)
self.finish()
self.render_progress()
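# Illustrative sketch (an addition): driving ProgressBar directly. In normal
# use it is constructed via click.progressbar(), which forwards the same
# kinds of arguments shown here.
def _demo_progress() -> None:  # hypothetical helper, for illustration only
    with ProgressBar(
        iterable=range(4),
        label="working",
        bar_template="%(label)s  [%(bar)s]  %(info)s",
    ) as bar:
        for _item in bar:  # update(1) runs after each yielded item
            pass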
def pager(generator: t.Iterable[str], color: t.Optional[bool] = None) -> None:
"""Decide what method to use for paging through text."""
stdout = _default_text_stdout()
if not isatty(sys.stdin) or not isatty(stdout):
return _nullpager(stdout, generator, color)
pager_cmd = (os.environ.get("PAGER", None) or "").strip()
if pager_cmd:
if WIN:
return _tempfilepager(generator, pager_cmd, color)
return _pipepager(generator, pager_cmd, color)
if os.environ.get("TERM") in ("dumb", "emacs"):
return _nullpager(stdout, generator, color)
if WIN or sys.platform.startswith("os2"):
return _tempfilepager(generator, "more <", color)
if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
return _pipepager(generator, "less", color)
import tempfile
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, "system") and os.system(f'more "{filename}"') == 0:
return _pipepager(generator, "more", color)
return _nullpager(stdout, generator, color)
finally:
os.unlink(filename)
def _pipepager(generator: t.Iterable[str], cmd: str, color: t.Optional[bool]) -> None:
"""Page through text by feeding it to another program. Invoking a
pager through this might support colors.
"""
import subprocess
env = dict(os.environ)
    # If we're piping to less we might support colors under the
    # condition that its flags (from the LESS env var or the command
    # line) include -r or -R, which tell less to pass ANSI codes through.
cmd_detail = cmd.rsplit("/", 1)[-1].split()
if color is None and cmd_detail[0] == "less":
less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}"
if not less_flags:
env["LESS"] = "-R"
color = True
elif "r" in less_flags or "R" in less_flags:
color = True
c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
stdin = t.cast(t.BinaryIO, c.stdin)
encoding = get_best_encoding(stdin)
try:
for text in generator:
if not color:
text = strip_ansi(text)
stdin.write(text.encode(encoding, "replace"))
except (OSError, KeyboardInterrupt):
pass
else:
stdin.close()
# Less doesn't respect ^C, but catches it for its own UI purposes (aborting
# search or other commands inside less).
#
# That means when the user hits ^C, the parent process (click) terminates,
# but less is still alive, paging the output and messing up the terminal.
#
# If the user wants to make the pager exit on ^C, they should set
# `LESS='-K'`. It's not our decision to make.
while True:
try:
c.wait()
except KeyboardInterrupt:
pass
else:
break
def _tempfilepager(
generator: t.Iterable[str], cmd: str, color: t.Optional[bool]
) -> None:
"""Page through text by invoking a program on a temporary file."""
import tempfile
fd, filename = tempfile.mkstemp()
# TODO: This never terminates if the passed generator never terminates.
text = "".join(generator)
if not color:
text = strip_ansi(text)
encoding = get_best_encoding(sys.stdout)
with open_stream(filename, "wb")[0] as f:
f.write(text.encode(encoding))
try:
os.system(f'{cmd} "{filename}"')
finally:
os.close(fd)
os.unlink(filename)
def _nullpager(
stream: t.TextIO, generator: t.Iterable[str], color: t.Optional[bool]
) -> None:
"""Simply print unformatted text. This is the ultimate fallback."""
for text in generator:
if not color:
text = strip_ansi(text)
stream.write(text)
class Editor:
def __init__(
self,
editor: t.Optional[str] = None,
env: t.Optional[t.Mapping[str, str]] = None,
require_save: bool = True,
extension: str = ".txt",
) -> None:
self.editor = editor
self.env = env
self.require_save = require_save
self.extension = extension
def get_editor(self) -> str:
if self.editor is not None:
return self.editor
for key in "VISUAL", "EDITOR":
rv = os.environ.get(key)
if rv:
return rv
if WIN:
return "notepad"
for editor in "sensible-editor", "vim", "nano":
if os.system(f"which {editor} >/dev/null 2>&1") == 0:
return editor
return "vi"
def edit_file(self, filename: str) -> None:
import subprocess
editor = self.get_editor()
environ: t.Optional[t.Dict[str, str]] = None
if self.env:
environ = os.environ.copy()
environ.update(self.env)
try:
c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True)
exit_code = c.wait()
if exit_code != 0:
raise ClickException(
_("{editor}: Editing failed").format(editor=editor)
)
except OSError as e:
raise ClickException(
_("{editor}: Editing failed: {e}").format(editor=editor, e=e)
) from e
def edit(self, text: t.Optional[t.AnyStr]) -> t.Optional[t.AnyStr]:
import tempfile
if not text:
data = b""
elif isinstance(text, (bytes, bytearray)):
data = text
else:
if text and not text.endswith("\n"):
text += "\n"
if WIN:
data = text.replace("\n", "\r\n").encode("utf-8-sig")
else:
data = text.encode("utf-8")
fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
f: t.BinaryIO
try:
with os.fdopen(fd, "wb") as f:
f.write(data)
# If the filesystem resolution is 1 second, like Mac OS
# 10.12 Extended, or 2 seconds, like FAT32, and the editor
# closes very fast, require_save can fail. Set the modified
# time to be 2 seconds in the past to work around this.
os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))
# Depending on the resolution, the exact value might not be
# recorded, so get the new recorded value.
timestamp = os.path.getmtime(name)
self.edit_file(name)
if self.require_save and os.path.getmtime(name) == timestamp:
return None
with open(name, "rb") as f:
rv = f.read()
if isinstance(text, (bytes, bytearray)):
return rv
return rv.decode("utf-8-sig").replace("\r\n", "\n") # type: ignore
finally:
os.unlink(name)
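# Illustrative sketch (an addition): Editor is normally reached through
# click.edit(); this shows the equivalent direct call.
def _demo_edit():  # hypothetical helper, for illustration only
    editor = Editor(require_save=True, extension=".md")
    # Returns None if the user closed the editor without saving.
    return editor.edit("# draft\n")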
def open_url(url: str, wait: bool = False, locate: bool = False) -> int:
import subprocess
def _unquote_file(url: str) -> str:
from urllib.parse import unquote
if url.startswith("file://"):
url = unquote(url[7:])
return url
if sys.platform == "darwin":
args = ["open"]
if wait:
args.append("-W")
if locate:
args.append("-R")
args.append(_unquote_file(url))
null = open("/dev/null", "w")
try:
return subprocess.Popen(args, stderr=null).wait()
finally:
null.close()
elif WIN:
if locate:
url = _unquote_file(url.replace('"', ""))
args = f'explorer /select,"{url}"'
else:
url = url.replace('"', "")
wait_str = "/WAIT" if wait else ""
args = f'start {wait_str} "" "{url}"'
return os.system(args)
elif CYGWIN:
if locate:
url = os.path.dirname(_unquote_file(url).replace('"', ""))
args = f'cygstart "{url}"'
else:
url = url.replace('"', "")
wait_str = "-w" if wait else ""
args = f'cygstart {wait_str} "{url}"'
return os.system(args)
try:
if locate:
url = os.path.dirname(_unquote_file(url)) or "."
else:
url = _unquote_file(url)
c = subprocess.Popen(["xdg-open", url])
if wait:
return c.wait()
return 0
except OSError:
if url.startswith(("http://", "https://")) and not locate and not wait:
import webbrowser
webbrowser.open(url)
return 0
return 1
def _translate_ch_to_exc(ch: str) -> t.Optional[BaseException]:
if ch == "\x03":
raise KeyboardInterrupt()
if ch == "\x04" and not WIN: # Unix-like, Ctrl+D
raise EOFError()
if ch == "\x1a" and WIN: # Windows, Ctrl+Z
raise EOFError()
return None
if WIN:
import msvcrt
@contextlib.contextmanager
def raw_terminal() -> t.Iterator[int]:
yield -1
def getchar(echo: bool) -> str:
# The function `getch` will return a bytes object corresponding to
# the pressed character. Since Windows 10 build 1803, it will also
# return \x00 when called a second time after pressing a regular key.
#
# `getwch` does not share this probably-bugged behavior. Moreover, it
# returns a Unicode object by default, which is what we want.
#
# Either of these functions will return \x00 or \xe0 to indicate
# a special key, and you need to call the same function again to get
# the "rest" of the code. The fun part is that \u00e0 is
# "latin small letter a with grave", so if you type that on a French
# keyboard, you _also_ get a \xe0.
# E.g., consider the Up arrow. This returns \xe0 and then \x48. The
# resulting Unicode string reads as "a with grave" + "capital H".
# This is indistinguishable from when the user actually types
# "a with grave" and then "capital H".
#
# When \xe0 is returned, we assume it's part of a special-key sequence
# and call `getwch` again, but that means that when the user types
# the \u00e0 character, `getchar` doesn't return until a second
# character is typed.
# The alternative is returning immediately, but that would mess up
# cross-platform handling of arrow keys and others that start with
# \xe0. Another option is using `getch`, but then we can't reliably
# read non-ASCII characters, because return values of `getch` are
# limited to the current 8-bit codepage.
#
# Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
# is doing the right thing in more situations than with `getch`.
func: t.Callable[[], str]
if echo:
func = msvcrt.getwche # type: ignore
else:
func = msvcrt.getwch # type: ignore
rv = func()
if rv in ("\x00", "\xe0"):
# \x00 and \xe0 are control characters that indicate special key,
# see above.
rv += func()
_translate_ch_to_exc(rv)
return rv
else:
import tty
import termios
@contextlib.contextmanager
def raw_terminal() -> t.Iterator[int]:
f: t.Optional[t.TextIO]
fd: int
if not isatty(sys.stdin):
f = open("/dev/tty")
fd = f.fileno()
else:
fd = sys.stdin.fileno()
f = None
try:
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
yield fd
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
sys.stdout.flush()
if f is not None:
f.close()
except termios.error:
pass
def getchar(echo: bool) -> str:
with raw_terminal() as fd:
ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace")
if echo and isatty(sys.stdout):
sys.stdout.write(ch)
_translate_ch_to_exc(ch)
return ch
| 23,451 | Python | 31.662953 | 88 | 0.554177 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/_textwrap.py | import textwrap
import typing as t
from contextlib import contextmanager
class TextWrapper(textwrap.TextWrapper):
def _handle_long_word(
self,
reversed_chunks: t.List[str],
cur_line: t.List[str],
cur_len: int,
width: int,
) -> None:
space_left = max(width - cur_len, 1)
if self.break_long_words:
last = reversed_chunks[-1]
cut = last[:space_left]
res = last[space_left:]
cur_line.append(cut)
reversed_chunks[-1] = res
elif not cur_line:
cur_line.append(reversed_chunks.pop())
@contextmanager
def extra_indent(self, indent: str) -> t.Iterator[None]:
old_initial_indent = self.initial_indent
old_subsequent_indent = self.subsequent_indent
self.initial_indent += indent
self.subsequent_indent += indent
try:
yield
finally:
self.initial_indent = old_initial_indent
self.subsequent_indent = old_subsequent_indent
def indent_only(self, text: str) -> str:
rv = []
for idx, line in enumerate(text.splitlines()):
indent = self.initial_indent
if idx > 0:
indent = self.subsequent_indent
rv.append(f"{indent}{line}")
return "\n".join(rv)
| 1,353 | Python | 26.079999 | 60 | 0.563932 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/types.py | import os
import stat
import typing as t
from datetime import datetime
from gettext import gettext as _
from gettext import ngettext
from ._compat import _get_argv_encoding
from ._compat import get_filesystem_encoding
from ._compat import open_stream
from .exceptions import BadParameter
from .utils import LazyFile
from .utils import safecall
if t.TYPE_CHECKING:
import typing_extensions as te
from .core import Context
from .core import Parameter
from .shell_completion import CompletionItem
class ParamType:
"""Represents the type of a parameter. Validates and converts values
from the command line or Python into the correct type.
To implement a custom type, subclass and implement at least the
following:
- The :attr:`name` class attribute must be set.
- Calling an instance of the type with ``None`` must return
``None``. This is already implemented by default.
- :meth:`convert` must convert string values to the correct type.
- :meth:`convert` must accept values that are already the correct
type.
- It must be able to convert a value if the ``ctx`` and ``param``
arguments are ``None``. This can occur when converting prompt
input.
"""
is_composite: t.ClassVar[bool] = False
arity: t.ClassVar[int] = 1
#: the descriptive name of this type
name: str
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter: t.ClassVar[t.Optional[str]] = None
def to_info_dict(self) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
# The class name without the "ParamType" suffix.
param_type = type(self).__name__.partition("ParamType")[0]
param_type = param_type.partition("ParameterType")[0]
# Custom subclasses might not remember to set a name.
if hasattr(self, "name"):
name = self.name
else:
name = param_type
return {"param_type": param_type, "name": name}
def __call__(
self,
value: t.Any,
param: t.Optional["Parameter"] = None,
ctx: t.Optional["Context"] = None,
) -> t.Any:
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param: "Parameter") -> t.Optional[str]:
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param: "Parameter") -> t.Optional[str]:
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
"""Convert the value to the correct type. This is not called if
the value is ``None`` (the missing value).
This must accept string values from the command line, as well as
values that are already the correct type. It may also convert
other compatible types.
The ``param`` and ``ctx`` arguments may be ``None`` in certain
situations, such as when converting prompt input.
If the value cannot be converted, call :meth:`fail` with a
descriptive message.
:param value: The value to convert.
:param param: The parameter that is using this type to convert
its value. May be ``None``.
:param ctx: The current context that arrived at this value. May
be ``None``.
"""
return value
def split_envvar_value(self, rv: str) -> t.Sequence[str]:
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(
self,
message: str,
param: t.Optional["Parameter"] = None,
ctx: t.Optional["Context"] = None,
) -> "t.NoReturn":
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
def shell_complete(
self, ctx: "Context", param: "Parameter", incomplete: str
) -> t.List["CompletionItem"]:
"""Return a list of
:class:`~click.shell_completion.CompletionItem` objects for the
incomplete value. Most types do not provide completions, but
some do, and this allows custom types to provide custom
completions as well.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
return []
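# --- A minimal sketch, not part of click itself: the custom type the
# docstring above describes. Set ``name``, pass through already-converted
# values, and call ``fail()`` on bad input. The class name
# ``_ExampleCommaSeparated`` is hypothetical.
class _ExampleCommaSeparated(ParamType):
    name = "comma_list"
    def convert(
        self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
    ) -> t.Any:
        if isinstance(value, list):
            # Values that are already the correct type must be accepted.
            return value
        if not isinstance(value, str):
            self.fail(f"{value!r} is not a comma-separated string.", param, ctx)
        return [item.strip() for item in value.split(",")]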
class CompositeParamType(ParamType):
is_composite = True
@property
def arity(self) -> int: # type: ignore
raise NotImplementedError()
class FuncParamType(ParamType):
def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None:
self.name = func.__name__
self.func = func
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["func"] = self.func
return info_dict
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
try:
return self.func(value)
except ValueError:
try:
value = str(value)
except UnicodeError:
value = value.decode("utf-8", "replace")
self.fail(value, param, ctx)
class UnprocessedParamType(ParamType):
name = "text"
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
return value
def __repr__(self) -> str:
return "UNPROCESSED"
class StringParamType(ParamType):
name = "text"
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
if isinstance(value, bytes):
enc = _get_argv_encoding()
try:
value = value.decode(enc)
except UnicodeError:
fs_enc = get_filesystem_encoding()
if fs_enc != enc:
try:
value = value.decode(fs_enc)
except UnicodeError:
value = value.decode("utf-8", "replace")
else:
value = value.decode("utf-8", "replace")
return value
return str(value)
def __repr__(self) -> str:
return "STRING"
class Choice(ParamType):
"""The choice type allows a value to be checked against a fixed set
of supported values. All of these values have to be strings.
You should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
The resulting value will always be one of the originally passed choices
regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
being specified.
See :ref:`choice-opts` for an example.
:param case_sensitive: Set to false to make choices case
insensitive. Defaults to true.
"""
name = "choice"
def __init__(self, choices: t.Sequence[str], case_sensitive: bool = True) -> None:
self.choices = choices
self.case_sensitive = case_sensitive
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["choices"] = self.choices
info_dict["case_sensitive"] = self.case_sensitive
return info_dict
def get_metavar(self, param: "Parameter") -> str:
choices_str = "|".join(self.choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
        # Use square brackets to indicate an option or optional argument.
return f"[{choices_str}]"
def get_missing_message(self, param: "Parameter") -> str:
return _("Choose from:\n\t{choices}").format(choices=",\n\t".join(self.choices))
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original
for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
choices_str = ", ".join(map(repr, self.choices))
self.fail(
ngettext(
"{value!r} is not {choice}.",
"{value!r} is not one of {choices}.",
len(self.choices),
).format(value=value, choice=choices_str, choices=choices_str),
param,
ctx,
)
def __repr__(self) -> str:
return f"Choice({list(self.choices)})"
def shell_complete(
self, ctx: "Context", param: "Parameter", incomplete: str
) -> t.List["CompletionItem"]:
"""Complete choices that start with the incomplete value.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
str_choices = map(str, self.choices)
if self.case_sensitive:
matched = (c for c in str_choices if c.startswith(incomplete))
else:
incomplete = incomplete.lower()
matched = (c for c in str_choices if c.lower().startswith(incomplete))
return [CompletionItem(c) for c in matched]
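# --- A minimal sketch, not part of click itself: Choice.convert maps a
# case-insensitive match back to the originally passed choice, even with
# ``ctx`` and ``param`` set to None as happens for prompt input.
def _example_choice_conversion() -> str:
    color = Choice(["Red", "Green"], case_sensitive=False)
    return color.convert("red", None, None)  # returns "Red", not "red"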
class DateTime(ParamType):
"""The DateTime type converts date strings into `datetime` objects.
The format strings which are checked are configurable, but default to some
common (non-timezone aware) ISO 8601 formats.
When specifying *DateTime* formats, you should only pass a list or a tuple.
Other iterables, like generators, may lead to surprising results.
The format strings are processed using ``datetime.strptime``, and this
consequently defines the format strings which are allowed.
Parsing is tried using each format, in order, and the first format which
parses successfully is used.
:param formats: A list or tuple of date format strings, in the order in
which they should be tried. Defaults to
``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
``'%Y-%m-%d %H:%M:%S'``.
"""
name = "datetime"
def __init__(self, formats: t.Optional[t.Sequence[str]] = None):
self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["formats"] = self.formats
return info_dict
def get_metavar(self, param: "Parameter") -> str:
return f"[{'|'.join(self.formats)}]"
def _try_to_convert_date(self, value: t.Any, format: str) -> t.Optional[datetime]:
try:
return datetime.strptime(value, format)
except ValueError:
return None
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
if isinstance(value, datetime):
return value
for format in self.formats:
converted = self._try_to_convert_date(value, format)
if converted is not None:
return converted
formats_str = ", ".join(map(repr, self.formats))
self.fail(
ngettext(
"{value!r} does not match the format {format}.",
"{value!r} does not match the formats {formats}.",
len(self.formats),
).format(value=value, format=formats_str, formats=formats_str),
param,
ctx,
)
def __repr__(self) -> str:
return "DateTime"
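# --- A minimal sketch, not part of click itself: formats are tried in
# order and the first successful parse wins, so ordering matters when
# formats overlap.
def _example_datetime_conversion() -> datetime:
    dt = DateTime(formats=["%Y-%m-%d %H:%M:%S", "%Y-%m-%d"])
    # The first format fails on a date-only string; the second succeeds.
    return dt.convert("2021-06-01", None, None)  # datetime(2021, 6, 1, 0, 0)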
class _NumberParamTypeBase(ParamType):
_number_class: t.ClassVar[t.Type]
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
try:
return self._number_class(value)
except ValueError:
self.fail(
_("{value!r} is not a valid {number_type}.").format(
value=value, number_type=self.name
),
param,
ctx,
)
class _NumberRangeBase(_NumberParamTypeBase):
def __init__(
self,
min: t.Optional[float] = None,
max: t.Optional[float] = None,
min_open: bool = False,
max_open: bool = False,
clamp: bool = False,
) -> None:
self.min = min
self.max = max
self.min_open = min_open
self.max_open = max_open
self.clamp = clamp
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(
min=self.min,
max=self.max,
min_open=self.min_open,
max_open=self.max_open,
clamp=self.clamp,
)
return info_dict
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
import operator
rv = super().convert(value, param, ctx)
lt_min: bool = self.min is not None and (
operator.le if self.min_open else operator.lt
)(rv, self.min)
gt_max: bool = self.max is not None and (
operator.ge if self.max_open else operator.gt
)(rv, self.max)
if self.clamp:
if lt_min:
return self._clamp(self.min, 1, self.min_open) # type: ignore
if gt_max:
return self._clamp(self.max, -1, self.max_open) # type: ignore
if lt_min or gt_max:
self.fail(
_("{value} is not in the range {range}.").format(
value=rv, range=self._describe_range()
),
param,
ctx,
)
return rv
def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float:
"""Find the valid value to clamp to bound in the given
direction.
:param bound: The boundary value.
:param dir: 1 or -1 indicating the direction to move.
:param open: If true, the range does not include the bound.
"""
raise NotImplementedError
def _describe_range(self) -> str:
"""Describe the range for use in help text."""
if self.min is None:
op = "<" if self.max_open else "<="
return f"x{op}{self.max}"
if self.max is None:
op = ">" if self.min_open else ">="
return f"x{op}{self.min}"
lop = "<" if self.min_open else "<="
rop = "<" if self.max_open else "<="
return f"{self.min}{lop}x{rop}{self.max}"
def __repr__(self) -> str:
clamp = " clamped" if self.clamp else ""
return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
class IntParamType(_NumberParamTypeBase):
name = "integer"
_number_class = int
def __repr__(self) -> str:
return "INT"
class IntRange(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp( # type: ignore
self, bound: int, dir: "te.Literal[1, -1]", open: bool
) -> int:
if not open:
return bound
return bound + dir
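# --- A minimal sketch, not part of click itself: clamping to an *open*
# integer bound moves one step inside the range, so ``min=0, min_open=True``
# clamps -5 to 1 rather than 0.
def _example_int_range_clamp() -> int:
    bounded = IntRange(min=0, min_open=True, clamp=True)
    return bounded.convert("-5", None, None)  # returns 1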
class FloatParamType(_NumberParamTypeBase):
name = "float"
_number_class = float
def __repr__(self) -> str:
return "FLOAT"
class FloatRange(_NumberRangeBase, FloatParamType):
"""Restrict a :data:`click.FLOAT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing. This is not supported if either
boundary is marked ``open``.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "float range"
def __init__(
self,
min: t.Optional[float] = None,
max: t.Optional[float] = None,
min_open: bool = False,
max_open: bool = False,
clamp: bool = False,
) -> None:
super().__init__(
min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
)
if (min_open or max_open) and clamp:
raise TypeError("Clamping is not supported for open bounds.")
def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float:
if not open:
return bound
# Could use Python 3.9's math.nextafter here, but clamping an
# open float range doesn't seem to be particularly useful. It's
# left up to the user to write a callback to do it if needed.
raise RuntimeError("Clamping is not supported for open bounds.")
class BoolParamType(ParamType):
name = "boolean"
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
if value in {False, True}:
return bool(value)
norm = value.strip().lower()
if norm in {"1", "true", "t", "yes", "y", "on"}:
return True
if norm in {"0", "false", "f", "no", "n", "off"}:
return False
self.fail(
_("{value!r} is not a valid boolean.").format(value=value), param, ctx
)
def __repr__(self) -> str:
return "BOOL"
class UUIDParameterType(ParamType):
name = "uuid"
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
import uuid
if isinstance(value, uuid.UUID):
return value
value = value.strip()
try:
return uuid.UUID(value)
except ValueError:
self.fail(
_("{value!r} is not a valid UUID.").format(value=value), param, ctx
)
def __repr__(self) -> str:
return "UUID"
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or upon
first IO. The default is to be non-lazy for standard input and output
streams as well as files opened for reading, `lazy` otherwise. When opening a
file lazily for reading, it is still opened temporarily for validation, but
    will not be held open until first IO. `lazy` is mainly useful when opening
for writing to avoid creating the file until it is needed.
Starting with Click 2.0, files can also be opened atomically in which
case all writes go into a separate file in the same folder and upon
completion the file will be moved over to the original location. This
is useful if a file regularly read by other users is modified.
See :ref:`file-args` for more information.
"""
name = "filename"
envvar_list_splitter = os.path.pathsep
def __init__(
self,
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
lazy: t.Optional[bool] = None,
atomic: bool = False,
) -> None:
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(mode=self.mode, encoding=self.encoding)
return info_dict
def resolve_lazy_flag(self, value: t.Any) -> bool:
if self.lazy is not None:
return self.lazy
if value == "-":
return False
elif "w" in self.mode:
return True
return False
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
try:
if hasattr(value, "read") or hasattr(value, "write"):
return value
lazy = self.resolve_lazy_flag(value)
if lazy:
f: t.IO = t.cast(
t.IO,
LazyFile(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
),
)
if ctx is not None:
ctx.call_on_close(f.close_intelligently) # type: ignore
return f
f, should_close = open_stream(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except OSError as e: # noqa: B014
self.fail(f"'{os.fsdecode(value)}': {e.strerror}", param, ctx)
def shell_complete(
self, ctx: "Context", param: "Parameter", incomplete: str
) -> t.List["CompletionItem"]:
"""Return a special completion marker that tells the completion
system to use the shell to provide file path completions.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
return [CompletionItem(incomplete, type="file")]
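# --- A minimal sketch, not part of click itself: how resolve_lazy_flag
# picks the defaults described above. "-" (a standard stream) is never
# lazy, write modes default to lazy, and read modes default to eager.
def _example_file_lazy_defaults() -> t.Tuple[bool, bool, bool]:
    writer = File("w")
    return (
        writer.resolve_lazy_flag("-"),  # False: standard stream
        writer.resolve_lazy_flag("out.txt"),  # True: write mode
        File("r").resolve_lazy_flag("in.txt"),  # False: read mode
    )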
class Path(ParamType):
"""The ``Path`` type is similar to the :class:`File` type, but
returns the filename instead of an open file. Various checks can be
enabled to validate the type of file and permissions.
:param exists: The file or directory needs to exist for the value to
be valid. If this is not set to ``True``, and the file does not
exist, then all further checks are silently skipped.
:param file_okay: Allow a file as a value.
:param dir_okay: Allow a directory as a value.
:param readable: if true, a readable check is performed.
:param writable: if true, a writable check is performed.
:param executable: if true, an executable check is performed.
:param resolve_path: Make the value absolute and resolve any
symlinks. A ``~`` is not expanded, as this is supposed to be
done by the shell only.
:param allow_dash: Allow a single dash as a value, which indicates
a standard stream (but does not open it). Use
:func:`~click.open_file` to handle opening this value.
:param path_type: Convert the incoming path value to this type. If
``None``, keep Python's default, which is ``str``. Useful to
convert to :class:`pathlib.Path`.
.. versionchanged:: 8.1
Added the ``executable`` parameter.
.. versionchanged:: 8.0
Allow passing ``type=pathlib.Path``.
.. versionchanged:: 6.0
Added the ``allow_dash`` parameter.
"""
envvar_list_splitter = os.path.pathsep
def __init__(
self,
exists: bool = False,
file_okay: bool = True,
dir_okay: bool = True,
writable: bool = False,
readable: bool = True,
resolve_path: bool = False,
allow_dash: bool = False,
path_type: t.Optional[t.Type] = None,
executable: bool = False,
):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.readable = readable
self.writable = writable
self.executable = executable
self.resolve_path = resolve_path
self.allow_dash = allow_dash
self.type = path_type
if self.file_okay and not self.dir_okay:
self.name = _("file")
elif self.dir_okay and not self.file_okay:
self.name = _("directory")
else:
self.name = _("path")
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(
exists=self.exists,
file_okay=self.file_okay,
dir_okay=self.dir_okay,
writable=self.writable,
readable=self.readable,
allow_dash=self.allow_dash,
)
return info_dict
def coerce_path_result(self, rv: t.Any) -> t.Any:
if self.type is not None and not isinstance(rv, self.type):
if self.type is str:
rv = os.fsdecode(rv)
elif self.type is bytes:
rv = os.fsencode(rv)
else:
rv = self.type(rv)
return rv
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
rv = value
is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
if not is_dash:
if self.resolve_path:
# os.path.realpath doesn't resolve symlinks on Windows
# until Python 3.8. Use pathlib for now.
import pathlib
rv = os.fsdecode(pathlib.Path(rv).resolve())
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return self.coerce_path_result(rv)
self.fail(
_("{name} {filename!r} does not exist.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail(
_("{name} {filename!r} is a file.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail(
                _("{name} {filename!r} is a directory.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
if self.readable and not os.access(rv, os.R_OK):
self.fail(
_("{name} {filename!r} is not readable.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
if self.writable and not os.access(rv, os.W_OK):
self.fail(
_("{name} {filename!r} is not writable.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
if self.executable and not os.access(value, os.X_OK):
self.fail(
_("{name} {filename!r} is not executable.").format(
name=self.name.title(), filename=os.fsdecode(value)
),
param,
ctx,
)
return self.coerce_path_result(rv)
def shell_complete(
self, ctx: "Context", param: "Parameter", incomplete: str
) -> t.List["CompletionItem"]:
"""Return a special completion marker that tells the completion
system to use the shell to provide path completions for only
directories or any paths.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
type = "dir" if self.dir_okay and not self.file_okay else "file"
return [CompletionItem(incomplete, type=type)]
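# --- A minimal sketch, not part of click itself: with ``exists`` left at
# its default of False, a missing path skips all further checks and is
# simply coerced, e.g. to pathlib.Path when ``path_type`` requests it.
def _example_path_coercion() -> t.Any:
    import pathlib
    p = Path(exists=False, path_type=pathlib.Path)
    return p.convert("does-not-exist.txt", None, None)  # a pathlib.Path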
class Tuple(CompositeParamType):
"""The default behavior of Click is to apply a type on a value directly.
This works well in most cases, except for when `nargs` is set to a fixed
count and different types should be used for different items. In this
case the :class:`Tuple` type can be used. This type can only be used
if `nargs` is set to a fixed number.
For more information see :ref:`tuple-type`.
This can be selected by using a Python tuple literal as a type.
:param types: a list of types that should be used for the tuple items.
"""
def __init__(self, types: t.Sequence[t.Union[t.Type, ParamType]]) -> None:
self.types = [convert_type(ty) for ty in types]
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["types"] = [t.to_info_dict() for t in self.types]
return info_dict
@property
def name(self) -> str: # type: ignore
return f"<{' '.join(ty.name for ty in self.types)}>"
@property
def arity(self) -> int: # type: ignore
return len(self.types)
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
len_type = len(self.types)
len_value = len(value)
if len_value != len_type:
self.fail(
ngettext(
"{len_type} values are required, but {len_value} was given.",
"{len_type} values are required, but {len_value} were given.",
len_value,
).format(len_type=len_type, len_value=len_value),
param=param,
ctx=ctx,
)
return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
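# --- A minimal sketch, not part of click itself: each position of a Tuple
# is converted by its own type, so the value length must match the arity.
def _example_tuple_conversion() -> t.Tuple[t.Any, ...]:
    pair = Tuple([str, int])
    return pair.convert(("answer", "42"), None, None)  # ("answer", 42)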
def convert_type(ty: t.Optional[t.Any], default: t.Optional[t.Any] = None) -> ParamType:
"""Find the most appropriate :class:`ParamType` for the given Python
type. If the type isn't provided, it can be inferred from a default
value.
"""
guessed_type = False
if ty is None and default is not None:
if isinstance(default, (tuple, list)):
            # If the default is empty, ty will remain None and STRING
            # will be returned below.
if default:
item = default[0]
# A tuple of tuples needs to detect the inner types.
# Can't call convert recursively because that would
# incorrectly unwind the tuple to a single type.
if isinstance(item, (tuple, list)):
ty = tuple(map(type, item))
else:
ty = type(item)
else:
ty = type(default)
guessed_type = True
if isinstance(ty, tuple):
return Tuple(ty)
if isinstance(ty, ParamType):
return ty
if ty is str or ty is None:
return STRING
if ty is int:
return INT
if ty is float:
return FLOAT
if ty is bool:
return BOOL
if guessed_type:
return STRING
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError(
f"Attempted to use an uninstantiated parameter type ({ty})."
)
except TypeError:
# ty is an instance (correct), so issubclass fails.
pass
return FuncParamType(ty)
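# --- A minimal sketch, not part of click itself: type inference from a
# default value. A default that is a list of tuples yields a composite
# Tuple type, while a default of an unrecognized type falls back to STRING.
def _example_convert_type_inference() -> t.Tuple[ParamType, ParamType]:
    return (
        convert_type(None, default=[(1, "a")]),  # Tuple of (INT, STRING)
        convert_type(None, default=object()),  # guessed type -> STRING
    )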
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but
#: internally no string conversion takes place if the input was bytes.
#: This is usually useful when working with file paths as they can
#: appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful, which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
| 35,805 | Python | 32.33892 | 88 | 0.577126 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/formatting.py | import typing as t
from contextlib import contextmanager
from gettext import gettext as _
from ._compat import term_len
from .parser import split_opt
# Can force a width. This is used by the test system
FORCED_WIDTH: t.Optional[int] = None
def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:
widths: t.Dict[int, int] = {}
for row in rows:
for idx, col in enumerate(row):
widths[idx] = max(widths.get(idx, 0), term_len(col))
return tuple(y for x, y in sorted(widths.items()))
def iter_rows(
rows: t.Iterable[t.Tuple[str, str]], col_count: int
) -> t.Iterator[t.Tuple[str, ...]]:
for row in rows:
yield row + ("",) * (col_count - len(row))
def wrap_text(
text: str,
width: int = 78,
initial_indent: str = "",
subsequent_indent: str = "",
preserve_paragraphs: bool = False,
) -> str:
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
wrapper = TextWrapper(
width,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False,
)
if not preserve_paragraphs:
return wrapper.fill(text)
p: t.List[t.Tuple[int, bool, str]] = []
buf: t.List[str] = []
indent = None
def _flush_par() -> None:
if not buf:
return
if buf[0].strip() == "\b":
p.append((indent or 0, True, "\n".join(buf[1:])))
else:
p.append((indent or 0, False, " ".join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None
else:
if indent is None:
orig_len = term_len(line)
line = line.lstrip()
indent = orig_len - term_len(line)
buf.append(line)
_flush_par()
rv = []
for indent, raw, text in p:
with wrapper.extra_indent(" " * indent):
if raw:
rv.append(wrapper.indent_only(text))
else:
rv.append(wrapper.fill(text))
return "\n\n".join(rv)
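# --- A minimal sketch, not part of click itself: with
# ``preserve_paragraphs`` enabled, paragraphs separated by a blank line are
# rewrapped independently, and a paragraph prefixed by a ``\b`` line is
# kept verbatim (only re-indented).
def _example_wrap_text() -> str:
    text = "A paragraph that will be rewrapped to width.\n\n\b\nkept\nas-is"
    return wrap_text(text, width=20, preserve_paragraphs=True)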
class HelpFormatter:
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(
self,
indent_increment: int = 2,
width: t.Optional[int] = None,
max_width: t.Optional[int] = None,
) -> None:
import shutil
self.indent_increment = indent_increment
if max_width is None:
max_width = 80
if width is None:
width = FORCED_WIDTH
if width is None:
width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
self.width = width
self.current_indent = 0
self.buffer: t.List[str] = []
def write(self, string: str) -> None:
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self) -> None:
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self) -> None:
"""Decreases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(
self, prog: str, args: str = "", prefix: t.Optional[str] = None
) -> None:
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: The prefix for the first line. Defaults to
``"Usage: "``.
"""
if prefix is None:
prefix = f"{_('Usage:')} "
usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = " " * term_len(usage_prefix)
self.write(
wrap_text(
args,
text_width,
initial_indent=usage_prefix,
subsequent_indent=indent,
)
)
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write("\n")
indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
self.write(
wrap_text(
args, text_width, initial_indent=indent, subsequent_indent=indent
)
)
self.write("\n")
def write_heading(self, heading: str) -> None:
"""Writes a heading into the buffer."""
self.write(f"{'':>{self.current_indent}}{heading}:\n")
def write_paragraph(self) -> None:
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write("\n")
def write_text(self, text: str) -> None:
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
indent = " " * self.current_indent
self.write(
wrap_text(
text,
self.width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True,
)
)
self.write("\n")
def write_dl(
self,
rows: t.Sequence[t.Tuple[str, str]],
col_max: int = 30,
col_spacing: int = 2,
) -> None:
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError("Expected two columns for definition list")
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write(f"{'':>{self.current_indent}}{first}")
if not second:
self.write("\n")
continue
if term_len(first) <= first_col - col_spacing:
self.write(" " * (first_col - term_len(first)))
else:
self.write("\n")
self.write(" " * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
lines = wrapped_text.splitlines()
if lines:
self.write(f"{lines[0]}\n")
for line in lines[1:]:
self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
else:
self.write("\n")
@contextmanager
def section(self, name: str) -> t.Iterator[None]:
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self) -> t.Iterator[None]:
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getvalue(self) -> str:
"""Returns the buffer contents."""
return "".join(self.buffer)
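# --- A minimal sketch, not part of click itself: composing a small help
# page by hand. ``section`` writes the heading and indents the definition
# list that follows; ``getvalue`` returns the accumulated buffer.
def _example_help_page() -> str:
    formatter = HelpFormatter(width=60)
    formatter.write_usage("tool", "[OPTIONS] SRC DST")
    with formatter.section(_("Options")):
        formatter.write_dl([("-v, --verbose", "Enable verbose output.")])
    return formatter.getvalue()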
def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:
"""Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
"""
rv = []
any_prefix_is_slash = False
for opt in options:
prefix = split_opt(opt)[0]
if prefix == "/":
any_prefix_is_slash = True
rv.append((len(prefix), opt))
rv.sort(key=lambda x: x[0])
return ", ".join(x[1] for x in rv), any_prefix_is_slash
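# --- A minimal sketch, not part of click itself: options are sorted by
# prefix length, so short options precede long ones in the joined string.
def _example_join_options() -> t.Tuple[str, bool]:
    return join_options(["--verbose", "-v"])  # ("-v, --verbose", False)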
| 9,706 | Python | 31.142384 | 87 | 0.556254 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/click/_winconsole.py | # This module is based on the excellent work by Adam Bartoš who
# provided a lot of what went into the implementation here in
# the discussion of issue1602 in the Python bug tracker.
#
# There are some general differences in regards to how this works
# compared to the original patches as we do not need to patch
# the entire interpreter but just work in our little world of
# echo and prompt.
import io
import sys
import time
import typing as t
from ctypes import byref
from ctypes import c_char
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_ssize_t
from ctypes import c_ulong
from ctypes import c_void_p
from ctypes import POINTER
from ctypes import py_object
from ctypes import Structure
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
from ctypes.wintypes import LPCWSTR
from ctypes.wintypes import LPWSTR
from ._compat import _NonClosingTextIOWrapper
assert sys.platform == "win32"
import msvcrt # noqa: E402
from ctypes import windll # noqa: E402
from ctypes import WINFUNCTYPE # noqa: E402
c_ssize_p = POINTER(c_ssize_t)
kernel32 = windll.kernel32
GetStdHandle = kernel32.GetStdHandle
ReadConsoleW = kernel32.ReadConsoleW
WriteConsoleW = kernel32.WriteConsoleW
GetConsoleMode = kernel32.GetConsoleMode
GetLastError = kernel32.GetLastError
GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
("CommandLineToArgvW", windll.shell32)
)
LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32))
STDIN_HANDLE = GetStdHandle(-10)
STDOUT_HANDLE = GetStdHandle(-11)
STDERR_HANDLE = GetStdHandle(-12)
PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1
ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
EOF = b"\x1a"
MAX_BYTES_WRITTEN = 32767
try:
from ctypes import pythonapi
except ImportError:
# On PyPy we cannot get buffers so our ability to operate here is
# severely limited.
get_buffer = None
else:
class Py_buffer(Structure):
_fields_ = [
("buf", c_void_p),
("obj", py_object),
("len", c_ssize_t),
("itemsize", c_ssize_t),
("readonly", c_int),
("ndim", c_int),
("format", c_char_p),
("shape", c_ssize_p),
("strides", c_ssize_p),
("suboffsets", c_ssize_p),
("internal", c_void_p),
]
PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
PyBuffer_Release = pythonapi.PyBuffer_Release
def get_buffer(obj, writable=False):
buf = Py_buffer()
flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
PyObject_GetBuffer(py_object(obj), byref(buf), flags)
try:
buffer_type = c_char * buf.len
return buffer_type.from_address(buf.buf)
finally:
PyBuffer_Release(byref(buf))
class _WindowsConsoleRawIOBase(io.RawIOBase):
def __init__(self, handle):
self.handle = handle
def isatty(self):
super().isatty()
return True
class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
def readable(self):
return True
def readinto(self, b):
bytes_to_be_read = len(b)
if not bytes_to_be_read:
return 0
elif bytes_to_be_read % 2:
raise ValueError(
"cannot read odd number of bytes from UTF-16-LE encoded console"
)
buffer = get_buffer(b, writable=True)
code_units_to_be_read = bytes_to_be_read // 2
code_units_read = c_ulong()
rv = ReadConsoleW(
HANDLE(self.handle),
buffer,
code_units_to_be_read,
byref(code_units_read),
None,
)
if GetLastError() == ERROR_OPERATION_ABORTED:
# wait for KeyboardInterrupt
time.sleep(0.1)
if not rv:
raise OSError(f"Windows error: {GetLastError()}")
if buffer[0] == EOF:
return 0
return 2 * code_units_read.value
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
def writable(self):
return True
@staticmethod
def _get_error_message(errno):
if errno == ERROR_SUCCESS:
return "ERROR_SUCCESS"
elif errno == ERROR_NOT_ENOUGH_MEMORY:
return "ERROR_NOT_ENOUGH_MEMORY"
return f"Windows error {errno}"
def write(self, b):
bytes_to_be_written = len(b)
buf = get_buffer(b)
code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
code_units_written = c_ulong()
WriteConsoleW(
HANDLE(self.handle),
buf,
code_units_to_be_written,
byref(code_units_written),
None,
)
bytes_written = 2 * code_units_written.value
if bytes_written == 0 and bytes_to_be_written > 0:
raise OSError(self._get_error_message(GetLastError()))
return bytes_written
class ConsoleStream:
def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None:
self._text_stream = text_stream
self.buffer = byte_stream
@property
def name(self) -> str:
return self.buffer.name
def write(self, x: t.AnyStr) -> int:
if isinstance(x, str):
return self._text_stream.write(x)
try:
self.flush()
except Exception:
pass
return self.buffer.write(x)
def writelines(self, lines: t.Iterable[t.AnyStr]) -> None:
for line in lines:
self.write(line)
def __getattr__(self, name: str) -> t.Any:
return getattr(self._text_stream, name)
def isatty(self) -> bool:
return self.buffer.isatty()
def __repr__(self):
return f"<ConsoleStream name={self.name!r} encoding={self.encoding!r}>"
def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO:
text_stream = _NonClosingTextIOWrapper(
io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
"utf-16-le",
"strict",
line_buffering=True,
)
return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
_stream_factories: t.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = {
0: _get_text_stdin,
1: _get_text_stdout,
2: _get_text_stderr,
}
def _is_console(f: t.TextIO) -> bool:
if not hasattr(f, "fileno"):
return False
try:
fileno = f.fileno()
except (OSError, io.UnsupportedOperation):
return False
handle = msvcrt.get_osfhandle(fileno)
return bool(GetConsoleMode(handle, byref(DWORD())))
def _get_windows_console_stream(
f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
if (
get_buffer is not None
and encoding in {"utf-16-le", None}
and errors in {"strict", None}
and _is_console(f)
):
func = _stream_factories.get(f.fileno())
if func is not None:
b = getattr(f, "buffer", None)
if b is None:
return None
return func(b)
| 7,859 | Python | 27.071428 | 83 | 0.624634 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyperclip/__init__.py | """
Pyperclip
A cross-platform clipboard module for Python, with copy & paste functions for plain text.
By Al Sweigart [email protected]
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
if not pyperclip.is_available():
print("Copy functionality unavailable!")
On Windows, no additional modules are needed.
On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
    commands. (These commands should come with OS X.)
On Linux, install xclip or xsel via package manager. For example, in Debian:
sudo apt-get install xclip
sudo apt-get install xsel
Otherwise on Linux, you will need the gtk or PyQt5/PyQt4 modules installed.
gtk and PyQt4 modules are not available for Python 3,
and this module does not work with PyGObject yet.
Note: There seems to be a way to get gtk on Python 3, according to:
https://askubuntu.com/questions/697397/python3-is-not-supporting-gtk-module
Cygwin is currently not supported.
Security Note: This module runs programs with these names:
- which
- where
- pbcopy
- pbpaste
- xclip
- xsel
- klipper
- qdbus
A malicious user could rename or add programs with these names, tricking
Pyperclip into running them with whatever permissions the Python process has.
"""
__version__ = '1.8.0'
import contextlib
import ctypes
import os
import platform
import subprocess
import sys
import time
import warnings
from ctypes import c_size_t, sizeof, c_wchar_p, get_errno, c_wchar
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY", False)
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error """
PY2 = sys.version_info[0] == 2
STR_OR_UNICODE = unicode if PY2 else str # For paste(): Python 3 uses str, Python 2 uses unicode.
ENCODING = 'utf-8'
# The "which" unix command finds where a command is.
if platform.system() == 'Windows':
WHICH_CMD = 'where'
else:
WHICH_CMD = 'which'
def _executable_exists(name):
return subprocess.call([WHICH_CMD, name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
# Exceptions
class PyperclipException(RuntimeError):
pass
class PyperclipWindowsException(PyperclipException):
def __init__(self, message):
message += " (%s)" % ctypes.WinError()
super(PyperclipWindowsException, self).__init__(message)
class PyperclipTimeoutException(PyperclipException):
pass
def _stringifyText(text):
if PY2:
acceptedTypes = (unicode, str, int, float, bool)
else:
acceptedTypes = (str, int, float, bool)
if not isinstance(text, acceptedTypes):
raise PyperclipException('only str, int, float, and bool values can be copied to the clipboard, not %s' % (text.__class__.__name__))
return STR_OR_UNICODE(text)
def init_osx_pbcopy_clipboard():
def copy_osx_pbcopy(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(['pbcopy', 'w'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_osx_pbcopy():
p = subprocess.Popen(['pbpaste', 'r'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode(ENCODING)
return copy_osx_pbcopy, paste_osx_pbcopy
def init_osx_pyobjc_clipboard():
def copy_osx_pyobjc(text):
'''Copy string argument to clipboard'''
text = _stringifyText(text) # Converts non-str values to str.
newStr = Foundation.NSString.stringWithString_(text).nsstring()
newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
board = AppKit.NSPasteboard.generalPasteboard()
board.declareTypes_owner_([AppKit.NSStringPboardType], None)
board.setData_forType_(newData, AppKit.NSStringPboardType)
def paste_osx_pyobjc():
"Returns contents of clipboard"
board = AppKit.NSPasteboard.generalPasteboard()
content = board.stringForType_(AppKit.NSStringPboardType)
return content
return copy_osx_pyobjc, paste_osx_pyobjc
def init_gtk_clipboard():
global gtk
import gtk
def copy_gtk(text):
global cb
text = _stringifyText(text) # Converts non-str values to str.
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def paste_gtk():
clipboardContents = gtk.Clipboard().wait_for_text()
# for python 2, returns None if the clipboard is blank.
if clipboardContents is None:
return ''
else:
return clipboardContents
return copy_gtk, paste_gtk
def init_qt_clipboard():
global QApplication
# $DISPLAY should exist
# Try to import from qtpy, but if that fails try PyQt5 then PyQt4
try:
from qtpy.QtWidgets import QApplication
except:
try:
from PyQt5.QtWidgets import QApplication
except:
from PyQt4.QtGui import QApplication
app = QApplication.instance()
if app is None:
app = QApplication([])
def copy_qt(text):
text = _stringifyText(text) # Converts non-str values to str.
cb = app.clipboard()
cb.setText(text)
def paste_qt():
cb = app.clipboard()
return STR_OR_UNICODE(cb.text())
return copy_qt, paste_qt
def init_xclip_clipboard():
DEFAULT_SELECTION='c'
PRIMARY_SELECTION='p'
def copy_xclip(text, primary=False):
text = _stringifyText(text) # Converts non-str values to str.
selection=DEFAULT_SELECTION
if primary:
selection=PRIMARY_SELECTION
p = subprocess.Popen(['xclip', '-selection', selection],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_xclip(primary=False):
selection=DEFAULT_SELECTION
if primary:
selection=PRIMARY_SELECTION
p = subprocess.Popen(['xclip', '-selection', selection, '-o'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
stdout, stderr = p.communicate()
# Intentionally ignore extraneous output on stderr when clipboard is empty
return stdout.decode(ENCODING)
return copy_xclip, paste_xclip
def init_xsel_clipboard():
DEFAULT_SELECTION='-b'
PRIMARY_SELECTION='-p'
def copy_xsel(text, primary=False):
text = _stringifyText(text) # Converts non-str values to str.
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
p = subprocess.Popen(['xsel', selection_flag, '-i'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_xsel(primary=False):
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
p = subprocess.Popen(['xsel', selection_flag, '-o'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode(ENCODING)
return copy_xsel, paste_xsel
def init_klipper_clipboard():
def copy_klipper(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
text.encode(ENCODING)],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=None)
def paste_klipper():
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
# Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
# TODO: https://github.com/asweigart/pyperclip/issues/43
clipboardContents = stdout.decode(ENCODING)
# even if blank, Klipper will append a newline at the end
assert len(clipboardContents) > 0
# make sure that newline is there
assert clipboardContents.endswith('\n')
if clipboardContents.endswith('\n'):
clipboardContents = clipboardContents[:-1]
return clipboardContents
return copy_klipper, paste_klipper
def init_dev_clipboard_clipboard():
def copy_dev_clipboard(text):
text = _stringifyText(text) # Converts non-str values to str.
if text == '':
warnings.warn('Pyperclip cannot copy a blank string to the clipboard on Cygwin. This is effectively a no-op.')
if '\r' in text:
warnings.warn('Pyperclip cannot handle \\r characters on Cygwin.')
fo = open('/dev/clipboard', 'wt')
fo.write(text)
fo.close()
def paste_dev_clipboard():
fo = open('/dev/clipboard', 'rt')
content = fo.read()
fo.close()
return content
return copy_dev_clipboard, paste_dev_clipboard
def init_no_clipboard():
class ClipboardUnavailable(object):
def __call__(self, *args, **kwargs):
raise PyperclipException(EXCEPT_MSG)
if PY2:
def __nonzero__(self):
return False
else:
def __bool__(self):
return False
return ClipboardUnavailable(), ClipboardUnavailable()
# Windows-related clipboard functions:
class CheckedCall(object):
def __init__(self, f):
super(CheckedCall, self).__setattr__("f", f)
def __call__(self, *args):
ret = self.f(*args)
if not ret and get_errno():
raise PyperclipWindowsException("Error calling " + self.f.__name__)
return ret
def __setattr__(self, key, value):
setattr(self.f, key, value)
def init_windows_clipboard():
global HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
from ctypes.wintypes import (HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND,
HINSTANCE, HMENU, BOOL, UINT, HANDLE)
windll = ctypes.windll
msvcrt = ctypes.CDLL('msvcrt')
safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
INT, INT, HWND, HMENU, HINSTANCE, LPVOID]
safeCreateWindowExA.restype = HWND
safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
safeDestroyWindow.argtypes = [HWND]
safeDestroyWindow.restype = BOOL
OpenClipboard = windll.user32.OpenClipboard
OpenClipboard.argtypes = [HWND]
OpenClipboard.restype = BOOL
safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
safeCloseClipboard.argtypes = []
safeCloseClipboard.restype = BOOL
safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
safeEmptyClipboard.argtypes = []
safeEmptyClipboard.restype = BOOL
safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
safeGetClipboardData.argtypes = [UINT]
safeGetClipboardData.restype = HANDLE
safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
safeSetClipboardData.argtypes = [UINT, HANDLE]
safeSetClipboardData.restype = HANDLE
safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
safeGlobalAlloc.argtypes = [UINT, c_size_t]
safeGlobalAlloc.restype = HGLOBAL
safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
safeGlobalLock.argtypes = [HGLOBAL]
safeGlobalLock.restype = LPVOID
safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
safeGlobalUnlock.argtypes = [HGLOBAL]
safeGlobalUnlock.restype = BOOL
wcslen = CheckedCall(msvcrt.wcslen)
wcslen.argtypes = [c_wchar_p]
wcslen.restype = UINT
GMEM_MOVEABLE = 0x0002
CF_UNICODETEXT = 13
@contextlib.contextmanager
def window():
"""
Context that provides a valid Windows hwnd.
"""
# we really just need the hwnd, so setting "STATIC"
# as predefined lpClass is just fine.
hwnd = safeCreateWindowExA(0, b"STATIC", None, 0, 0, 0, 0, 0,
None, None, None, None)
try:
yield hwnd
finally:
safeDestroyWindow(hwnd)
@contextlib.contextmanager
def clipboard(hwnd):
"""
Context manager that opens the clipboard and prevents
other applications from modifying the clipboard content.
"""
# We may not get the clipboard handle immediately because
# some other application is accessing it (?)
# We try for at least 500ms to get the clipboard.
t = time.time() + 0.5
success = False
while time.time() < t:
success = OpenClipboard(hwnd)
if success:
break
time.sleep(0.01)
if not success:
raise PyperclipWindowsException("Error calling OpenClipboard")
try:
yield
finally:
safeCloseClipboard()
def copy_windows(text):
# This function is heavily based on
# http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
text = _stringifyText(text) # Converts non-str values to str.
with window() as hwnd:
# http://msdn.com/ms649048
# If an application calls OpenClipboard with hwnd set to NULL,
# EmptyClipboard sets the clipboard owner to NULL;
# this causes SetClipboardData to fail.
# => We need a valid hwnd to copy something.
with clipboard(hwnd):
safeEmptyClipboard()
if text:
# http://msdn.com/ms649051
# If the hMem parameter identifies a memory object,
# the object must have been allocated using the
# function with the GMEM_MOVEABLE flag.
count = wcslen(text) + 1
handle = safeGlobalAlloc(GMEM_MOVEABLE,
count * sizeof(c_wchar))
locked_handle = safeGlobalLock(handle)
ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar))
safeGlobalUnlock(handle)
safeSetClipboardData(CF_UNICODETEXT, handle)
def paste_windows():
with clipboard(None):
handle = safeGetClipboardData(CF_UNICODETEXT)
if not handle:
# GetClipboardData may return NULL with errno == NO_ERROR
# if the clipboard is empty.
# (Also, it may return a handle to an empty buffer,
# but technically that's not empty)
return ""
return c_wchar_p(handle).value
return copy_windows, paste_windows
def init_wsl_clipboard():
def copy_wsl(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(['clip.exe'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_wsl():
p = subprocess.Popen(['powershell.exe', '-command', 'Get-Clipboard'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
stdout, stderr = p.communicate()
# WSL appends "\r\n" to the contents.
return stdout[:-2].decode(ENCODING)
return copy_wsl, paste_wsl
# Automatic detection of clipboard mechanisms and importing is done in determine_clipboard():
def determine_clipboard():
'''
Determine the OS/platform and set the copy() and paste() functions
accordingly.
'''
global Foundation, AppKit, gtk, qtpy, PyQt4, PyQt5
# Setup for the CYGWIN platform:
if 'cygwin' in platform.system().lower(): # Cygwin has a variety of values returned by platform.system(), such as 'CYGWIN_NT-6.1'
# FIXME: pyperclip currently does not support Cygwin,
# see https://github.com/asweigart/pyperclip/issues/55
if os.path.exists('/dev/clipboard'):
warnings.warn('Pyperclip\'s support for Cygwin is not perfect, see https://github.com/asweigart/pyperclip/issues/55')
return init_dev_clipboard_clipboard()
# Setup for the WINDOWS platform:
elif os.name == 'nt' or platform.system() == 'Windows':
return init_windows_clipboard()
if platform.system() == 'Linux':
with open('/proc/version', 'r') as f:
if "Microsoft" in f.read():
return init_wsl_clipboard()
# Setup for the MAC OS X platform:
if os.name == 'mac' or platform.system() == 'Darwin':
try:
import Foundation # check if pyobjc is installed
import AppKit
except ImportError:
return init_osx_pbcopy_clipboard()
else:
return init_osx_pyobjc_clipboard()
# Setup for the LINUX platform:
if HAS_DISPLAY:
try:
import gtk # check if gtk is installed
except ImportError:
pass # We want to fail fast for all non-ImportError exceptions.
else:
return init_gtk_clipboard()
if _executable_exists("xsel"):
return init_xsel_clipboard()
if _executable_exists("xclip"):
return init_xclip_clipboard()
if _executable_exists("klipper") and _executable_exists("qdbus"):
return init_klipper_clipboard()
try:
# qtpy is a small abstraction layer that lets you write applications using a single api call to either PyQt or PySide.
# https://pypi.python.org/pypi/QtPy
import qtpy # check if qtpy is installed
except ImportError:
# If qtpy isn't installed, fall back on importing PyQt4.
try:
import PyQt5 # check if PyQt5 is installed
except ImportError:
try:
import PyQt4 # check if PyQt4 is installed
except ImportError:
pass # We want to fail fast for all non-ImportError exceptions.
else:
return init_qt_clipboard()
else:
return init_qt_clipboard()
else:
return init_qt_clipboard()
return init_no_clipboard()
def set_clipboard(clipboard):
'''
Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
the copy() and paste() functions interact with the operating system to
implement the copy/paste feature. The clipboard parameter must be one of:
- pbcopy
- pbobjc (default on Mac OS X)
- gtk
- qt
- xclip
- xsel
- klipper
- windows (default on Windows)
- no (this is what is set when no clipboard mechanism can be found)
'''
global copy, paste
clipboard_types = {'pbcopy': init_osx_pbcopy_clipboard,
'pyobjc': init_osx_pyobjc_clipboard,
'gtk': init_gtk_clipboard,
'qt': init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
'xclip': init_xclip_clipboard,
'xsel': init_xsel_clipboard,
'klipper': init_klipper_clipboard,
'windows': init_windows_clipboard,
'no': init_no_clipboard}
if clipboard not in clipboard_types:
raise ValueError('Argument must be one of %s' % (', '.join([repr(_) for _ in clipboard_types.keys()])))
# Sets pyperclip's copy() and paste() functions:
copy, paste = clipboard_types[clipboard]()
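# A minimal sketch, not part of pyperclip itself: pinning the mechanism up
# front instead of letting determine_clipboard() probe the system. Assumes
# the xclip binary is installed; otherwise copy()/paste() raise at call time.
def _example_force_xclip():
    set_clipboard('xclip')
    copy('Hello, clipboard!')
    return paste()  # 'Hello, clipboard!'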
def lazy_load_stub_copy(text):
'''
A stub function for copy(), which will load the real copy() function when
called so that the real copy() function is used for later calls.
This allows users to import pyperclip without having determine_clipboard()
automatically run, which will automatically select a clipboard mechanism.
This could be a problem if it selects, say, the memory-heavy PyQt4 module
but the user was just going to immediately call set_clipboard() to use a
different clipboard mechanism.
The lazy loading this stub function implements gives the user a chance to
call set_clipboard() to pick another clipboard mechanism. Or, if the user
simply calls copy() or paste() without calling set_clipboard() first,
will fall back on whatever clipboard mechanism that determine_clipboard()
automatically chooses.
'''
global copy, paste
copy, paste = determine_clipboard()
return copy(text)
def lazy_load_stub_paste():
'''
A stub function for paste(), which will load the real paste() function when
called so that the real paste() function is used for later calls.
This allows users to import pyperclip without having determine_clipboard()
automatically run, which will automatically select a clipboard mechanism.
This could be a problem if it selects, say, the memory-heavy PyQt4 module
but the user was just going to immediately call set_clipboard() to use a
different clipboard mechanism.
    The lazy loading this stub function implements gives the user a chance to
    call set_clipboard() to pick another clipboard mechanism. Or, if the user
    simply calls copy() or paste() without calling set_clipboard() first,
    pyperclip will fall back on whatever clipboard mechanism
    determine_clipboard() automatically chooses.
'''
global copy, paste
copy, paste = determine_clipboard()
return paste()
def is_available():
return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste
# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
def waitForPaste(timeout=None):
"""This function call blocks until a non-empty text string exists on the
clipboard. It returns this text.
    This function raises PyperclipTimeoutException if `timeout` seconds
    elapse without non-empty text being put on the clipboard."""
startTime = time.time()
while True:
clipboardText = paste()
if clipboardText != '':
return clipboardText
time.sleep(0.01)
if timeout is not None and time.time() > startTime + timeout:
raise PyperclipTimeoutException('waitForPaste() timed out after ' + str(timeout) + ' seconds.')
def waitForNewPaste(timeout=None):
"""This function call blocks until a new text string exists on the
clipboard that is different from the text that was there when the function
was first called. It returns this text.
    This function raises PyperclipTimeoutException if `timeout` seconds
    elapse without the clipboard text changing."""
startTime = time.time()
originalText = paste()
while True:
currentText = paste()
if currentText != originalText:
return currentText
time.sleep(0.01)
if timeout is not None and time.time() > startTime + timeout:
raise PyperclipTimeoutException('waitForNewPaste() timed out after ' + str(timeout) + ' seconds.')
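# Hedged usage sketch: waiting up to five seconds for the user to copy
# something (the timeout value and the fallback are illustrative):
#
#   try:
#       text = waitForPaste(timeout=5)
#   except PyperclipTimeoutException:
#       text = ''  # nothing was copied in time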
__all__ = ['copy', 'paste', 'waitForPaste', 'waitForNewPaste', 'set_clipboard', 'determine_clipboard']
| 24,106 | Python | 33.686331 | 140 | 0.63424 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyperclip/__main__.py | import pyperclip
import sys
if len(sys.argv) > 1 and sys.argv[1] in ('-c', '--copy'):
pyperclip.copy(sys.stdin.read())
elif len(sys.argv) > 1 and sys.argv[1] in ('-p', '--paste'):
sys.stdout.write(pyperclip.paste())
else:
print('Usage: python -m pyperclip [-c | --copy] | [-p | --paste]')
print()
print('When copying, stdin will be placed on the clipboard.')
print('When pasting, the clipboard will be written to stdout.') | 447 | Python | 36.33333 | 70 | 0.635347 |
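# Illustrative shell usage (assuming this package is importable as pyperclip):
#   $ echo hello | python -m pyperclip --copy     # stdin -> clipboard
#   $ python -m pyperclip --paste                 # clipboard -> stdout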
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/python.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Python source expertise for coverage.py"""
import os.path
import types
import zipimport
from coverage import env
from coverage.exceptions import CoverageException, NoSource
from coverage.files import canonical_filename, relative_filename
from coverage.misc import contract, expensive, isolate_module, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
os = isolate_module(os)
@contract(returns='bytes')
def read_python_source(filename):
"""Read the Python source text from `filename`.
Returns bytes.
"""
with open(filename, "rb") as f:
source = f.read()
if env.IRONPYTHON:
# IronPython reads Unicode strings even for "rb" files.
source = bytes(source)
return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
exts = [".py", ".pyw"]
else:
exts = [ext]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
source = read_python_source(try_filename)
break
# Maybe it's in a zip file?
source = get_zip_bytes(try_filename)
if source is not None:
break
else:
# Couldn't find source.
raise NoSource(f"No source for code: '{filename}'.")
# Replace \f because of http://bugs.python.org/issue19035
source = source.replace(b'\f', b' ')
source = source.decode(source_encoding(source), "replace")
# Python code should always end with a line with a newline.
if source and source[-1] != '\n':
source += '\n'
return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except OSError:
continue
return data
return None
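# Hedged sketch of the zip path handling (the archive path is hypothetical):
#
#   source = get_zip_bytes("/site-packages/bundle.egg/pkg/mod.py")
#   if source is not None:
#       text = source.decode(source_encoding(source))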
def source_for_file(filename):
"""Return the source filename for `filename`.
Given a file name being traced, return the best guess as to the source
file to attribute it to.
"""
if filename.endswith(".py"):
# .py files are themselves source files.
return filename
elif filename.endswith((".pyc", ".pyo")):
# Bytecode files probably have source files near them.
py_filename = filename[:-1]
if os.path.exists(py_filename):
# Found a .py file, use that.
return py_filename
if env.WINDOWS:
# On Windows, it could be a .pyw file.
pyw_filename = py_filename + "w"
if os.path.exists(pyw_filename):
return pyw_filename
# Didn't find source, but it's probably the .py file we want.
return py_filename
elif filename.endswith("$py.class"):
# Jython is easy to guess.
return filename[:-9] + ".py"
# No idea, just use the file name as-is.
return filename
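# For illustration (paths are hypothetical): traced bytecode files are mapped
# back to their probable source files:
#
#   source_for_file("pkg/mod.pyc")       # -> "pkg/mod.py" (the probable source)
#   source_for_file("pkg/mod$py.class")  # -> "pkg/mod.py" (Jython)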
def source_for_morf(morf):
"""Get the source filename for the module-or-file `morf`."""
if hasattr(morf, '__file__') and morf.__file__:
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
# This could be a PEP-420 namespace package.
raise CoverageException(f"Module {morf} has no file")
else:
filename = morf
filename = source_for_file(filename)
return filename
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
def __init__(self, morf, coverage=None):
self.coverage = coverage
filename = source_for_morf(morf)
super().__init__(canonical_filename(filename))
if hasattr(morf, '__name__'):
name = morf.__name__.replace(".", os.sep)
if os.path.basename(filename).startswith('__init__.'):
name += os.sep + "__init__"
name += ".py"
else:
name = relative_filename(filename)
self.relname = name
self._source = None
self._parser = None
self._excluded = None
def __repr__(self):
return f"<PythonFileReporter {self.filename!r}>"
@contract(returns='unicode')
def relative_filename(self):
return self.relname
@property
def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
self._parser.parse_source()
return self._parser
def lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.statements
def excluded_lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
@expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
return no_branch
@expensive
def arcs(self):
return self.parser.arcs()
@expensive
def exit_counts(self):
return self.parser.exit_counts()
def missing_arc_description(self, start, end, executed_arcs=None):
return self.parser.missing_arc_description(start, end, executed_arcs)
@contract(returns='unicode')
def source(self):
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
def source_token_lines(self):
return source_token_lines(self.source())
| 7,476 | Python | 29.149193 | 79 | 0.609952 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/templite.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""A simple Python template renderer, for a nano-subset of Django syntax.
For a detailed discussion of this code, see this chapter from 500 Lines:
http://aosabook.org/en/500L/a-template-engine.html
"""
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
import re
class TempliteSyntaxError(ValueError):
"""Raised when a template has a syntax error."""
pass
class TempliteValueError(ValueError):
"""Raised when an expression won't evaluate in a template."""
pass
class CodeBuilder:
"""Build source code conveniently."""
def __init__(self, indent=0):
self.code = []
self.indent_level = indent
def __str__(self):
return "".join(str(c) for c in self.code)
def add_line(self, line):
"""Add a line of source to the code.
Indentation and newline will be added for you, don't provide them.
"""
self.code.extend([" " * self.indent_level, line, "\n"])
def add_section(self):
"""Add a section, a sub-CodeBuilder."""
section = CodeBuilder(self.indent_level)
self.code.append(section)
return section
INDENT_STEP = 4 # PEP8 says so!
def indent(self):
"""Increase the current indent for following lines."""
self.indent_level += self.INDENT_STEP
def dedent(self):
"""Decrease the current indent for following lines."""
self.indent_level -= self.INDENT_STEP
def get_globals(self):
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
global_namespace = {}
exec(python_source, global_namespace)
return global_namespace
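# A minimal sketch of CodeBuilder in use (the generated function is made up):
#
#   code = CodeBuilder()
#   code.add_line("def double(x):")
#   code.indent()
#   code.add_line("return x * 2")
#   code.dedent()
#   double = code.get_globals()["double"]   # double(21) == 42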
class Templite:
"""A simple template renderer, for a nano-subset of Django syntax.
Supported constructs are extended variable access::
{{var.modifier.modifier|filter|filter}}
loops::
{% for var in list %}...{% endfor %}
and ifs::
{% if var %}...{% endif %}
Comments are within curly-hash markers::
{# This will be ignored #}
Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped
and joined. Be careful, this could join words together!
Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
which will collapse the whitespace following the tag.
Construct a Templite with the template text, then use `render` against a
dictionary context to create a finished string::
templite = Templite('''
<h1>Hello {{name|upper}}!</h1>
{% for topic in topics %}
<p>You are interested in {{topic}}.</p>
            {% endfor %}
''',
{'upper': str.upper},
)
text = templite.render({
'name': "Ned",
'topics': ['Python', 'Geometry', 'Juggling'],
})
"""
def __init__(self, text, *contexts):
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
These are good for filters and global values.
"""
self.context = {}
for context in contexts:
self.context.update(context)
self.all_vars = set()
self.loop_vars = set()
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
code = CodeBuilder()
code.add_line("def render_function(context, do_dots):")
code.indent()
vars_code = code.add_section()
code.add_line("result = []")
code.add_line("append_result = result.append")
code.add_line("extend_result = result.extend")
code.add_line("to_str = str")
buffered = []
def flush_output():
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
code.add_line("append_result(%s)" % buffered[0])
elif len(buffered) > 1:
code.add_line("extend_result([%s])" % ", ".join(buffered))
del buffered[:]
ops_stack = []
# Split the text to form a list of tokens.
tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
squash = in_joined = False
for token in tokens:
if token.startswith('{'):
start, end = 2, -2
squash = (token[-3] == '-')
if squash:
end = -3
if token.startswith('{#'):
# Comment: ignore it and move on.
continue
elif token.startswith('{{'):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
# token.startswith('{%')
# Action tag: split into words and parse further.
flush_output()
words = token[start:end].strip().split()
if words[0] == 'if':
                        # An if statement: evaluate the expression to decide whether to render the block.
if len(words) != 2:
self._syntax_error("Don't understand if", token)
ops_stack.append('if')
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
elif words[0] == 'for':
# A loop: iterate over expression result.
if len(words) != 4 or words[2] != 'in':
self._syntax_error("Don't understand for", token)
ops_stack.append('for')
self._variable(words[1], self.loop_vars)
code.add_line(
"for c_{} in {}:".format(
words[1],
self._expr_code(words[3])
)
)
code.indent()
elif words[0] == 'joined':
ops_stack.append('joined')
in_joined = True
elif words[0].startswith('end'):
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
end_what = words[0][3:]
if not ops_stack:
self._syntax_error("Too many ends", token)
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
if end_what == 'joined':
in_joined = False
else:
code.dedent()
else:
self._syntax_error("Don't understand tag", words[0])
else:
# Literal content. If it isn't empty, output it.
if in_joined:
token = re.sub(r"\s*\n\s*", "", token.strip())
elif squash:
token = token.lstrip()
if token:
buffered.append(repr(token))
if ops_stack:
self._syntax_error("Unmatched action tag", ops_stack[-1])
flush_output()
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")
code.add_line('return "".join(result)')
code.dedent()
self._render_function = code.get_globals()['render_function']
def _expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
code = self._expr_code(pipes[0])
for func in pipes[1:]:
self._variable(func, self.all_vars)
code = f"c_{func}({code})"
elif "." in expr:
dots = expr.split(".")
code = self._expr_code(dots[0])
args = ", ".join(repr(d) for d in dots[1:])
code = f"do_dots({code}, {args})"
else:
self._variable(expr, self.all_vars)
code = "c_%s" % expr
return code
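    # For illustration only (not executed): the expression "name|upper"
    # compiles to "c_upper(c_name)", and "user.name" compiles to
    # "do_dots(c_user, 'name')".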
def _syntax_error(self, msg, thing):
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError(f"{msg}: {thing!r}")
def _variable(self, name, vars_set):
"""Track that `name` is used as a variable.
Adds the name to `vars_set`, a set of variable names.
        Raises a syntax error if `name` is not a valid name.
"""
if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
self._syntax_error("Not a valid name", name)
vars_set.add(name)
def render(self, context=None):
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
"""
# Make the complete context we'll use.
render_context = dict(self.context)
if context:
render_context.update(context)
return self._render_function(render_context, self._do_dots)
def _do_dots(self, value, *dots):
"""Evaluate dotted expressions at run-time."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
try:
value = value[dot]
except (TypeError, KeyError) as exc:
raise TempliteValueError(
f"Couldn't evaluate {value!r}.{dot}"
) from exc
if callable(value):
value = value()
return value
| 10,374 | Python | 33.815436 | 83 | 0.506362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/misc.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Miscellaneous stuff for coverage.py."""
import contextlib
import errno
import hashlib
import importlib
import importlib.util
import inspect
import locale
import os
import os.path
import random
import re
import socket
import sys
import types
from coverage import env
from coverage.exceptions import CoverageException
# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of
# other packages were importing the exceptions from misc, so import them here.
# pylint: disable=unused-wildcard-import
from coverage.exceptions import * # pylint: disable=wildcard-import
ISOLATED_MODULES = {}
def isolate_module(mod):
"""Copy a module so that we are isolated from aggressive mocking.
If a test suite mocks os.path.exists (for example), and then we need to use
it during the test, everything will get tangled up if we use their mock.
Making a copy of the module when we import it will isolate coverage.py from
those complications.
"""
if mod not in ISOLATED_MODULES:
new_mod = types.ModuleType(mod.__name__)
ISOLATED_MODULES[mod] = new_mod
for name in dir(mod):
value = getattr(mod, name)
if isinstance(value, types.ModuleType):
value = isolate_module(value)
setattr(new_mod, name, value)
return ISOLATED_MODULES[mod]
os = isolate_module(os)
class SysModuleSaver:
"""Saves the contents of sys.modules, and removes new modules later."""
def __init__(self):
self.old_modules = set(sys.modules)
def restore(self):
"""Remove any modules imported since this object started."""
new_modules = set(sys.modules) - self.old_modules
for m in new_modules:
del sys.modules[m]
@contextlib.contextmanager
def sys_modules_saved():
"""A context manager to remove any modules imported during a block."""
saver = SysModuleSaver()
try:
yield
finally:
saver.restore()
def import_third_party(modname):
"""Import a third-party module we need, but might not be installed.
This also cleans out the module after the import, so that coverage won't
appear to have imported it. This lets the third party use coverage for
their own tests.
Arguments:
modname (str): the name of the module to import.
Returns:
The imported module, or None if the module couldn't be imported.
"""
with sys_modules_saved():
try:
return importlib.import_module(modname)
except ImportError:
return None
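# Sketch of an optional-dependency import (the module name is illustrative);
# None is returned instead of raising if it isn't installed:
#
#   tomllib = import_third_party("tomli")
#   if tomllib is not None:
#       config = tomllib.loads(text)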
def dummy_decorator_with_args(*args_unused, **kwargs_unused):
"""Dummy no-op implementation of a decorator with arguments."""
def _decorator(func):
return func
return _decorator
# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
if env.USE_CONTRACTS:
from contracts import contract # pylint: disable=unused-import
from contracts import new_contract as raw_new_contract
def new_contract(*args, **kwargs):
"""A proxy for contracts.new_contract that doesn't mind happening twice."""
try:
raw_new_contract(*args, **kwargs)
except ValueError:
# During meta-coverage, this module is imported twice, and
# PyContracts doesn't like redefining contracts. It's OK.
pass
# Define contract words that PyContract doesn't have.
new_contract('bytes', lambda v: isinstance(v, bytes))
new_contract('unicode', lambda v: isinstance(v, str))
def one_of(argnames):
"""Ensure that only one of the argnames is non-None."""
def _decorator(func):
argnameset = {name.strip() for name in argnames.split(",")}
def _wrapper(*args, **kwargs):
vals = [kwargs.get(name) for name in argnameset]
assert sum(val is not None for val in vals) == 1
return func(*args, **kwargs)
return _wrapper
return _decorator
else: # pragma: not testing
# We aren't using real PyContracts, so just define our decorators as
# stunt-double no-ops.
contract = dummy_decorator_with_args
one_of = dummy_decorator_with_args
def new_contract(*args_unused, **kwargs_unused):
"""Dummy no-op implementation of `new_contract`."""
pass
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def expensive(fn):
"""A decorator to indicate that a method shouldn't be called more than once.
Normally, this does nothing. During testing, this raises an exception if
called more than once.
"""
if env.TESTING:
attr = "_once_" + fn.__name__
def _wrapper(self):
if hasattr(self, attr):
raise AssertionError(f"Shouldn't have called {fn.__name__} more than once")
setattr(self, attr, True)
return fn(self)
return _wrapper
else:
return fn # pragma: not testing
def bool_or_none(b):
"""Return bool(b), but preserve None."""
if b is None:
return None
else:
return bool(b)
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
return "|".join("(?:%s)" % r for r in regexes)
def file_be_gone(path):
"""Remove a file, and don't get annoyed if it doesn't exist."""
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def ensure_dir(directory):
"""Make sure the directory exists.
If `directory` is None or empty, do nothing.
"""
if directory:
os.makedirs(directory, exist_ok=True)
def ensure_dir_for_file(path):
"""Make sure the directory for the path exists."""
ensure_dir(os.path.dirname(path))
def output_encoding(outfile=None):
"""Determine the encoding to use for output written to `outfile` or stdout."""
if outfile is None:
outfile = sys.stdout
encoding = (
getattr(outfile, "encoding", None) or
getattr(sys.__stdout__, "encoding", None) or
locale.getpreferredencoding()
)
return encoding
def filename_suffix(suffix):
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
then build a suffix incorporating the hostname, process id, and a random
number.
Returns a string or None.
"""
if suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
dice = random.Random(os.urandom(8)).randint(0, 999999)
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
return suffix
class Hasher:
"""Hashes Python data for fingerprinting."""
def __init__(self):
self.hash = hashlib.new("sha3_256")
def update(self, v):
"""Add `v` to the hash, recursively if needed."""
self.hash.update(str(type(v)).encode("utf-8"))
if isinstance(v, str):
self.hash.update(v.encode("utf-8"))
elif isinstance(v, bytes):
self.hash.update(v)
elif v is None:
pass
elif isinstance(v, (int, float)):
self.hash.update(str(v).encode("utf-8"))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
elif isinstance(v, dict):
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
else:
for k in dir(v):
if k.startswith('__'):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
self.hash.update(b'.')
def hexdigest(self):
"""Retrieve the hex digest of the hash."""
return self.hash.hexdigest()[:32]
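# Hedged sketch of fingerprinting with Hasher (the dict is illustrative):
#
#   h = Hasher()
#   h.update({"branch": True, "timid": False})
#   fingerprint = h.hexdigest()   # 32 hex chars, stable for equal inputs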
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = f"{klass.__module__}.{klass.__name__}"
raise NotImplementedError(
f"{thing} {name!r} needs to implement {func_name}()"
)
class DefaultValue:
"""A sentinel object to use for unusual default-value needs.
Construct with a string that will be used as the repr, for display in help
and Sphinx output.
"""
def __init__(self, display_as):
self.display_as = display_as
def __repr__(self):
return self.display_as
def substitute_variables(text, variables):
"""Substitute ``${VAR}`` variables in `text` with their values.
Variables in the text can take a number of shell-inspired forms::
$VAR
${VAR}
${VAR?} strict: an error if VAR isn't defined.
${VAR-missing} defaulted: "missing" if VAR isn't defined.
$$ just a dollar sign.
`variables` is a dictionary of variable values.
Returns the resulting text with values substituted.
"""
dollar_pattern = r"""(?x) # Use extended regex syntax
\$ # A dollar sign,
(?: # then
(?P<dollar>\$) | # a dollar sign, or
(?P<word1>\w+) | # a plain word, or
{ # a {-wrapped
(?P<word2>\w+) # word,
(?:
(?P<strict>\?) | # with a strict marker
-(?P<defval>[^}]*) # or a default value
)? # maybe.
}
)
"""
dollar_groups = ('dollar', 'word1', 'word2')
def dollar_replace(match):
"""Called for each $replacement."""
# Only one of the groups will have matched, just get its text.
word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks
if word == "$":
return "$"
elif word in variables:
return variables[word]
elif match.group('strict'):
msg = f"Variable {word} is undefined: {text!r}"
raise CoverageException(msg)
else:
return match.group('defval')
text = re.sub(dollar_pattern, dollar_replace, text)
return text
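# Behavior sketch (the variable names and values are illustrative):
#
#   substitute_variables("hi ${NAME-world}", {})   # -> "hi world"
#   substitute_variables("$$HOME", {})             # -> "$HOME"
#   substitute_variables("${NAME?}", {})           # raises CoverageException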
def format_local_datetime(dt):
"""Return a string with local timezone representing the date.
"""
return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
def import_local_file(modname, modfile=None):
"""Import a local file as a module.
Opens a file in the current directory named `modname`.py, imports it
as `modname`, and returns the module object. `modfile` is the file to
import if it isn't in the current directory.
"""
if modfile is None:
modfile = modname + '.py'
spec = importlib.util.spec_from_file_location(modname, modfile)
mod = importlib.util.module_from_spec(spec)
sys.modules[modname] = mod
spec.loader.exec_module(mod)
return mod
def human_key(s):
"""Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
def tryint(s):
"""If `s` is a number, return an int, else `s` unchanged."""
try:
return int(s)
except ValueError:
return s
return [tryint(c) for c in re.split(r"(\d+)", s)]
def human_sorted(strings):
"""Sort the given iterable of strings the way that humans expect.
Numeric components in the strings are sorted as numbers.
Returns the sorted list.
"""
return sorted(strings, key=human_key)
def human_sorted_items(items, reverse=False):
"""Sort the (string, value) items the way humans expect.
Returns the sorted list of items.
"""
return sorted(items, key=lambda pair: (human_key(pair[0]), pair[1]), reverse=reverse)
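# For illustration: numeric chunks sort as numbers, so
#   human_sorted(["x10", "x2", "x1"]) == ["x1", "x2", "x10"]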
| 12,751 | Python | 29.507177 | 91 | 0.599012 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/control.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Core control stuff for coverage.py."""
import atexit
import collections
import contextlib
import os
import os.path
import platform
import sys
import time
import warnings
from coverage import env
from coverage.annotate import AnnotateReporter
from coverage.collector import Collector, CTracer
from coverage.config import read_coverage_config
from coverage.context import should_start_context_test_function, combine_context_switchers
from coverage.data import CoverageData, combine_parallel_data
from coverage.debug import DebugControl, short_stack, write_formatted_info
from coverage.disposition import disposition_debug_msg
from coverage.exceptions import CoverageException, CoverageWarning
from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory
from coverage.html import HtmlReporter
from coverage.inorout import InOrOut
from coverage.jsonreport import JsonReporter
from coverage.misc import bool_or_none, join_regex, human_sorted, human_sorted_items
from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module
from coverage.plugin import FileReporter
from coverage.plugin_support import Plugins
from coverage.python import PythonFileReporter
from coverage.report import render_report
from coverage.results import Analysis
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
try:
from coverage.multiproc import patch_multiprocessing
except ImportError: # pragma: only jython
# Jython has no multiprocessing module.
patch_multiprocessing = None
os = isolate_module(os)
@contextlib.contextmanager
def override_config(cov, **kwargs):
"""Temporarily tweak the configuration of `cov`.
The arguments are applied to `cov.config` with the `from_args` method.
At the end of the with-statement, the old configuration is restored.
"""
original_config = cov.config
cov.config = cov.config.copy()
try:
cov.config.from_args(**kwargs)
yield
finally:
cov.config = original_config
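# Illustrative use (assuming `cov` is a Coverage instance):
#
#   with override_config(cov, show_missing=True):
#       cov.report()
#   # the original cov.config is restored here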
_DEFAULT_DATAFILE = DefaultValue("MISSING")
class Coverage:
"""Programmatic access to coverage.py.
To use::
from coverage import Coverage
cov = Coverage()
cov.start()
#.. call your code ..
cov.stop()
cov.html_report(directory='covhtml')
Note: in keeping with Python custom, names starting with underscore are
not part of the public API. They might stop working at any point. Please
limit yourself to documented methods to avoid problems.
"""
# The stack of started Coverage instances.
_instances = []
@classmethod
def current(cls):
"""Get the latest started `Coverage` instance, if any.
Returns: a `Coverage` instance, or None.
.. versionadded:: 5.0
"""
if cls._instances:
return cls._instances[-1]
else:
return None
def __init__(
self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, source_pkgs=None, omit=None, include=None, debug=None,
concurrency=None, check_preimported=False, context=None,
messages=False,
): # pylint: disable=too-many-arguments
"""
Many of these arguments duplicate and override values that can be
provided in a configuration file. Parameters that are missing here
will use values from the config file.
`data_file` is the base name of the data file to use. The config value
defaults to ".coverage". None can be provided to prevent writing a data
file. `data_suffix` is appended (with a dot) to `data_file` to create
the final file name. If `data_suffix` is simply True, then a suffix is
created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what configuration file to read:
* If it is ".coveragerc", it is interpreted as if it were True,
for backward compatibility.
* If it is a string, it is the name of the file to read. If the
file can't be read, it is an error.
* If it is True, then a few standard files names are tried
(".coveragerc", "setup.cfg", "tox.ini"). It is not an error for
these files to not be found.
* If it is False, then no configuration file is read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`source_pkgs` is a list of package names. It works the same as
`source`, but can be used to name packages where the name can also be
interpreted as a file path.
`include` and `omit` are lists of file name patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
`debug` is a list of strings indicating what debugging information is
desired.
`concurrency` is a string indicating the concurrency library being used
in the measured code. Without this, coverage.py will get incorrect
results if these libraries are in use. Valid strings are "greenlet",
"eventlet", "gevent", "multiprocessing", or "thread" (the default).
This can also be a list of these strings.
If `check_preimported` is true, then when coverage is started, the
already-imported files will be checked to see if they should be
measured by coverage. Importing measured files before coverage is
started can mean that code is missed.
`context` is a string to use as the :ref:`static context
<static_contexts>` label for collected data.
If `messages` is true, some messages will be printed to stdout
indicating what is happening.
.. versionadded:: 4.0
The `concurrency` parameter.
.. versionadded:: 4.2
The `concurrency` parameter can now be a list of strings.
.. versionadded:: 5.0
The `check_preimported` and `context` parameters.
.. versionadded:: 5.3
The `source_pkgs` parameter.
.. versionadded:: 6.0
The `messages` parameter.
"""
# data_file=None means no disk file at all. data_file missing means
# use the value from the config file.
self._no_disk = data_file is None
if data_file is _DEFAULT_DATAFILE:
data_file = None
self.config = None
# This is injectable by tests.
self._debug_file = None
self._auto_load = self._auto_save = auto_data
self._data_suffix_specified = data_suffix
# Is it ok for no data to be collected?
self._warn_no_data = True
self._warn_unimported_source = True
self._warn_preimported_source = check_preimported
self._no_warn_slugs = None
self._messages = messages
# A record of all the warnings that have been issued.
self._warnings = []
# Other instance attributes, set later.
self._data = self._collector = None
self._plugins = None
self._inorout = None
self._data_suffix = self._run_suffix = None
self._exclude_re = None
self._debug = None
self._file_mapper = None
# State machine variables:
# Have we initialized everything?
self._inited = False
self._inited_for_start = False
# Have we started collecting and not stopped it?
self._started = False
# Should we write the debug output?
self._should_write_debug = True
# Build our configuration from a number of sources.
self.config = read_coverage_config(
config_file=config_file, warn=self._warn,
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug,
report_omit=omit, report_include=include,
concurrency=concurrency, context=context,
)
# If we have sub-process measurement happening automatically, then we
# want any explicit creation of a Coverage object to mean, this process
# is already coverage-aware, so don't auto-measure it. By now, the
# auto-creation of a Coverage object has already happened. But we can
# find it and tell it not to save its data.
if not env.METACOV:
_prevent_sub_process_measurement()
def _init(self):
"""Set all the initial state.
This is called by the public methods to initialize state. This lets us
construct a :class:`Coverage` object, then tweak its state before this
function is called.
"""
if self._inited:
return
self._inited = True
# Create and configure the debugging controller. COVERAGE_DEBUG_FILE
# is an environment variable, the name of a file to append debug logs
# to.
self._debug = DebugControl(self.config.debug, self._debug_file)
if "multiprocessing" in (self.config.concurrency or ()):
# Multi-processing uses parallel for the subprocesses, so also use
# it for the main process.
self.config.parallel = True
# _exclude_re is a dict that maps exclusion list names to compiled regexes.
self._exclude_re = {}
set_relative_directory()
self._file_mapper = relative_filename if self.config.relative_files else abs_file
# Load plugins
self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug)
# Run configuring plugins.
for plugin in self._plugins.configurers:
# We need an object with set_option and get_option. Either self or
# self.config will do. Choosing randomly stops people from doing
# other things with those objects, against the public API. Yes,
# this is a bit childish. :)
plugin.configure([self, self.config][int(time.time()) % 2])
def _post_init(self):
"""Stuff to do after everything is initialized."""
if self._should_write_debug:
self._should_write_debug = False
self._write_startup_debug()
# '[run] _crash' will raise an exception if the value is close by in
# the call stack, for testing error handling.
if self.config._crash and self.config._crash in short_stack(limit=4):
raise Exception(f"Crashing because called by {self.config._crash}")
def _write_startup_debug(self):
"""Write out debug info at startup if needed."""
wrote_any = False
with self._debug.without_callers():
if self._debug.should('config'):
config_info = human_sorted_items(self.config.__dict__.items())
config_info = [(k, v) for k, v in config_info if not k.startswith('_')]
write_formatted_info(self._debug, "config", config_info)
wrote_any = True
if self._debug.should('sys'):
write_formatted_info(self._debug, "sys", self.sys_info())
for plugin in self._plugins:
header = "sys: " + plugin._coverage_plugin_name
info = plugin.sys_info()
write_formatted_info(self._debug, header, info)
wrote_any = True
if wrote_any:
write_formatted_info(self._debug, "end", ())
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`.
Calls `_should_trace_internal`, and returns the FileDisposition.
"""
disp = self._inorout.should_trace(filename, frame)
if self._debug.should('trace'):
self._debug.write(disposition_debug_msg(disp))
return disp
def _check_include_omit_etc(self, filename, frame):
"""Check a file name against the include/omit/etc, rules, verbosely.
Returns a boolean: True if the file should be traced, False if not.
"""
reason = self._inorout.check_include_omit_etc(filename, frame)
if self._debug.should('trace'):
if not reason:
msg = f"Including {filename!r}"
else:
msg = f"Not including {filename!r}: {reason}"
self._debug.write(msg)
return not reason
def _warn(self, msg, slug=None, once=False):
"""Use `msg` as a warning.
For warning suppression, use `slug` as the shorthand.
If `once` is true, only show this warning once (determined by the
slug.)
"""
if self._no_warn_slugs is None:
if self.config is not None:
self._no_warn_slugs = list(self.config.disable_warnings)
if self._no_warn_slugs is not None:
if slug in self._no_warn_slugs:
# Don't issue the warning
return
self._warnings.append(msg)
if slug:
msg = f"{msg} ({slug})"
if self._debug is not None and self._debug.should('pid'):
msg = f"[{os.getpid()}] {msg}"
warnings.warn(msg, category=CoverageWarning, stacklevel=2)
if once:
self._no_warn_slugs.append(slug)
def _message(self, msg):
"""Write a message to the user, if configured to do so."""
if self._messages:
print(msg)
def get_option(self, option_name):
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
Returns the value of the option. The type depends on the option
selected.
As a special case, an `option_name` of ``"paths"`` will return an
OrderedDict with the entire ``[paths]`` section value.
.. versionadded:: 4.0
"""
return self.config.get_option(option_name)
def set_option(self, option_name, value):
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with ``"run:branch"``.
`value` is the new value for the option. This should be an
appropriate Python value. For example, use True for booleans, not the
string ``"True"``.
As an example, calling::
cov.set_option("run:branch", True)
has the same effect as this configuration file::
[run]
branch = True
As a special case, an `option_name` of ``"paths"`` will replace the
entire ``[paths]`` section. The value should be an OrderedDict.
.. versionadded:: 4.0
"""
self.config.set_option(option_name, value)
def load(self):
"""Load previously-collected coverage data from the data file."""
self._init()
if self._collector:
self._collector.reset()
should_skip = self.config.parallel and not os.path.exists(self.config.data_file)
if not should_skip:
self._init_data(suffix=None)
self._post_init()
if not should_skip:
self._data.read()
def _init_for_start(self):
"""Initialization for start()"""
# Construct the collector.
concurrency = self.config.concurrency or ()
if "multiprocessing" in concurrency:
if not patch_multiprocessing:
raise CoverageException( # pragma: only jython
"multiprocessing is not supported on this Python"
)
patch_multiprocessing(rcfile=self.config.config_file)
dycon = self.config.dynamic_context
if not dycon or dycon == "none":
context_switchers = []
elif dycon == "test_function":
context_switchers = [should_start_context_test_function]
else:
raise CoverageException(f"Don't understand dynamic_context setting: {dycon!r}")
context_switchers.extend(
plugin.dynamic_context for plugin in self._plugins.context_switchers
)
should_start_context = combine_context_switchers(context_switchers)
self._collector = Collector(
should_trace=self._should_trace,
check_include=self._check_include_omit_etc,
should_start_context=should_start_context,
file_mapper=self._file_mapper,
timid=self.config.timid,
branch=self.config.branch,
warn=self._warn,
concurrency=concurrency,
)
suffix = self._data_suffix_specified
if suffix or self.config.parallel:
if not isinstance(suffix, str):
# if data_suffix=True, use .machinename.pid.random
suffix = True
else:
suffix = None
self._init_data(suffix)
self._collector.use_data(self._data, self.config.context)
# Early warning if we aren't going to be able to support plugins.
if self._plugins.file_tracers and not self._collector.supports_plugins:
self._warn(
"Plugin file tracers ({}) aren't supported with {}".format(
", ".join(
plugin._coverage_plugin_name
for plugin in self._plugins.file_tracers
),
self._collector.tracer_name(),
)
)
for plugin in self._plugins.file_tracers:
plugin._coverage_enabled = False
# Create the file classifying substructure.
self._inorout = InOrOut(
warn=self._warn,
debug=(self._debug if self._debug.should('trace') else None),
)
self._inorout.configure(self.config)
self._inorout.plugins = self._plugins
self._inorout.disp_class = self._collector.file_disposition_class
# It's useful to write debug info after initing for start.
self._should_write_debug = True
atexit.register(self._atexit)
def _init_data(self, suffix):
"""Create a data file if we don't have one yet."""
if self._data is None:
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
ensure_dir_for_file(self.config.data_file)
self._data = CoverageData(
basename=self.config.data_file,
suffix=suffix,
warn=self._warn,
debug=self._debug,
no_disk=self._no_disk,
)
def start(self):
"""Start measuring code coverage.
Coverage measurement only occurs in functions called after
:meth:`start` is invoked. Statements in the same scope as
:meth:`start` won't be measured.
Once you invoke :meth:`start`, you must also call :meth:`stop`
eventually, or your process might not shut down cleanly.
"""
self._init()
if not self._inited_for_start:
self._inited_for_start = True
self._init_for_start()
self._post_init()
# Issue warnings for possible problems.
self._inorout.warn_conflicting_settings()
# See if we think some code that would eventually be measured has
# already been imported.
if self._warn_preimported_source:
self._inorout.warn_already_imported_files()
if self._auto_load:
self.load()
self._collector.start()
self._started = True
self._instances.append(self)
def stop(self):
"""Stop measuring code coverage."""
if self._instances:
if self._instances[-1] is self:
self._instances.pop()
if self._started:
self._collector.stop()
self._started = False
def _atexit(self):
"""Clean up on process shutdown."""
if self._debug.should("process"):
self._debug.write(f"atexit: pid: {os.getpid()}, instance: {self!r}")
if self._started:
self.stop()
if self._auto_save:
self.save()
def erase(self):
"""Erase previously collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self._init()
self._post_init()
if self._collector:
self._collector.reset()
self._init_data(suffix=None)
self._data.erase(parallel=self.config.parallel)
self._data = None
self._inited_for_start = False
def switch_context(self, new_context):
"""Switch to a new dynamic context.
`new_context` is a string to use as the :ref:`dynamic context
<dynamic_contexts>` label for collected data. If a :ref:`static
context <static_contexts>` is in use, the static and dynamic context
labels will be joined together with a pipe character.
Coverage collection must be started already.
.. versionadded:: 5.0
"""
if not self._started: # pragma: part started
raise CoverageException("Cannot switch context, coverage is not started")
if self._collector.should_start_context:
self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True)
self._collector.switch_context(new_context)
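    # Hedged sketch (the context names are made up):
    #   cov.switch_context("test_login")
    #   ... exercise the login path ...
    #   cov.switch_context("test_logout")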
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
self._init()
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
self._init()
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
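    # For illustration: marking lines tagged "no cover" as non-executable
    # (this pattern is an example, not an added default):
    #   cov.exclude(r"#\s*pragma: no cover")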
def _exclude_regex_stale(self):
"""Drop all the compiled exclusion regexes, a list was modified."""
self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See :meth:`exclude` for the
lists that are available, and their meaning.
"""
self._init()
return getattr(self.config, which + "_list")
def save(self):
"""Save the collected coverage data to the data file."""
data = self.get_data()
data.write()
def combine(self, data_paths=None, strict=False, keep=False):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
`data_paths` is a list of files or directories from which data should
be combined. If no list is passed, then the data files from the
directory indicated by the current data file (probably the current
directory) will be combined.
If `strict` is true, then it is an error to attempt to combine when
there are no data files to combine.
If `keep` is true, then original input data files won't be deleted.
.. versionadded:: 4.0
The `data_paths` parameter.
.. versionadded:: 4.3
The `strict` parameter.
        .. versionadded:: 5.5
The `keep` parameter.
"""
self._init()
self._init_data(suffix=None)
self._post_init()
self.get_data()
aliases = None
if self.config.paths:
aliases = PathAliases(relative=self.config.relative_files)
for paths in self.config.paths.values():
result = paths[0]
for pattern in paths[1:]:
aliases.add(pattern, result)
combine_parallel_data(
self._data,
aliases=aliases,
data_paths=data_paths,
strict=strict,
keep=keep,
message=self._message,
)
def get_data(self):
"""Get the collected data.
Also warn about various problems collecting data.
Returns a :class:`coverage.CoverageData`, the collected coverage data.
.. versionadded:: 4.0
"""
self._init()
self._init_data(suffix=None)
self._post_init()
for plugin in self._plugins:
if not plugin._coverage_enabled:
self._collector.plugin_was_disabled(plugin)
if self._collector and self._collector.flush_data():
self._post_save_work()
return self._data
def _post_save_work(self):
"""After saving data, look for warnings, post-work, etc.
Warn about things that should have happened but didn't.
Look for unexecuted files.
"""
# If there are still entries in the source_pkgs_unmatched list,
# then we never encountered those packages.
if self._warn_unimported_source:
self._inorout.warn_unimported_source()
# Find out if we got any data.
if not self._data and self._warn_no_data:
self._warn("No data was collected.", slug="no-data-collected")
# Touch all the files that could have executed, so that we can
# mark completely unexecuted files as 0% covered.
if self._data is not None:
file_paths = collections.defaultdict(list)
for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files():
file_path = self._file_mapper(file_path)
file_paths[plugin_name].append(file_path)
for plugin_name, paths in file_paths.items():
self._data.touch_files(paths, plugin_name)
if self.config.note:
self._warn("The '[run] note' setting is no longer supported.")
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a file name. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The file name for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
analysis = self._analyze(morf)
return (
analysis.filename,
sorted(analysis.statements),
sorted(analysis.excluded),
sorted(analysis.missing),
analysis.missing_formatted(),
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
# All reporting comes through here, so do reporting initialization.
self._init()
self._post_init()
data = self.get_data()
if not isinstance(it, FileReporter):
it = self._get_file_reporter(it)
return Analysis(data, self.config.precision, it, self._file_mapper)
def _get_file_reporter(self, morf):
"""Get a FileReporter for a module or file name."""
plugin = None
file_reporter = "python"
if isinstance(morf, str):
mapped_morf = self._file_mapper(morf)
plugin_name = self._data.file_tracer(mapped_morf)
if plugin_name:
plugin = self._plugins.get(plugin_name)
if plugin:
file_reporter = plugin.file_reporter(mapped_morf)
if file_reporter is None:
raise CoverageException(
"Plugin {!r} did not provide a file reporter for {!r}.".format(
plugin._coverage_plugin_name, morf
)
)
if file_reporter == "python":
file_reporter = PythonFileReporter(morf, self)
return file_reporter
def _get_file_reporters(self, morfs=None):
"""Get a list of FileReporters for a list of modules or file names.
For each module or file name in `morfs`, find a FileReporter. Return
the list of FileReporters.
If `morfs` is a single module or file name, this returns a list of one
FileReporter. If `morfs` is empty or None, then the list of all files
measured is used to find the FileReporters.
"""
if not morfs:
morfs = self._data.measured_files()
# Be sure we have a collection.
if not isinstance(morfs, (list, tuple, set)):
morfs = [morfs]
file_reporters = [self._get_file_reporter(morf) for morf in morfs]
return file_reporters
def report(
self, morfs=None, show_missing=None, ignore_errors=None,
file=None, omit=None, include=None, skip_covered=None,
contexts=None, skip_empty=None, precision=None, sort=None
):
"""Write a textual summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
If `show_missing` is true, then details of which lines or branches are
missing will be included in the report. If `ignore_errors` is true,
then a failure while reporting a single file will not stop the entire
report.
`file` is a file-like object, suitable for writing.
`include` is a list of file name patterns. Files that match will be
included in the report. Files matching `omit` will not be included in
the report.
If `skip_covered` is true, don't report on files with 100% coverage.
If `skip_empty` is true, don't report on empty files (those that have
no statements).
`contexts` is a list of regular expressions. Only data from
:ref:`dynamic contexts <dynamic_contexts>` that match one of those
expressions (using :func:`re.search <python:re.search>`) will be
included in the report.
`precision` is the number of digits to display after the decimal
point for percentages.
All of the arguments default to the settings read from the
:ref:`configuration file <config>`.
Returns a float, the total percentage covered.
.. versionadded:: 4.0
The `skip_covered` parameter.
.. versionadded:: 5.0
The `contexts` and `skip_empty` parameters.
.. versionadded:: 5.2
The `precision` parameter.
"""
with override_config(
self,
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
show_missing=show_missing, skip_covered=skip_covered,
report_contexts=contexts, skip_empty=skip_empty, precision=precision,
sort=sort
):
reporter = SummaryReporter(self)
return reporter.report(morfs, outfile=file)
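    # Illustrative call (the flag values are hypothetical):
    #   pct = cov.report(show_missing=True, skip_covered=True)
    #   print("total: %.1f%%" % pct)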
def annotate(
self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None, contexts=None,
):
"""Annotate a list of modules.
.. note::
This method has been obsoleted by more modern reporting tools,
including the :meth:`html_report` method. It will be removed in a
future version.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See :meth:`report` for other arguments.
"""
print("The annotate command will be removed in a future version.")
print("Get in touch if you still use it: [email protected]")
with override_config(self,
ignore_errors=ignore_errors, report_omit=omit,
report_include=include, report_contexts=contexts,
):
reporter = AnnotateReporter(self)
reporter.report(morfs, directory=directory)
def html_report(
self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None, extra_css=None, title=None,
skip_covered=None, show_contexts=None, contexts=None,
skip_empty=None, precision=None,
):
"""Generate an HTML report.
The HTML is written to `directory`. The file "index.html" is the
overview starting point, with links to more detailed pages for
individual modules.
`extra_css` is a path to a file of other CSS to apply on the page.
It will be copied into the HTML directory.
`title` is a text string (not HTML) to use as the title of the HTML
report.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
.. note::
The HTML report files are generated incrementally based on the
source files and coverage results. If you modify the report files,
the changes will not be considered. You should be careful about
changing the files in the report folder.
"""
with override_config(self,
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
html_dir=directory, extra_css=extra_css, html_title=title,
html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts,
html_skip_empty=skip_empty, precision=precision,
):
reporter = HtmlReporter(self)
ret = reporter.report(morfs)
return ret
def xml_report(
self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None, contexts=None, skip_empty=None,
):
"""Generate an XML report of coverage results.
The report is compatible with Cobertura reports.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
"""
with override_config(self,
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty,
):
return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message)
def json_report(
self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None, contexts=None, pretty_print=None,
show_contexts=None
):
"""Generate a JSON report of coverage results.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See :meth:`report` for other arguments.
Returns a float, the total percentage covered.
.. versionadded:: 5.0
"""
with override_config(self,
ignore_errors=ignore_errors, report_omit=omit, report_include=include,
json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print,
json_show_contexts=show_contexts
):
return render_report(self.config.json_output, JsonReporter(self), morfs, self._message)
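    # Sketch: producing machine-readable reports after a run. Passing "-" as
    # `outfile` writes the report to stdout (file names here are illustrative).
    #
    #     cov.xml_report(outfile="coverage.xml")
    #     cov.json_report(outfile="-", pretty_print=True)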
def sys_info(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
self._init()
self._post_init()
def plugin_info(plugins):
"""Make an entry for the sys_info from a list of plug-ins."""
entries = []
for plugin in plugins:
entry = plugin._coverage_plugin_name
if not plugin._coverage_enabled:
entry += " (disabled)"
entries.append(entry)
return entries
info = [
('coverage_version', covmod.__version__),
('coverage_module', covmod.__file__),
('tracer', self._collector.tracer_name() if self._collector else "-none-"),
('CTracer', 'available' if CTracer else "unavailable"),
('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
('plugins.configurers', plugin_info(self._plugins.configurers)),
('plugins.context_switchers', plugin_info(self._plugins.context_switchers)),
('configs_attempted', self.config.attempted_config_files),
('configs_read', self.config.config_files_read),
('config_file', self.config.config_file),
('config_contents',
repr(self.config._config_contents)
if self.config._config_contents
else '-none-'
),
('data_file', self._data.data_filename() if self._data is not None else "-none-"),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('implementation', platform.python_implementation()),
('executable', sys.executable),
('def_encoding', sys.getdefaultencoding()),
('fs_encoding', sys.getfilesystemencoding()),
('pid', os.getpid()),
('cwd', os.getcwd()),
('path', sys.path),
('environment', human_sorted(
f"{k} = {v}"
for k, v in os.environ.items()
if (
any(slug in k for slug in ("COV", "PY")) or
(k in ("HOME", "TEMP", "TMP"))
)
)),
('command_line', " ".join(getattr(sys, 'argv', ['-none-']))),
]
if self._inorout:
info.extend(self._inorout.sys_info())
info.extend(CoverageData.sys_info())
return info
# Mega debugging...
# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage.
if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging
from coverage.debug import decorate_methods, show_calls
Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage)
def process_startup():
"""Call this at Python start-up to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
Returns the :class:`Coverage` instance that was started, or None if it was
not started by this call.
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if not cps:
# No request for coverage, nothing to do.
return None
# This function can be called more than once in a process. This happens
# because some virtualenv configurations make the same directory visible
# twice in sys.path. This means that the .pth file will be found twice,
# and executed twice, executing this function twice. We set a global
# flag (an attribute on this function) to indicate that coverage.py has
# already been started, so we can avoid doing it twice.
#
# https://github.com/nedbat/coveragepy/issues/340 has more details.
if hasattr(process_startup, "coverage"):
# We've annotated this function before, so we must have already
# started coverage.py in this process. Nothing to do.
return None
cov = Coverage(config_file=cps)
process_startup.coverage = cov
cov._warn_no_data = False
cov._warn_unimported_source = False
cov._warn_preimported_source = False
cov._auto_save = True
cov.start()
return cov
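# Sketch of driving this from a parent process (paths illustrative): export
# COVERAGE_PROCESS_START, and any child interpreter whose site configuration
# calls process_startup() will measure itself.
#
#     import os, subprocess
#     env = dict(os.environ, COVERAGE_PROCESS_START=".coveragerc")
#     subprocess.run(["python", "child_script.py"], env=env)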
def _prevent_sub_process_measurement():
"""Stop any subprocess auto-measurement from writing data."""
auto_created_coverage = getattr(process_startup, "coverage", None)
if auto_created_coverage is not None:
auto_created_coverage._auto_save = False
| 43,417 | Python | 36.046075 | 100 | 0.612525 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/multiproc.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Monkey-patching to add multiprocessing support for coverage.py"""
import multiprocessing
import multiprocessing.process
import os
import os.path
import sys
import traceback
from coverage.misc import contract
# An attribute that will be set on the module to indicate that it has been
# monkey-patched.
PATCHED_MARKER = "_coverage$patched"
OriginalProcess = multiprocessing.process.BaseProcess
original_bootstrap = OriginalProcess._bootstrap
class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method
"""A replacement for multiprocess.Process that starts coverage."""
def _bootstrap(self, *args, **kwargs):
"""Wrapper around _bootstrap to start coverage."""
try:
from coverage import Coverage # avoid circular import
cov = Coverage(data_suffix=True)
cov._warn_preimported_source = False
cov.start()
debug = cov._debug
if debug.should("multiproc"):
debug.write("Calling multiprocessing bootstrap")
except Exception:
print("Exception during multiprocessing bootstrap init:")
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
raise
try:
return original_bootstrap(self, *args, **kwargs)
finally:
if debug.should("multiproc"):
debug.write("Finished multiprocessing bootstrap")
cov.stop()
cov.save()
if debug.should("multiproc"):
debug.write("Saved multiprocessing data")
class Stowaway:
"""An object to pickle, so when it is unpickled, it can apply the monkey-patch."""
def __init__(self, rcfile):
self.rcfile = rcfile
def __getstate__(self):
return {'rcfile': self.rcfile}
def __setstate__(self, state):
patch_multiprocessing(state['rcfile'])
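# Illustration of the Stowaway trick (rcfile path hypothetical): the object
# carries no useful state; being unpickled is the whole point.
#
#     import pickle
#     blob = pickle.dumps(Stowaway(".coveragerc"))
#     pickle.loads(blob)   # __setstate__ runs, re-applying the monkey-patch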
@contract(rcfile=str)
def patch_multiprocessing(rcfile):
"""Monkey-patch the multiprocessing module.
This enables coverage measurement of processes started by multiprocessing.
This involves aggressive monkey-patching.
`rcfile` is the path to the rcfile being used.
"""
if hasattr(multiprocessing, PATCHED_MARKER):
return
OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
# Set the value in ProcessWithCoverage that will be pickled into the child
# process.
os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)
# When spawning processes rather than forking them, we have no state in the
# new process. We sneak in there with a Stowaway: we stuff one of our own
# objects into the data that gets pickled and sent to the sub-process. When
    # the Stowaway is unpickled, its __setstate__ method is called, which
# re-applies the monkey-patch.
# Windows only spawns, so this is needed to keep Windows working.
try:
from multiprocessing import spawn
original_get_preparation_data = spawn.get_preparation_data
except (ImportError, AttributeError):
pass
else:
def get_preparation_data_with_stowaway(name):
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
d['stowaway'] = Stowaway(rcfile)
return d
spawn.get_preparation_data = get_preparation_data_with_stowaway
setattr(multiprocessing, PATCHED_MARKER, True)
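# This patch is normally applied for you when measurement is configured with
# the multiprocessing concurrency option; a direct sketch (hypothetical rcfile
# and target function):
#
#     patch_multiprocessing(rcfile=".coveragerc")
#     p = multiprocessing.Process(target=work)   # `work` is your own callable
#     p.start(); p.join()   # the child saves its own .coverage.* data file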
| 3,623 | Python | 33.846154 | 86 | 0.669335 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/cmdline.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Command-line support for coverage.py."""
import glob
import optparse # pylint: disable=deprecated-module
import os
import os.path
import shlex
import sys
import textwrap
import traceback
import coverage
from coverage import Coverage
from coverage import env
from coverage.collector import CTracer
from coverage.data import line_counts
from coverage.debug import info_formatter, info_header, short_stack
from coverage.exceptions import BaseCoverageException, ExceptionDuringRun, NoSource
from coverage.execfile import PyRunner
from coverage.misc import human_sorted
from coverage.results import Numbers, should_fail_under
class Opts:
"""A namespace class for individual options we'll build parsers from."""
append = optparse.make_option(
'-a', '--append', action='store_true',
help="Append coverage data to .coverage, otherwise it starts clean each time.",
)
keep = optparse.make_option(
'', '--keep', action='store_true',
help="Keep original coverage files, otherwise they are deleted.",
)
branch = optparse.make_option(
'', '--branch', action='store_true',
help="Measure branch coverage in addition to statement coverage.",
)
CONCURRENCY_CHOICES = [
"thread", "gevent", "greenlet", "eventlet", "multiprocessing",
]
concurrency = optparse.make_option(
'', '--concurrency', action='store', metavar="LIB",
choices=CONCURRENCY_CHOICES,
help=(
"Properly measure code using a concurrency library. " +
"Valid values are: {}."
).format(", ".join(CONCURRENCY_CHOICES)),
)
context = optparse.make_option(
'', '--context', action='store', metavar="LABEL",
help="The context label to record for this coverage run.",
)
contexts = optparse.make_option(
'', '--contexts', action='store',
metavar="REGEX1,REGEX2,...",
help=(
"Only display data from lines covered in the given contexts. " +
"Accepts Python regexes, which must be quoted."
),
)
debug = optparse.make_option(
'', '--debug', action='store', metavar="OPTS",
help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
)
directory = optparse.make_option(
'-d', '--directory', action='store', metavar="DIR",
help="Write the output files to DIR.",
)
fail_under = optparse.make_option(
'', '--fail-under', action='store', metavar="MIN", type="float",
help="Exit with a status of 2 if the total coverage is less than MIN.",
)
help = optparse.make_option(
'-h', '--help', action='store_true',
help="Get help on this command.",
)
ignore_errors = optparse.make_option(
'-i', '--ignore-errors', action='store_true',
help="Ignore errors while reading source files.",
)
include = optparse.make_option(
'', '--include', action='store',
metavar="PAT1,PAT2,...",
help=(
"Include only files whose paths match one of these patterns. " +
"Accepts shell-style wildcards, which must be quoted."
),
)
pylib = optparse.make_option(
'-L', '--pylib', action='store_true',
help=(
"Measure coverage even inside the Python installed library, " +
"which isn't done by default."
),
)
show_missing = optparse.make_option(
'-m', '--show-missing', action='store_true',
help="Show line numbers of statements in each module that weren't executed.",
)
module = optparse.make_option(
'-m', '--module', action='store_true',
help=(
"<pyfile> is an importable Python module, not a script path, " +
"to be run as 'python -m' would run it."
),
)
omit = optparse.make_option(
'', '--omit', action='store',
metavar="PAT1,PAT2,...",
help=(
"Omit files whose paths match one of these patterns. " +
"Accepts shell-style wildcards, which must be quoted."
),
)
output_xml = optparse.make_option(
'-o', '', action='store', dest="outfile",
metavar="OUTFILE",
help="Write the XML report to this file. Defaults to 'coverage.xml'",
)
output_json = optparse.make_option(
'-o', '', action='store', dest="outfile",
metavar="OUTFILE",
help="Write the JSON report to this file. Defaults to 'coverage.json'",
)
json_pretty_print = optparse.make_option(
'', '--pretty-print', action='store_true',
help="Format the JSON for human readers.",
)
parallel_mode = optparse.make_option(
'-p', '--parallel-mode', action='store_true',
help=(
"Append the machine name, process id and random number to the " +
".coverage data file name to simplify collecting data from " +
"many processes."
),
)
precision = optparse.make_option(
'', '--precision', action='store', metavar='N', type=int,
help=(
"Number of digits after the decimal point to display for " +
"reported coverage percentages."
),
)
quiet = optparse.make_option(
'-q', '--quiet', action='store_true',
help="Don't print messages about what is happening.",
)
rcfile = optparse.make_option(
'', '--rcfile', action='store',
help=(
"Specify configuration file. " +
"By default '.coveragerc', 'setup.cfg', 'tox.ini', and " +
"'pyproject.toml' are tried. [env: COVERAGE_RCFILE]"
),
)
show_contexts = optparse.make_option(
'--show-contexts', action='store_true',
help="Show contexts for covered lines.",
)
skip_covered = optparse.make_option(
'--skip-covered', action='store_true',
help="Skip files with 100% coverage.",
)
no_skip_covered = optparse.make_option(
'--no-skip-covered', action='store_false', dest='skip_covered',
help="Disable --skip-covered.",
)
skip_empty = optparse.make_option(
'--skip-empty', action='store_true',
help="Skip files with no code.",
)
sort = optparse.make_option(
'--sort', action='store', metavar='COLUMN',
help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " +
"Default is name."
)
source = optparse.make_option(
'', '--source', action='store', metavar="SRC1,SRC2,...",
help="A list of directories or importable names of code to measure.",
)
timid = optparse.make_option(
'', '--timid', action='store_true',
help=(
"Use a simpler but slower trace method. Try this if you get " +
"seemingly impossible results!"
),
)
title = optparse.make_option(
'', '--title', action='store', metavar="TITLE",
help="A text string to use as the title on the HTML.",
)
version = optparse.make_option(
'', '--version', action='store_true',
help="Display version information and exit.",
)
class CoverageOptionParser(optparse.OptionParser):
"""Base OptionParser for coverage.py.
Problems don't exit the program.
Defaults are initialized for all options.
"""
def __init__(self, *args, **kwargs):
super().__init__(
add_help_option=False, *args, **kwargs
)
self.set_defaults(
action=None,
append=None,
branch=None,
concurrency=None,
context=None,
contexts=None,
debug=None,
directory=None,
fail_under=None,
help=None,
ignore_errors=None,
include=None,
keep=None,
module=None,
omit=None,
parallel_mode=None,
precision=None,
pylib=None,
quiet=None,
rcfile=True,
show_contexts=None,
show_missing=None,
skip_covered=None,
skip_empty=None,
sort=None,
source=None,
timid=None,
title=None,
version=None,
)
self.disable_interspersed_args()
class OptionParserError(Exception):
"""Used to stop the optparse error handler ending the process."""
pass
def parse_args_ok(self, args=None, options=None):
"""Call optparse.parse_args, but return a triple:
(ok, options, args)
"""
try:
options, args = super().parse_args(args, options)
except self.OptionParserError:
return False, None, None
return True, options, args
def error(self, msg):
"""Override optparse.error so sys.exit doesn't get called."""
show_help(msg)
raise self.OptionParserError
class GlobalOptionParser(CoverageOptionParser):
"""Command-line parser for coverage.py global option arguments."""
def __init__(self):
super().__init__()
self.add_options([
Opts.help,
Opts.version,
])
class CmdOptionParser(CoverageOptionParser):
"""Parse one of the new-style commands for coverage.py."""
def __init__(self, action, options, defaults=None, usage=None, description=None):
"""Create an OptionParser for a coverage.py command.
`action` is the slug to put into `options.action`.
`options` is a list of Option's for the command.
`defaults` is a dict of default value for options.
`usage` is the usage string to display in help.
`description` is the description of the command, for the help text.
"""
if usage:
usage = "%prog " + usage
super().__init__(
usage=usage,
description=description,
)
self.set_defaults(action=action, **(defaults or {}))
self.add_options(options)
self.cmd = action
def __eq__(self, other):
# A convenience equality, so that I can put strings in unit test
# results, and they will compare equal to objects.
return (other == f"<CmdOptionParser:{self.cmd}>")
__hash__ = None # This object doesn't need to be hashed.
def get_prog_name(self):
"""Override of an undocumented function in optparse.OptionParser."""
program_name = super().get_prog_name()
# Include the sub-command for this parser as part of the command.
return f"{program_name} {self.cmd}"
GLOBAL_ARGS = [
Opts.debug,
Opts.help,
Opts.rcfile,
]
CMDS = {
'annotate': CmdOptionParser(
"annotate",
[
Opts.directory,
Opts.ignore_errors,
Opts.include,
Opts.omit,
] + GLOBAL_ARGS,
usage="[options] [modules]",
description=(
"Make annotated copies of the given files, marking statements that are executed " +
"with > and statements that are missed with !."
),
),
'combine': CmdOptionParser(
"combine",
[
Opts.append,
Opts.keep,
Opts.quiet,
] + GLOBAL_ARGS,
usage="[options] <path1> <path2> ... <pathN>",
description=(
"Combine data from multiple coverage files collected " +
"with 'run -p'. The combined results are written to a single " +
"file representing the union of the data. The positional " +
"arguments are data files or directories containing data files. " +
"If no paths are provided, data files in the default data file's " +
"directory are combined."
),
),
'debug': CmdOptionParser(
"debug", GLOBAL_ARGS,
usage="<topic>",
description=(
"Display information about the internals of coverage.py, " +
"for diagnosing problems. " +
"Topics are: " +
"'data' to show a summary of the collected data; " +
"'sys' to show installation information; " +
"'config' to show the configuration; " +
"'premain' to show what is calling coverage."
),
),
'erase': CmdOptionParser(
"erase", GLOBAL_ARGS,
description="Erase previously collected coverage data.",
),
'help': CmdOptionParser(
"help", GLOBAL_ARGS,
usage="[command]",
description="Describe how to use coverage.py",
),
'html': CmdOptionParser(
"html",
[
Opts.contexts,
Opts.directory,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.precision,
Opts.quiet,
Opts.show_contexts,
Opts.skip_covered,
Opts.no_skip_covered,
Opts.skip_empty,
Opts.title,
] + GLOBAL_ARGS,
usage="[options] [modules]",
description=(
"Create an HTML report of the coverage of the files. " +
"Each file gets its own page, with the source decorated to show " +
"executed, excluded, and missed lines."
),
),
'json': CmdOptionParser(
"json",
[
Opts.contexts,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.output_json,
Opts.json_pretty_print,
Opts.quiet,
Opts.show_contexts,
] + GLOBAL_ARGS,
usage="[options] [modules]",
description="Generate a JSON report of coverage results."
),
'report': CmdOptionParser(
"report",
[
Opts.contexts,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.precision,
Opts.sort,
Opts.show_missing,
Opts.skip_covered,
Opts.no_skip_covered,
Opts.skip_empty,
] + GLOBAL_ARGS,
usage="[options] [modules]",
description="Report coverage statistics on modules."
),
'run': CmdOptionParser(
"run",
[
Opts.append,
Opts.branch,
Opts.concurrency,
Opts.context,
Opts.include,
Opts.module,
Opts.omit,
Opts.pylib,
Opts.parallel_mode,
Opts.source,
Opts.timid,
] + GLOBAL_ARGS,
usage="[options] <pyfile> [program options]",
description="Run a Python program, measuring code execution."
),
'xml': CmdOptionParser(
"xml",
[
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.output_xml,
Opts.quiet,
Opts.skip_empty,
] + GLOBAL_ARGS,
usage="[options] [modules]",
description="Generate an XML report of coverage results."
),
}
def show_help(error=None, topic=None, parser=None):
"""Display an error message, or the named topic."""
assert error or topic or parser
program_path = sys.argv[0]
if program_path.endswith(os.path.sep + '__main__.py'):
# The path is the main module of a package; get that path instead.
program_path = os.path.dirname(program_path)
program_name = os.path.basename(program_path)
if env.WINDOWS:
# entry_points={'console_scripts':...} on Windows makes files
# called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
# invoke coverage-script.py, coverage3-script.py, and
# coverage-3.5-script.py. argv[0] is the .py file, but we want to
# get back to the original form.
auto_suffix = "-script.py"
if program_name.endswith(auto_suffix):
program_name = program_name[:-len(auto_suffix)]
help_params = dict(coverage.__dict__)
help_params['program_name'] = program_name
if CTracer is not None:
help_params['extension_modifier'] = 'with C extension'
else:
help_params['extension_modifier'] = 'without C extension'
if error:
print(error, file=sys.stderr)
print(f"Use '{program_name} help' for help.", file=sys.stderr)
elif parser:
print(parser.format_help().strip())
print()
else:
help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
if help_msg:
print(help_msg.format(**help_params))
else:
print(f"Don't know topic {topic!r}")
print("Full documentation is at {__url__}".format(**help_params))
OK, ERR, FAIL_UNDER = 0, 1, 2
class CoverageScript:
"""The command-line interface to coverage.py."""
def __init__(self):
self.global_option = False
self.coverage = None
def command_line(self, argv):
"""The bulk of the command line interface to coverage.py.
`argv` is the argument list to process.
Returns 0 if all is well, 1 if something went wrong.
"""
# Collect the command-line options.
if not argv:
show_help(topic='minimum_help')
return OK
# The command syntax we parse depends on the first argument. Global
# switch syntax always starts with an option.
self.global_option = argv[0].startswith('-')
if self.global_option:
parser = GlobalOptionParser()
else:
parser = CMDS.get(argv[0])
if not parser:
show_help(f"Unknown command: {argv[0]!r}")
return ERR
argv = argv[1:]
ok, options, args = parser.parse_args_ok(argv)
if not ok:
return ERR
# Handle help and version.
if self.do_help(options, args, parser):
return OK
# Listify the list options.
source = unshell_list(options.source)
omit = unshell_list(options.omit)
include = unshell_list(options.include)
debug = unshell_list(options.debug)
contexts = unshell_list(options.contexts)
# Do something.
self.coverage = Coverage(
data_suffix=options.parallel_mode,
cover_pylib=options.pylib,
timid=options.timid,
branch=options.branch,
config_file=options.rcfile,
source=source,
omit=omit,
include=include,
debug=debug,
concurrency=options.concurrency,
check_preimported=True,
context=options.context,
messages=not options.quiet,
)
if options.action == "debug":
return self.do_debug(args)
elif options.action == "erase":
self.coverage.erase()
return OK
elif options.action == "run":
return self.do_run(options, args)
elif options.action == "combine":
if options.append:
self.coverage.load()
data_dirs = args or None
self.coverage.combine(data_dirs, strict=True, keep=bool(options.keep))
self.coverage.save()
return OK
# Remaining actions are reporting, with some common options.
report_args = dict(
morfs=unglob_args(args),
ignore_errors=options.ignore_errors,
omit=omit,
include=include,
contexts=contexts,
)
# We need to be able to import from the current directory, because
        # plugins may try, for example, to read Django settings.
sys.path.insert(0, '')
self.coverage.load()
total = None
if options.action == "report":
total = self.coverage.report(
show_missing=options.show_missing,
skip_covered=options.skip_covered,
skip_empty=options.skip_empty,
precision=options.precision,
sort=options.sort,
**report_args
)
elif options.action == "annotate":
self.coverage.annotate(directory=options.directory, **report_args)
elif options.action == "html":
total = self.coverage.html_report(
directory=options.directory,
title=options.title,
skip_covered=options.skip_covered,
skip_empty=options.skip_empty,
show_contexts=options.show_contexts,
precision=options.precision,
**report_args
)
elif options.action == "xml":
outfile = options.outfile
total = self.coverage.xml_report(
outfile=outfile, skip_empty=options.skip_empty,
**report_args
)
elif options.action == "json":
outfile = options.outfile
total = self.coverage.json_report(
outfile=outfile,
pretty_print=options.pretty_print,
show_contexts=options.show_contexts,
**report_args
)
else:
# There are no other possible actions.
raise AssertionError
if total is not None:
# Apply the command line fail-under options, and then use the config
# value, so we can get fail_under from the config file.
if options.fail_under is not None:
self.coverage.set_option("report:fail_under", options.fail_under)
fail_under = self.coverage.get_option("report:fail_under")
precision = self.coverage.get_option("report:precision")
if should_fail_under(total, fail_under, precision):
msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format(
total=Numbers(precision=precision).display_covered(total),
fail_under=fail_under,
p=precision,
)
print("Coverage failure:", msg)
return FAIL_UNDER
return OK
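    # Sketch: this method backs the console entry point, but it can also be
    # driven directly (the argument list is illustrative):
    #
    #     status = CoverageScript().command_line(["report", "-m"])
    #     assert status in (OK, ERR, FAIL_UNDER)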
def do_help(self, options, args, parser):
"""Deal with help requests.
Return True if it handled the request, False if not.
"""
# Handle help.
if options.help:
if self.global_option:
show_help(topic='help')
else:
show_help(parser=parser)
return True
if options.action == "help":
if args:
for a in args:
parser = CMDS.get(a)
if parser:
show_help(parser=parser)
else:
show_help(topic=a)
else:
show_help(topic='help')
return True
# Handle version.
if options.version:
show_help(topic='version')
return True
return False
def do_run(self, options, args):
"""Implementation of 'coverage run'."""
if not args:
if options.module:
# Specified -m with nothing else.
show_help("No module specified for -m")
return ERR
command_line = self.coverage.get_option("run:command_line")
if command_line is not None:
args = shlex.split(command_line)
if args and args[0] in {"-m", "--module"}:
options.module = True
args = args[1:]
if not args:
show_help("Nothing to do.")
return ERR
if options.append and self.coverage.get_option("run:parallel"):
show_help("Can't append to data files in parallel mode.")
return ERR
if options.concurrency == "multiprocessing":
# Can't set other run-affecting command line options with
# multiprocessing.
for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
# As it happens, all of these options have no default, meaning
# they will be None if they have not been specified.
if getattr(options, opt_name) is not None:
show_help(
"Options affecting multiprocessing must only be specified " +
"in a configuration file.\n" +
f"Remove --{opt_name} from the command line."
)
return ERR
os.environ["COVERAGE_RUN"] = "true"
runner = PyRunner(args, as_module=bool(options.module))
runner.prepare()
if options.append:
self.coverage.load()
# Run the script.
self.coverage.start()
code_ran = True
try:
runner.run()
except NoSource:
code_ran = False
raise
finally:
self.coverage.stop()
if code_ran:
self.coverage.save()
return OK
def do_debug(self, args):
"""Implementation of 'coverage debug'."""
if not args:
show_help("What information would you like: config, data, sys, premain?")
return ERR
for info in args:
if info == 'sys':
sys_info = self.coverage.sys_info()
print(info_header("sys"))
for line in info_formatter(sys_info):
print(f" {line}")
elif info == 'data':
self.coverage.load()
data = self.coverage.get_data()
print(info_header("data"))
print(f"path: {data.data_filename()}")
if data:
print(f"has_arcs: {data.has_arcs()!r}")
summary = line_counts(data, fullpath=True)
filenames = human_sorted(summary.keys())
print(f"\n{len(filenames)} files:")
for f in filenames:
line = f"{f}: {summary[f]} lines"
plugin = data.file_tracer(f)
if plugin:
line += f" [{plugin}]"
print(line)
else:
print("No data collected")
elif info == 'config':
print(info_header("config"))
config_info = self.coverage.config.__dict__.items()
for line in info_formatter(config_info):
print(f" {line}")
elif info == "premain":
print(info_header("premain"))
print(short_stack())
else:
show_help(f"Don't know what you mean by {info!r}")
return ERR
return OK
def unshell_list(s):
"""Turn a command-line argument into a list."""
if not s:
return None
if env.WINDOWS:
# When running coverage.py as coverage.exe, some of the behavior
# of the shell is emulated: wildcards are expanded into a list of
# file names. So you have to single-quote patterns on the command
# line, but (not) helpfully, the single quotes are included in the
# argument, so we have to strip them off here.
s = s.strip("'")
return s.split(',')
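# Behavior sketch:
#
#     unshell_list("")                # -> None
#     unshell_list("a/*.py,b/*.py")   # -> ['a/*.py', 'b/*.py']
#     # On Windows, surrounding single quotes left over from the emulated
#     # shell are stripped before splitting.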
def unglob_args(args):
"""Interpret shell wildcards for platforms that need it."""
if env.WINDOWS:
globbed = []
for arg in args:
if '?' in arg or '*' in arg:
globbed.extend(glob.glob(arg))
else:
globbed.append(arg)
args = globbed
return args
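# Example (Windows only; file names illustrative): with mod_a.py and mod_b.py
# in the current directory,
#
#     unglob_args(["mod_*.py", "other.py"])
#     # -> ['mod_a.py', 'mod_b.py', 'other.py']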
HELP_TOPICS = {
'help': """\
Coverage.py, version {__version__} {extension_modifier}
Measure, collect, and report on code coverage in Python programs.
usage: {program_name} <command> [options] [args]
Commands:
annotate Annotate source files with execution information.
combine Combine a number of data files.
debug Display information about the internals of coverage.py
erase Erase previously collected coverage data.
help Get help on using coverage.py.
html Create an HTML report.
json Create a JSON report of coverage results.
report Report coverage stats on modules.
run Run a Python program and measure code execution.
xml Create an XML report of coverage results.
Use "{program_name} help <command>" for detailed help on any command.
""",
'minimum_help': """\
Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help.
""",
'version': """\
Coverage.py, version {__version__} {extension_modifier}
""",
}
def main(argv=None):
"""The main entry point to coverage.py.
This is installed as the script entry point.
"""
if argv is None:
argv = sys.argv[1:]
try:
status = CoverageScript().command_line(argv)
except ExceptionDuringRun as err:
# An exception was caught while running the product code. The
# sys.exc_info() return tuple is packed into an ExceptionDuringRun
# exception.
traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter
status = ERR
except BaseCoverageException as err:
# A controlled error inside coverage.py: print the message to the user.
msg = err.args[0]
print(msg)
status = ERR
except SystemExit as err:
# The user called `sys.exit()`. Exit with their argument, if any.
if err.args:
status = err.args[0]
else:
status = None
return status
# Profiling using ox_profile. Install it from GitHub:
# pip install git+https://github.com/emin63/ox_profile.git
#
# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile.
_profile = os.environ.get("COVERAGE_PROFILE", "")
if _profile: # pragma: debugging
from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error
original_main = main
def main(argv=None): # pylint: disable=function-redefined
"""A wrapper around main that profiles."""
profiler = SimpleLauncher.launch()
try:
return original_main(argv)
finally:
data, _ = profiler.query(re_filter='coverage', max_records=100)
print(profiler.show(query=data, limit=100, sep='', col=''))
profiler.cancel()
| 31,120 | Python | 32.571737 | 114 | 0.548907 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/inorout.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Determining whether files are being measured/reported or not."""
import importlib.util
import inspect
import itertools
import os
import platform
import re
import sys
import sysconfig
import traceback
from coverage import env
from coverage.disposition import FileDisposition, disposition_init
from coverage.exceptions import CoverageException
from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
from coverage.files import prep_patterns, find_python_files, canonical_filename
from coverage.misc import sys_modules_saved
from coverage.python import source_for_file, source_for_morf
# Pypy has some unusual stuff in the "stdlib". Consider those locations
# when deciding where the stdlib is. These modules are not used for anything,
# they are modules importable from the pypy lib directories, so that we can
# find those directories.
_structseq = _pypy_irc_topic = None
if env.PYPY:
try:
import _structseq
except ImportError:
pass
try:
import _pypy_irc_topic
except ImportError:
pass
def canonical_path(morf, directory=False):
"""Return the canonical path of the module or file `morf`.
If the module is a package, then return its directory. If it is a
module, then return its file, unless `directory` is True, in which
case return its enclosing directory.
"""
morf_path = canonical_filename(source_for_morf(morf))
if morf_path.endswith("__init__.py") or directory:
morf_path = os.path.split(morf_path)[0]
return morf_path
def name_for_module(filename, frame):
"""Get the name of the module for a filename and frame.
For configurability's sake, we allow __main__ modules to be matched by
their importable name.
If loaded via runpy (aka -m), we can usually recover the "original"
full dotted module name, otherwise, we resort to interpreting the
file name to get the module's name. In the case that the module name
can't be determined, None is returned.
"""
module_globals = frame.f_globals if frame is not None else {}
if module_globals is None: # pragma: only ironpython
# IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
module_globals = {}
dunder_name = module_globals.get('__name__', None)
if isinstance(dunder_name, str) and dunder_name != '__main__':
# This is the usual case: an imported module.
return dunder_name
loader = module_globals.get('__loader__', None)
for attrname in ('fullname', 'name'): # attribute renamed in py3.2
if hasattr(loader, attrname):
fullname = getattr(loader, attrname)
else:
continue
if isinstance(fullname, str) and fullname != '__main__':
# Module loaded via: runpy -m
return fullname
# Script as first argument to Python command line.
inspectedname = inspect.getmodulename(filename)
if inspectedname is not None:
return inspectedname
else:
return dunder_name
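# Behavior sketch (paths illustrative; with frame=None the file name alone
# decides the answer):
#
#     name_for_module("/tmp/foo.py", None)    # -> 'foo'
#     name_for_module("not-a-module", None)   # -> None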
def module_is_namespace(mod):
"""Is the module object `mod` a PEP420 namespace module?"""
return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
def module_has_file(mod):
"""Does the module object `mod` have an existing __file__ ?"""
mod__file__ = getattr(mod, '__file__', None)
if mod__file__ is None:
return False
return os.path.exists(mod__file__)
def file_and_path_for_module(modulename):
"""Find the file and search path for `modulename`.
Returns:
filename: The filename of the module, or None.
path: A list (possibly empty) of directories to find submodules in.
"""
filename = None
path = []
try:
spec = importlib.util.find_spec(modulename)
except ImportError:
pass
else:
if spec is not None:
if spec.origin != "namespace":
filename = spec.origin
path = list(spec.submodule_search_locations or ())
return filename, path
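# Example (results depend on the running interpreter):
#
#     filename, path = file_and_path_for_module("os")
#     # filename -> '.../os.py'; path -> [] (a package would list its
#     # submodule search directories instead)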
def add_stdlib_paths(paths):
"""Add paths where the stdlib can be found to the set `paths`."""
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
modules_we_happen_to_have = [
inspect, itertools, os, platform, re, sysconfig, traceback,
_pypy_irc_topic, _structseq,
]
for m in modules_we_happen_to_have:
if m is not None and hasattr(m, "__file__"):
paths.add(canonical_path(m, directory=True))
if _structseq and not hasattr(_structseq, '__file__'):
# PyPy 2.4 has no __file__ in the builtin modules, but the code
# objects still have the file names. So dig into one to find
# the path to exclude. The "filename" might be synthetic,
# don't be fooled by those.
structseq_file = _structseq.structseq_new.__code__.co_filename
if not structseq_file.startswith("<"):
paths.add(canonical_path(structseq_file))
def add_third_party_paths(paths):
"""Add locations for third-party packages to the set `paths`."""
# Get the paths that sysconfig knows about.
scheme_names = set(sysconfig.get_scheme_names())
for scheme in scheme_names:
# https://foss.heptapod.net/pypy/pypy/-/issues/3433
better_scheme = "pypy_posix" if scheme == "pypy" else scheme
if os.name in better_scheme.split("_"):
config_paths = sysconfig.get_paths(scheme)
for path_name in ["platlib", "purelib", "scripts"]:
paths.add(config_paths[path_name])
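# Usage sketch: each helper mutates a caller-owned set, so the path groups
# stay separate and can feed separate matchers later:
#
#     stdlib, third = set(), set()
#     add_stdlib_paths(stdlib)
#     add_third_party_paths(third)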
def add_coverage_paths(paths):
"""Add paths where coverage.py code can be found to the set `paths`."""
cover_path = canonical_path(__file__, directory=True)
paths.add(cover_path)
if env.TESTING:
# Don't include our own test code.
paths.add(os.path.join(cover_path, "tests"))
# When testing, we use PyContracts, which should be considered
# part of coverage.py, and it uses six. Exclude those directories
# just as we exclude ourselves.
if env.USE_CONTRACTS:
import contracts
import six
for mod in [contracts, six]:
paths.add(canonical_path(mod))
class InOrOut:
"""Machinery for determining what files to measure."""
def __init__(self, warn, debug):
self.warn = warn
self.debug = debug
# The matchers for should_trace.
self.source_match = None
self.source_pkgs_match = None
self.pylib_paths = self.cover_paths = self.third_paths = None
self.pylib_match = self.cover_match = self.third_match = None
self.include_match = self.omit_match = None
self.plugins = []
self.disp_class = FileDisposition
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
self.source_pkgs_unmatched = []
self.omit = self.include = None
# Is the source inside a third-party area?
self.source_in_third = False
def configure(self, config):
"""Apply the configuration to get ready for decision-time."""
self.source_pkgs.extend(config.source_pkgs)
for src in config.source or []:
if os.path.isdir(src):
self.source.append(canonical_filename(src))
else:
self.source_pkgs.append(src)
self.source_pkgs_unmatched = self.source_pkgs[:]
self.omit = prep_patterns(config.run_omit)
self.include = prep_patterns(config.run_include)
# The directories for files considered "installed with the interpreter".
self.pylib_paths = set()
if not config.cover_pylib:
add_stdlib_paths(self.pylib_paths)
# To avoid tracing the coverage.py code itself, we skip anything
# located where we are.
self.cover_paths = set()
add_coverage_paths(self.cover_paths)
# Find where third-party packages are installed.
self.third_paths = set()
add_third_party_paths(self.third_paths)
def debug(msg):
if self.debug:
self.debug.write(msg)
# Create the matchers we need for should_trace
if self.source or self.source_pkgs:
against = []
if self.source:
self.source_match = TreeMatcher(self.source, "source")
against.append(f"trees {self.source_match!r}")
if self.source_pkgs:
self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs")
against.append(f"modules {self.source_pkgs_match!r}")
debug("Source matching against " + " and ".join(against))
else:
if self.pylib_paths:
self.pylib_match = TreeMatcher(self.pylib_paths, "pylib")
debug(f"Python stdlib matching: {self.pylib_match!r}")
if self.include:
self.include_match = FnmatchMatcher(self.include, "include")
debug(f"Include matching: {self.include_match!r}")
if self.omit:
self.omit_match = FnmatchMatcher(self.omit, "omit")
debug(f"Omit matching: {self.omit_match!r}")
self.cover_match = TreeMatcher(self.cover_paths, "coverage")
debug(f"Coverage code matching: {self.cover_match!r}")
self.third_match = TreeMatcher(self.third_paths, "third")
debug(f"Third-party lib matching: {self.third_match!r}")
# Check if the source we want to measure has been installed as a
# third-party package.
with sys_modules_saved():
for pkg in self.source_pkgs:
try:
modfile, path = file_and_path_for_module(pkg)
debug(f"Imported source package {pkg!r} as {modfile!r}")
except CoverageException as exc:
debug(f"Couldn't import source package {pkg!r}: {exc}")
continue
if modfile:
if self.third_match.match(modfile):
debug(
f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}"
)
self.source_in_third = True
else:
for pathdir in path:
if self.third_match.match(pathdir):
debug(
f"Source is in third-party because of {pkg!r} path directory " +
f"at {pathdir!r}"
)
self.source_in_third = True
for src in self.source:
if self.third_match.match(src):
debug(f"Source is in third-party because of source directory {src!r}")
self.source_in_third = True
def should_trace(self, filename, frame=None):
"""Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a FileDisposition object.
"""
original_filename = filename
disp = disposition_init(self.disp_class, filename)
def nope(disp, reason):
"""Simple helper to make it easy to return NO."""
disp.trace = False
disp.reason = reason
return disp
if original_filename.startswith('<'):
return nope(disp, "not a real original file name")
if frame is not None:
# Compiled Python files have two file names: frame.f_code.co_filename is
# the file name at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals and frame.f_globals.get('__file__')
if dunder_file:
filename = source_for_file(dunder_file)
if original_filename and not original_filename.startswith('<'):
orig = os.path.basename(original_filename)
if orig != os.path.basename(filename):
# Files shouldn't be renamed when moved. This happens when
# exec'ing code. If it seems like something is wrong with
# the frame's file name, then just use the original.
filename = original_filename
if not filename:
# Empty string is pretty useless.
return nope(disp, "empty string isn't a file name")
if filename.startswith('memory:'):
return nope(disp, "memory isn't traceable")
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# file names like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return nope(disp, "not a real file name")
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = canonical_filename(filename)
disp.canonical_filename = canonical
# Try the plugins, see if they have an opinion about the file.
plugin = None
for plugin in self.plugins.file_tracers:
if not plugin._coverage_enabled:
continue
try:
file_tracer = plugin.file_tracer(canonical)
if file_tracer is not None:
file_tracer._coverage_plugin = plugin
disp.trace = True
disp.file_tracer = file_tracer
if file_tracer.has_dynamic_source_filename():
disp.has_dynamic_filename = True
else:
disp.source_filename = canonical_filename(
file_tracer.source_filename()
)
break
except Exception:
plugin_name = plugin._coverage_plugin_name
tb = traceback.format_exc()
self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
plugin._coverage_enabled = False
continue
else:
# No plugin wanted it: it's Python.
disp.trace = True
disp.source_filename = canonical
if not disp.has_dynamic_filename:
if not disp.source_filename:
raise CoverageException(
f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'"
)
reason = self.check_include_omit_etc(disp.source_filename, frame)
if reason:
nope(disp, reason)
return disp
def check_include_omit_etc(self, filename, frame):
"""Check a file name against the include, omit, etc, rules.
Returns a string or None. String means, don't trace, and is the reason
why. None means no reason found to not trace.
"""
modulename = name_for_module(filename, frame)
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match or self.source_pkgs_match:
extra = ""
ok = False
if self.source_pkgs_match:
if self.source_pkgs_match.match(modulename):
ok = True
if modulename in self.source_pkgs_unmatched:
self.source_pkgs_unmatched.remove(modulename)
else:
extra = f"module {modulename!r} "
if not ok and self.source_match:
if self.source_match.match(filename):
ok = True
if not ok:
return extra + "falls outside the --source spec"
if not self.source_in_third:
if self.third_match.match(filename):
return "inside --source, but is third-party"
elif self.include_match:
if not self.include_match.match(filename):
return "falls outside the --include trees"
else:
# We exclude the coverage.py code itself, since a little of it
# will be measured otherwise.
if self.cover_match.match(filename):
return "is part of coverage.py"
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(filename):
return "is in the stdlib"
# Exclude anything in the third-party installation areas.
if self.third_match.match(filename):
return "is a third-party module"
# Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(filename):
return "is inside an --omit pattern"
# No point tracing a file we can't later write to SQLite.
try:
filename.encode("utf-8")
except UnicodeEncodeError:
return "non-encodable filename"
# No reason found to skip this file.
return None
def warn_conflicting_settings(self):
"""Warn if there are settings that conflict."""
if self.include:
if self.source or self.source_pkgs:
self.warn("--include is ignored because --source is set", slug="include-ignored")
def warn_already_imported_files(self):
"""Warn if files have already been imported that we will be measuring."""
if self.include or self.source or self.source_pkgs:
warned = set()
for mod in list(sys.modules.values()):
filename = getattr(mod, "__file__", None)
if filename is None:
continue
if filename in warned:
continue
if len(getattr(mod, "__path__", ())) > 1:
# A namespace package, which confuses this code, so ignore it.
continue
disp = self.should_trace(filename)
if disp.has_dynamic_filename:
# A plugin with dynamic filenames: the Python file
# shouldn't cause a warning, since it won't be the subject
# of tracing anyway.
continue
if disp.trace:
msg = f"Already imported a file that will be measured: {filename}"
self.warn(msg, slug="already-imported")
warned.add(filename)
elif self.debug and self.debug.should('trace'):
self.debug.write(
"Didn't trace already imported file {!r}: {}".format(
disp.original_filename, disp.reason
)
)
def warn_unimported_source(self):
"""Warn about source packages that were of interest, but never traced."""
for pkg in self.source_pkgs_unmatched:
self._warn_about_unmeasured_code(pkg)
def _warn_about_unmeasured_code(self, pkg):
"""Warn about a package or module that we never traced.
`pkg` is a string, the name of the package or module.
"""
mod = sys.modules.get(pkg)
if mod is None:
self.warn(f"Module {pkg} was never imported.", slug="module-not-imported")
return
if module_is_namespace(mod):
# A namespace package. It's OK for this not to have been traced,
# since there is no code directly in it.
return
if not module_has_file(mod):
self.warn(f"Module {pkg} has no Python source.", slug="module-not-python")
return
# The module was in sys.modules, and seems like a module with code, but
# we never measured it. I guess that means it was imported before
# coverage even started.
msg = f"Module {pkg} was previously imported, but not measured"
self.warn(msg, slug="module-not-measured")
def find_possibly_unexecuted_files(self):
"""Find files in the areas of interest that might be untraced.
Yields pairs: file path, and responsible plug-in name.
"""
for pkg in self.source_pkgs:
            if pkg not in sys.modules or not module_has_file(sys.modules[pkg]):
continue
pkg_file = source_for_file(sys.modules[pkg].__file__)
yield from self._find_executable_files(canonical_path(pkg_file))
for src in self.source:
yield from self._find_executable_files(src)
def _find_plugin_files(self, src_dir):
"""Get executable files from the plugins."""
for plugin in self.plugins.file_tracers:
for x_file in plugin.find_executable_files(src_dir):
yield x_file, plugin._coverage_plugin_name
def _find_executable_files(self, src_dir):
"""Find executable files in `src_dir`.
Search for files in `src_dir` that can be executed because they
are probably importable. Don't include ones that have been omitted
by the configuration.
Yield the file path, and the plugin name that handles the file.
"""
py_files = ((py_file, None) for py_file in find_python_files(src_dir))
plugin_files = self._find_plugin_files(src_dir)
for file_path, plugin_name in itertools.chain(py_files, plugin_files):
file_path = canonical_filename(file_path)
if self.omit_match and self.omit_match.match(file_path):
# Turns out this file was omitted, so don't pull it back
# in as unexecuted.
continue
yield file_path, plugin_name
def sys_info(self):
"""Our information for Coverage.sys_info.
Returns a list of (key, value) pairs.
"""
info = [
("coverage_paths", self.cover_paths),
("stdlib_paths", self.pylib_paths),
("third_party_paths", self.third_paths),
]
matcher_names = [
'source_match', 'source_pkgs_match',
'include_match', 'omit_match',
'cover_match', 'pylib_match', 'third_match',
]
for matcher_name in matcher_names:
matcher = getattr(self, matcher_name)
if matcher:
matcher_info = matcher.info()
else:
matcher_info = '-none-'
info.append((matcher_name, matcher_info))
return info
| 23,828 | Python | 38.517413 | 100 | 0.587628 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/plugin_support.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Support for plugins."""
import os
import os.path
import sys
from coverage.exceptions import CoverageException
from coverage.misc import isolate_module
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
os = isolate_module(os)
class Plugins:
"""The currently loaded collection of coverage.py plugins."""
def __init__(self):
self.order = []
self.names = {}
self.file_tracers = []
self.configurers = []
self.context_switchers = []
self.current_module = None
self.debug = None
@classmethod
def load_plugins(cls, modules, config, debug=None):
"""Load plugins from `modules`.
Returns a Plugins object with the loaded and configured plugins.
"""
plugins = cls()
plugins.debug = debug
for module in modules:
plugins.current_module = module
__import__(module)
mod = sys.modules[module]
coverage_init = getattr(mod, "coverage_init", None)
if not coverage_init:
raise CoverageException(
f"Plugin module {module!r} didn't define a coverage_init function"
)
options = config.get_plugin_options(module)
coverage_init(plugins, options)
plugins.current_module = None
return plugins
def add_file_tracer(self, plugin):
"""Add a file tracer plugin.
`plugin` is an instance of a third-party plugin class. It must
implement the :meth:`CoveragePlugin.file_tracer` method.
"""
self._add_plugin(plugin, self.file_tracers)
def add_configurer(self, plugin):
"""Add a configuring plugin.
`plugin` is an instance of a third-party plugin class. It must
implement the :meth:`CoveragePlugin.configure` method.
"""
self._add_plugin(plugin, self.configurers)
def add_dynamic_context(self, plugin):
"""Add a dynamic context plugin.
`plugin` is an instance of a third-party plugin class. It must
implement the :meth:`CoveragePlugin.dynamic_context` method.
"""
self._add_plugin(plugin, self.context_switchers)
def add_noop(self, plugin):
"""Add a plugin that does nothing.
This is only useful for testing the plugin support.
"""
self._add_plugin(plugin, None)
def _add_plugin(self, plugin, specialized):
"""Add a plugin object.
`plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
is a list to append the plugin to.
"""
plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
if self.debug and self.debug.should('plugin'):
self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
plugin = DebugPluginWrapper(plugin, labelled)
# pylint: disable=attribute-defined-outside-init
plugin._coverage_plugin_name = plugin_name
plugin._coverage_enabled = True
self.order.append(plugin)
self.names[plugin_name] = plugin
if specialized is not None:
specialized.append(plugin)
def __nonzero__(self):
return bool(self.order)
__bool__ = __nonzero__
def __iter__(self):
return iter(self.order)
def get(self, plugin_name):
"""Return a plugin by name."""
return self.names[plugin_name]
class LabelledDebug:
"""A Debug writer, but with labels for prepending to the messages."""
def __init__(self, label, debug, prev_labels=()):
self.labels = list(prev_labels) + [label]
self.debug = debug
def add_label(self, label):
"""Add a label to the writer, and return a new `LabelledDebug`."""
return LabelledDebug(label, self.debug, self.labels)
def message_prefix(self):
"""The prefix to use on messages, combining the labels."""
prefixes = self.labels + ['']
return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
def write(self, message):
"""Write `message`, but with the labels prepended."""
self.debug.write(f"{self.message_prefix()}{message}")
class DebugPluginWrapper(CoveragePlugin):
"""Wrap a plugin, and use debug to report on what it's doing."""
def __init__(self, plugin, debug):
super().__init__()
self.plugin = plugin
self.debug = debug
def file_tracer(self, filename):
tracer = self.plugin.file_tracer(filename)
self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
if tracer:
debug = self.debug.add_label(f"file {filename!r}")
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
def file_reporter(self, filename):
reporter = self.plugin.file_reporter(filename)
self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
if reporter:
debug = self.debug.add_label(f"file {filename!r}")
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
def dynamic_context(self, frame):
context = self.plugin.dynamic_context(frame)
self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
return context
def find_executable_files(self, src_dir):
executable_files = self.plugin.find_executable_files(src_dir)
self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}")
return executable_files
def configure(self, config):
self.debug.write(f"configure({config!r})")
self.plugin.configure(config)
def sys_info(self):
return self.plugin.sys_info()
class DebugFileTracerWrapper(FileTracer):
"""A debugging `FileTracer`."""
def __init__(self, tracer, debug):
self.tracer = tracer
self.debug = debug
def _show_frame(self, frame):
"""A short string identifying a frame, for debug messages."""
return "%s@%d" % (
os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
)
def source_filename(self):
sfilename = self.tracer.source_filename()
self.debug.write(f"source_filename() --> {sfilename!r}")
return sfilename
def has_dynamic_source_filename(self):
has = self.tracer.has_dynamic_source_filename()
self.debug.write(f"has_dynamic_source_filename() --> {has!r}")
return has
def dynamic_source_filename(self, filename, frame):
dyn = self.tracer.dynamic_source_filename(filename, frame)
self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format(
filename, self._show_frame(frame), dyn,
))
return dyn
def line_number_range(self, frame):
pair = self.tracer.line_number_range(frame)
self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}")
return pair
class DebugFileReporterWrapper(FileReporter):
"""A debugging `FileReporter`."""
def __init__(self, filename, reporter, debug):
super().__init__(filename)
self.reporter = reporter
self.debug = debug
def relative_filename(self):
ret = self.reporter.relative_filename()
self.debug.write(f"relative_filename() --> {ret!r}")
return ret
def lines(self):
ret = self.reporter.lines()
self.debug.write(f"lines() --> {ret!r}")
return ret
def excluded_lines(self):
ret = self.reporter.excluded_lines()
self.debug.write(f"excluded_lines() --> {ret!r}")
return ret
def translate_lines(self, lines):
ret = self.reporter.translate_lines(lines)
self.debug.write(f"translate_lines({lines!r}) --> {ret!r}")
return ret
def translate_arcs(self, arcs):
ret = self.reporter.translate_arcs(arcs)
self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}")
return ret
def no_branch_lines(self):
ret = self.reporter.no_branch_lines()
self.debug.write(f"no_branch_lines() --> {ret!r}")
return ret
def exit_counts(self):
ret = self.reporter.exit_counts()
self.debug.write(f"exit_counts() --> {ret!r}")
return ret
def arcs(self):
ret = self.reporter.arcs()
self.debug.write(f"arcs() --> {ret!r}")
return ret
def source(self):
ret = self.reporter.source()
self.debug.write("source() --> %d chars" % (len(ret),))
return ret
def source_token_lines(self):
ret = list(self.reporter.source_token_lines())
self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
return ret
| 9,004 | Python | 30.819788 | 88 | 0.608285 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/config.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Config file for coverage.py"""
import collections
import configparser
import copy
import os
import os.path
import re
from coverage.exceptions import CoverageException
from coverage.misc import contract, isolate_module, substitute_variables
from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
os = isolate_module(os)
class HandyConfigParser(configparser.RawConfigParser):
"""Our specialization of ConfigParser."""
def __init__(self, our_file):
"""Create the HandyConfigParser.
`our_file` is True if this config file is specifically for coverage,
False if we are examining another config file (tox.ini, setup.cfg)
for possible settings.
"""
configparser.RawConfigParser.__init__(self)
self.section_prefixes = ["coverage:"]
if our_file:
self.section_prefixes.append("")
def read(self, filenames, encoding_unused=None):
"""Read a file name as UTF-8 configuration data."""
return configparser.RawConfigParser.read(self, filenames, encoding="utf-8")
def has_option(self, section, option):
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
has = configparser.RawConfigParser.has_option(self, real_section, option)
if has:
return has
return False
def has_section(self, section):
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
has = configparser.RawConfigParser.has_section(self, real_section)
if has:
return real_section
return False
def options(self, section):
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
if configparser.RawConfigParser.has_section(self, real_section):
return configparser.RawConfigParser.options(self, real_section)
raise configparser.NoSectionError(section)
def get_section(self, section):
"""Get the contents of a section, as a dictionary."""
d = {}
for opt in self.options(section):
d[opt] = self.get(section, opt)
return d
def get(self, section, option, *args, **kwargs):
"""Get a value, replacing environment variables also.
The arguments are the same as `RawConfigParser.get`, but in the found
value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
environment variable ``WORD``.
Returns the finished value.
"""
for section_prefix in self.section_prefixes:
real_section = section_prefix + section
if configparser.RawConfigParser.has_option(self, real_section, option):
break
else:
raise configparser.NoOptionError(option, section)
v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs)
v = substitute_variables(v, os.environ)
return v
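# Illustration of the substitution (hypothetical config contents): with
# COVERAGE_HOME=/tmp in the environment,
#
#   [run]
#   data_file = ${COVERAGE_HOME}/.coverage
#
# get("run", "data_file") returns "/tmp/.coverage".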
def getlist(self, section, option):
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values
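# Sketch of getlist parsing (hypothetical option value): commas and newlines
# both separate items, and blank entries are dropped.
#
#   omit =
#       */tests/*, */vendor/*
#       setup.py
#
#   getlist("run", "omit")  # -> ["*/tests/*", "*/vendor/*", "setup.py"]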
def getregexlist(self, section, option):
"""Read a list of full-line regexes.
The value of `section` and `option` is treated as a newline-separated
list of regexes. Each value is stripped of whitespace.
Returns the list of strings.
"""
line_list = self.get(section, option)
value_list = []
for value in line_list.splitlines():
value = value.strip()
try:
re.compile(value)
except re.error as e:
raise CoverageException(
f"Invalid [{section}].{option} value {value!r}: {e}"
) from e
if value:
value_list.append(value)
return value_list
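# Sketch of getregexlist validation (hypothetical values): every non-blank
# line must compile as a regex, otherwise CoverageException is raised.
#
#   exclude_lines =
#       pragma: no cover     <- fine, compiles as a regex
#       if DEBUG[            <- unterminated character set: raises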
# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)',
]
# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)',
]
# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
'while (True|1|False|0):',
'if (True|1|False|0):',
]
class CoverageConfig:
"""Coverage.py configuration.
The attributes of this class are the various settings that control the
operation of coverage.py.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self):
"""Initialize the configuration attributes to their defaults."""
# Metadata about the config.
# We tried to read these config files.
self.attempted_config_files = []
# We did read these config files, but maybe didn't find any content for us.
self.config_files_read = []
# The file that gave us our configuration.
self.config_file = None
self._config_contents = None
# Defaults for [run] and [report]
self._include = None
self._omit = None
# Defaults for [run]
self.branch = False
self.command_line = None
self.concurrency = None
self.context = None
self.cover_pylib = False
self.data_file = ".coverage"
self.debug = []
self.disable_warnings = []
self.dynamic_context = None
self.note = None
self.parallel = False
self.plugins = []
self.relative_files = False
self.run_include = None
self.run_omit = None
self.source = None
self.source_pkgs = []
self.timid = False
self._crash = None
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
self.fail_under = 0.0
self.ignore_errors = False
self.report_include = None
self.report_omit = None
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
self.partial_list = DEFAULT_PARTIAL[:]
self.precision = 0
self.report_contexts = None
self.show_missing = False
self.skip_covered = False
self.skip_empty = False
self.sort = None
# Defaults for [html]
self.extra_css = None
self.html_dir = "htmlcov"
self.html_skip_covered = None
self.html_skip_empty = None
self.html_title = "Coverage report"
self.show_contexts = False
# Defaults for [xml]
self.xml_output = "coverage.xml"
self.xml_package_depth = 99
# Defaults for [json]
self.json_output = "coverage.json"
self.json_pretty_print = False
self.json_show_contexts = False
# Defaults for [paths]
self.paths = collections.OrderedDict()
# Options for plugins
self.plugin_options = {}
MUST_BE_LIST = [
"debug", "concurrency", "plugins",
"report_omit", "report_include",
"run_omit", "run_include",
]
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
for k, v in kwargs.items():
if v is not None:
if k in self.MUST_BE_LIST and isinstance(v, str):
v = [v]
setattr(self, k, v)
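# Sketch of from_args semantics: None values are skipped, and a bare string
# for a MUST_BE_LIST option is wrapped in a single-element list.
#
#   config.from_args(branch=True, concurrency="gevent", timid=None)
#   # config.branch == True; config.concurrency == ["gevent"]; timid untouched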
@contract(filename=str)
def from_file(self, filename, warn, our_file):
"""Read configuration from a .rc file.
`filename` is a file name to read.
`our_file` is True if this config file is specifically for coverage,
False if we are examining another config file (tox.ini, setup.cfg)
for possible settings.
Returns True or False: whether the file could be read and contained some
coverage.py settings.
"""
_, ext = os.path.splitext(filename)
if ext == '.toml':
cp = TomlConfigParser(our_file)
else:
cp = HandyConfigParser(our_file)
self.attempted_config_files.append(filename)
try:
files_read = cp.read(filename)
except (configparser.Error, TomlDecodeError) as err:
raise CoverageException(f"Couldn't read config file {filename}: {err}") from err
if not files_read:
return False
self.config_files_read.extend(map(os.path.abspath, files_read))
any_set = False
try:
for option_spec in self.CONFIG_FILE_OPTIONS:
was_set = self._set_attr_from_config_option(cp, *option_spec)
if was_set:
any_set = True
except ValueError as err:
raise CoverageException(f"Couldn't read config file {filename}: {err}") from err
# Check that there are no unrecognized options.
all_options = collections.defaultdict(set)
for option_spec in self.CONFIG_FILE_OPTIONS:
section, option = option_spec[1].split(":")
all_options[section].add(option)
for section, options in all_options.items():
real_section = cp.has_section(section)
if real_section:
for unknown in set(cp.options(section)) - options:
warn(
"Unrecognized option '[{}] {}=' in config file {}".format(
real_section, unknown, filename
)
)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option)
any_set = True
# plugins can have options
for plugin in self.plugins:
if cp.has_section(plugin):
self.plugin_options[plugin] = cp.get_section(plugin)
any_set = True
# Was this file used as a config file? If it's specifically our file,
# then it was used. If we're piggybacking on someone else's file,
# then it was only used if we found some settings in it.
if our_file:
used = True
else:
used = any_set
if used:
self.config_file = os.path.abspath(filename)
with open(filename, "rb") as f:
self._config_contents = f.read()
return used
def copy(self):
"""Return a copy of the configuration."""
return copy.deepcopy(self)
CONFIG_FILE_OPTIONS = [
# These are *args for _set_attr_from_config_option:
# (attr, where, type_="")
#
# attr is the attribute to set on the CoverageConfig object.
# where is the section:name to read from the configuration file.
# type_ is the optional type to apply, by using .getTYPE to read the
# configuration value from the file.
# [run]
('branch', 'run:branch', 'boolean'),
('command_line', 'run:command_line'),
('concurrency', 'run:concurrency', 'list'),
('context', 'run:context'),
('cover_pylib', 'run:cover_pylib', 'boolean'),
('data_file', 'run:data_file'),
('debug', 'run:debug', 'list'),
('disable_warnings', 'run:disable_warnings', 'list'),
('dynamic_context', 'run:dynamic_context'),
('note', 'run:note'),
('parallel', 'run:parallel', 'boolean'),
('plugins', 'run:plugins', 'list'),
('relative_files', 'run:relative_files', 'boolean'),
('run_include', 'run:include', 'list'),
('run_omit', 'run:omit', 'list'),
('source', 'run:source', 'list'),
('source_pkgs', 'run:source_pkgs', 'list'),
('timid', 'run:timid', 'boolean'),
('_crash', 'run:_crash'),
# [report]
('exclude_list', 'report:exclude_lines', 'regexlist'),
('fail_under', 'report:fail_under', 'float'),
('ignore_errors', 'report:ignore_errors', 'boolean'),
('partial_always_list', 'report:partial_branches_always', 'regexlist'),
('partial_list', 'report:partial_branches', 'regexlist'),
('precision', 'report:precision', 'int'),
('report_contexts', 'report:contexts', 'list'),
('report_include', 'report:include', 'list'),
('report_omit', 'report:omit', 'list'),
('show_missing', 'report:show_missing', 'boolean'),
('skip_covered', 'report:skip_covered', 'boolean'),
('skip_empty', 'report:skip_empty', 'boolean'),
('sort', 'report:sort'),
# [html]
('extra_css', 'html:extra_css'),
('html_dir', 'html:directory'),
('html_skip_covered', 'html:skip_covered', 'boolean'),
('html_skip_empty', 'html:skip_empty', 'boolean'),
('html_title', 'html:title'),
('show_contexts', 'html:show_contexts', 'boolean'),
# [xml]
('xml_output', 'xml:output'),
('xml_package_depth', 'xml:package_depth', 'int'),
# [json]
('json_output', 'json:output'),
('json_pretty_print', 'json:pretty_print', 'boolean'),
('json_show_contexts', 'json:show_contexts', 'boolean'),
]
def _set_attr_from_config_option(self, cp, attr, where, type_=''):
"""Set an attribute on self if it exists in the ConfigParser.
Returns True if the attribute was set.
"""
section, option = where.split(":")
if cp.has_option(section, option):
method = getattr(cp, 'get' + type_)
setattr(self, attr, method(section, option))
return True
return False
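# Worked example of one spec from CONFIG_FILE_OPTIONS above:
# ('branch', 'run:branch', 'boolean') splits "run:branch" into section and
# option, calls cp.getboolean("run", "branch"), and stores the result on
# self.branch. With no type_, plain cp.get is used.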
def get_plugin_options(self, plugin):
"""Get a dictionary of options for the plugin named `plugin`."""
return self.plugin_options.get(plugin, {})
def set_option(self, option_name, value):
"""Set an option in the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
`value` is the new value for the option.
"""
# Special-cased options.
if option_name == "paths":
self.paths = value
return
# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
setattr(self, attr, value)
return
# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
self.plugin_options.setdefault(plugin_name, {})[key] = value
return
# If we get here, we didn't find the option.
raise CoverageException(f"No such option: {option_name!r}")
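# Hedged usage sketch (plugin name hypothetical):
#
#   config.set_option("run:branch", True)           # hard-coded option
#   config.set_option("paths", {"src": ["src/"]})   # special-cased
#   config.set_option("myplugin:key", "v")          # needs "myplugin" in
#                                                   # config.plugins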
def get_option(self, option_name):
"""Get an option from the configuration.
`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.
Returns the value of the option.
"""
# Special-cased options.
if option_name == "paths":
return self.paths
# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
return getattr(self, attr)
# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
return self.plugin_options.get(plugin_name, {}).get(key)
# If we get here, we didn't find the option.
raise CoverageException(f"No such option: {option_name!r}")
def post_process_file(self, path):
"""Make final adjustments to a file path to make it usable."""
return os.path.expanduser(path)
def post_process(self):
"""Make final adjustments to settings to make them usable."""
self.data_file = self.post_process_file(self.data_file)
self.html_dir = self.post_process_file(self.html_dir)
self.xml_output = self.post_process_file(self.xml_output)
self.paths = collections.OrderedDict(
(k, [self.post_process_file(f) for f in v])
for k, v in self.paths.items()
)
def config_files_to_try(config_file):
"""What config files should we try to read?
Returns a list of tuples:
(filename, is_our_file, was_file_specified)
"""
# Some API users were specifying ".coveragerc" to mean the same as
# True, so make it so.
if config_file == ".coveragerc":
config_file = True
specified_file = (config_file is not True)
if not specified_file:
# No file was specified. Check COVERAGE_RCFILE.
config_file = os.environ.get('COVERAGE_RCFILE')
if config_file:
specified_file = True
if not specified_file:
# Still no file specified. Default to .coveragerc
config_file = ".coveragerc"
files_to_try = [
(config_file, True, specified_file),
("setup.cfg", False, False),
("tox.ini", False, False),
("pyproject.toml", False, False),
]
return files_to_try
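# Example of the fallback order (assuming config_file=True and no
# COVERAGE_RCFILE in the environment):
#
#   config_files_to_try(True)
#   # -> [(".coveragerc", True, False), ("setup.cfg", False, False),
#   #     ("tox.ini", False, False), ("pyproject.toml", False, False)]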
def read_coverage_config(config_file, warn, **kwargs):
"""Read the coverage.py configuration.
Arguments:
config_file: a boolean or string, see the `Coverage` class for the
tricky details.
warn: a function to issue warnings.
all others: keyword arguments from the `Coverage` class, used for
setting values in the configuration.
Returns:
config:
config is a CoverageConfig object read from the appropriate
configuration file.
"""
# Build the configuration from a number of sources:
# 1) defaults:
config = CoverageConfig()
# 2) from a file:
if config_file:
files_to_try = config_files_to_try(config_file)
for fname, our_file, specified_file in files_to_try:
config_read = config.from_file(fname, warn, our_file=our_file)
if config_read:
break
if specified_file:
raise CoverageException(f"Couldn't read {fname!r} as a config file")
# $set_env.py: COVERAGE_DEBUG - Options for --debug.
# 3) from environment variables:
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
config.data_file = env_data_file
debugs = os.environ.get('COVERAGE_DEBUG')
if debugs:
config.debug.extend(d.strip() for d in debugs.split(","))
# 4) from constructor arguments:
config.from_args(**kwargs)
# Once all the config has been collected, there's a little post-processing
# to do.
config.post_process()
return config
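# Precedence, as implemented above (later sources win): defaults < config
# file < COVERAGE_FILE/COVERAGE_DEBUG environment variables < constructor
# keyword arguments. A hedged sketch:
#
#   config = read_coverage_config(True, warn=print, branch=True)
#   # even if .coveragerc sets branch = False, the kwarg re-sets it to True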
| 19,641 | Python | 33.580986 | 92 | 0.586274 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/exceptions.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Exceptions coverage.py can raise."""
class BaseCoverageException(Exception):
"""The base of all Coverage exceptions."""
pass
class CoverageException(BaseCoverageException):
"""An exception raised by a coverage.py function."""
pass
class NoSource(CoverageException):
"""We couldn't find the source for a module."""
pass
class NoCode(NoSource):
"""We couldn't find any code at all."""
pass
class NotPython(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
class ExceptionDuringRun(CoverageException):
"""An exception happened while running customer code.
Construct it with three arguments, the values from `sys.exc_info`.
"""
pass
class StopEverything(BaseCoverageException):
"""An exception that means everything should stop.
The CoverageTest class converts these to SkipTest, so that when running
tests, raising this exception will automatically skip the test.
"""
pass
class CoverageWarning(Warning):
"""A warning from Coverage.py."""
pass
| 1,237 | Python | 21.925926 | 79 | 0.713015 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/phystokens.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Better tokenizing for coverage.py."""
import ast
import keyword
import re
import token
import tokenize
from coverage import env
from coverage.misc import contract
def phys_tokens(toks):
"""Return all physical tokens, even line continuations.
tokenize.generate_tokens() doesn't return a token for the backslash that
continues lines. This wrapper provides those tokens so that we can
re-create a faithful representation of the original source.
Returns the same values as generate_tokens()
"""
last_line = None
last_lineno = -1
last_ttext = None
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
if last_lineno != elineno:
if last_line and last_line.endswith("\\\n"):
# We are at the beginning of a new line, and the last line
# ended with a backslash. We probably have to inject a
# backslash token into the stream. Unfortunately, there's more
# to figure out. This code::
#
# usage = """\
# HEY THERE
# """
#
# triggers this condition, but the token text is::
#
# '"""\\\nHEY THERE\n"""'
#
# so we need to figure out if the backslash is already in the
# string token or not.
inject_backslash = True
if last_ttext.endswith("\\"):
inject_backslash = False
elif ttype == token.STRING:
if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
# It's a multi-line string and the first line ends with
# a backslash, so we don't need to inject another.
inject_backslash = False
if inject_backslash:
# Figure out what column the backslash is in.
ccol = len(last_line.split("\n")[-2]) - 1
# Yield the token, with a fake token type.
yield (
99999, "\\\n",
(slineno, ccol), (slineno, ccol+2),
last_line
)
last_line = ltext
if ttype not in (tokenize.NEWLINE, tokenize.NL):
last_ttext = ttext
yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
last_lineno = elineno
class MatchCaseFinder(ast.NodeVisitor):
"""Helper for finding match/case lines."""
def __init__(self, source):
# This will be the set of line numbers that start match or case statements.
self.match_case_lines = set()
self.visit(ast.parse(source))
def visit_Match(self, node):
"""Invoked by ast.NodeVisitor.visit"""
self.match_case_lines.add(node.lineno)
for case in node.cases:
self.match_case_lines.add(case.pattern.lineno)
self.generic_visit(node)
@contract(source='unicode')
def source_token_lines(source):
"""Generate a series of lines, one for each line in `source`.
Each line is a list of pairs, each pair is a token::
[('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
Each pair has a token class, and the token text.
If you concatenate all the token texts, and then join them with newlines,
you should have your original `source` back, with two differences:
trailing whitespace is not preserved, and a final line with no newline
is indistinguishable from a final line with a newline.
"""
ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
line = []
col = 0
source = source.expandtabs(8).replace('\r\n', '\n')
tokgen = generate_tokens(source)
if env.PYBEHAVIOR.soft_keywords:
match_case_lines = MatchCaseFinder(source).match_case_lines
for ttype, ttext, (sline, scol), (_, ecol), _ in phys_tokens(tokgen):
mark_start = True
for part in re.split('(\n)', ttext):
if part == '\n':
yield line
line = []
col = 0
mark_end = False
elif part == '':
mark_end = False
elif ttype in ws_tokens:
mark_end = False
else:
if mark_start and scol > col:
line.append(("ws", " " * (scol - col)))
mark_start = False
tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
if ttype == token.NAME:
if keyword.iskeyword(ttext):
# Hard keywords are always keywords.
tok_class = "key"
elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
# Soft keywords appear at the start of the line, on lines that start
# match or case statements.
if len(line) == 0:
is_start_of_line = True
elif (len(line) == 1) and line[0][0] == "ws":
is_start_of_line = True
else:
is_start_of_line = False
if is_start_of_line and sline in match_case_lines:
tok_class = "key"
line.append((tok_class, part))
mark_end = True
scol = 0
if mark_end:
col = ecol
if line:
yield line
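# Example of the token-line structure (hypothetical one-line source):
#
#   list(source_token_lines("def hi():\n"))
#   # -> [[('key', 'def'), ('ws', ' '), ('nam', 'hi'),
#   #      ('op', '('), ('op', ')'), ('op', ':')]]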
class CachedTokenizer:
"""A one-element cache around tokenize.generate_tokens.
When reporting, coverage.py tokenizes files twice, once to find the
structure of the file, and once to syntax-color it. Tokenizing is
expensive, and easily cached.
This is a one-element cache so that our twice-in-a-row tokenizing doesn't
actually tokenize twice.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
@contract(text='unicode')
def generate_tokens(self, text):
"""A stand-in for `tokenize.generate_tokens`."""
if text != self.last_text:
self.last_text = text
readline = iter(text.splitlines(True)).__next__
self.last_tokens = list(tokenize.generate_tokens(readline))
return self.last_tokens
# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens
COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
@contract(source='bytes')
def source_encoding(source):
"""Determine the encoding for `source`, according to PEP 263.
`source` is a byte string: the text of the program.
Returns a string, the name of the encoding.
"""
readline = iter(source.splitlines(True)).__next__
return tokenize.detect_encoding(readline)[0]
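# PEP 263 sketch: a coding cookie in the first two lines wins; otherwise
# tokenize's default is reported.
#
#   source_encoding(b"# -*- coding: iso-8859-1 -*-\n")  # -> "iso-8859-1"
#   source_encoding(b"print('hi')\n")                   # -> "utf-8"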
@contract(source='unicode')
def compile_unicode(source, filename, mode):
"""Just like the `compile` builtin, but works on any Unicode string.
Python 2's compile() builtin has a stupid restriction: if the source string
is Unicode, then it may not have an encoding declaration in it. Why not?
Who knows! It also decodes to utf-8, and then tries to interpret those
utf-8 bytes according to the encoding declaration. Why? Who knows!
This function neuters the coding declaration, and compiles it.
"""
source = neuter_encoding_declaration(source)
code = compile(source, filename, mode)
return code
@contract(source='unicode', returns='unicode')
def neuter_encoding_declaration(source):
"""Return `source`, with any encoding declaration neutered."""
if COOKIE_RE.search(source):
source_lines = source.splitlines(True)
for lineno in range(min(2, len(source_lines))):
source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
source = "".join(source_lines)
return source
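# Sketch of the neutering effect (only the first two lines are examined):
#
#   neuter_encoding_declaration("# coding: latin-1\nx = 1\n")
#   # -> "# (deleted declaration)\nx = 1\n"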
| 8,236 | Python | 35.772321 | 97 | 0.568237 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/report.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Reporter foundation for coverage.py."""
import sys
from coverage.exceptions import CoverageException, NotPython
from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import ensure_dir_for_file, file_be_gone
def render_report(output_path, reporter, morfs, msgfn):
"""Run a one-file report generator, managing the output file.
This function ensures the output file is ready to be written to, writes the
report to it, then closes the file and cleans up.
"""
file_to_close = None
delete_file = False
if output_path == "-":
outfile = sys.stdout
else:
# Ensure that the output directory is created; done here
# because this report pre-opens the output file.
# HTMLReport does this using the Report plumbing because
# its task is more complex, being multiple files.
ensure_dir_for_file(output_path)
outfile = open(output_path, "w", encoding="utf-8")
file_to_close = outfile
try:
return reporter.report(morfs, outfile=outfile)
except CoverageException:
delete_file = True
raise
finally:
if file_to_close:
file_to_close.close()
if delete_file:
file_be_gone(output_path) # pragma: part covered (doesn't return)
else:
msgfn(f"Wrote {reporter.report_type} to {output_path}")
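# Hedged usage sketch (reporter object hypothetical): "-" writes to stdout,
# any other path is opened as UTF-8, and the file is deleted again if the
# reporter raises CoverageException.
#
#   total = render_report("out.json", json_reporter, None, print)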
def get_analysis_to_report(coverage, morfs):
"""Get the files to report on.
For each morf in `morfs`, if it should be reported on (based on the omit
and include configuration options), yield a pair, the `FileReporter` and
`Analysis` for the morf.
"""
file_reporters = coverage._get_file_reporters(morfs)
config = coverage.config
if config.report_include:
matcher = FnmatchMatcher(prep_patterns(config.report_include), "report_include")
file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
if config.report_omit:
matcher = FnmatchMatcher(prep_patterns(config.report_omit), "report_omit")
file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
if not file_reporters:
raise CoverageException("No data to report.")
for fr in sorted(file_reporters):
try:
analysis = coverage._analyze(fr)
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
# NotPython is only raised by PythonFileReporter, which has a
# should_be_python() method.
if fr.should_be_python():
if config.ignore_errors:
msg = f"Couldn't parse Python file '{fr.filename}'"
coverage._warn(msg, slug="couldnt-parse")
else:
raise
except Exception as exc:
if config.ignore_errors:
msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
coverage._warn(msg, slug="couldnt-parse")
else:
raise
else:
yield (fr, analysis)
| 3,327 | Python | 35.173913 | 91 | 0.62609 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/parser.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Code parsing for coverage.py."""
import ast
import collections
import os
import re
import token
import tokenize
from coverage import env
from coverage.bytecode import code_objects
from coverage.debug import short_stack
from coverage.exceptions import NoSource, NotPython, StopEverything
from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser:
"""Parse code to find executable lines, excluded lines, etc.
This information is all based on static analysis: no code execution is
involved.
"""
@contract(text='unicode|None')
def __init__(self, text=None, filename=None, exclude=None):
"""
Source can be provided as `text`, the text itself, or `filename`, from
which the text will be read. Excluded lines are those that match
`exclude`, a regex.
"""
assert text or filename, "PythonParser needs either text or filename"
self.filename = filename or "<code>"
self.text = text
if not self.text:
from coverage.python import get_python_source
try:
self.text = get_python_source(self.filename)
except OSError as err:
raise NoSource(f"No source for code: '{self.filename}': {err}") from err
self.exclude = exclude
# The text lines of the parsed code.
self.lines = self.text.split('\n')
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
self.statements = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
self.excluded = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
self.raw_statements = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
self.raw_excluded = set()
# The line numbers of class and function definitions.
self.raw_classdefs = set()
# The line numbers of docstring lines.
self.raw_docstrings = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
self._multiline = {}
# Lazily-created arc data, and missing arc descriptions.
self._all_arcs = None
self._missing_arc_fragments = None
def lines_matching(self, *regexes):
"""Find the lines matching one of a list of regexes.
Returns a set of line numbers, the lines that contain a match for one
of the regexes in `regexes`. The entire line needn't match, just a
part of it.
"""
combined = join_regex(regexes)
regex_c = re.compile(combined)
matches = set()
for i, ltext in enumerate(self.lines, start=1):
if regex_c.search(ltext):
matches.add(i)
return matches
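# Sketch (hypothetical source): with lines ["x = 1", "y = 2  # nocov"],
# lines_matching(r"# nocov") returns {2}. Matching uses re.search, so a
# partial-line match is enough.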
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
A handful of attributes are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
self.raw_excluded = self.lines_matching(self.exclude)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
indent = 0
exclude_indent = 0
excluding = False
excluding_decorators = False
prev_toktype = token.INDENT
first_line = None
empty = True
first_on_line = True
tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
if self.show_tokens: # pragma: debugging
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
elif toktype == token.NAME:
if ttext == 'class':
# Class definitions look like branches in the bytecode, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
self.raw_classdefs.add(slineno)
elif toktype == token.OP:
if ttext == ':':
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
self.raw_excluded.add(elineno)
exclude_indent = indent
excluding = True
excluding_decorators = False
elif ttext == '@' and first_on_line:
# A decorator.
if elineno in self.raw_excluded:
excluding_decorators = True
if excluding_decorators:
self.raw_excluded.add(elineno)
elif toktype == token.STRING and prev_toktype == token.INDENT:
# Strings that are first on an indented line are docstrings.
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
for l in range(first_line, elineno+1):
self._multiline[l] = first_line
first_line = None
first_on_line = True
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-whitespace token.
empty = False
if first_line is None:
# The token is not whitespace, and is the first in a
# statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
self.raw_excluded.add(elineno)
first_on_line = False
prev_toktype = toktype
# Find the starts of the executable statements.
if not empty:
byte_parser = ByteParser(self.text, filename=self.filename)
self.raw_statements.update(byte_parser._find_statements())
# The first line of modules can lie and say 1 always, even if the first
# line of code is later. If so, map 1 to the actual first line of the
# module.
if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
self._multiline[1] = min(self.raw_statements)
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
if line < 0:
line = -self._multiline.get(-line, -line)
else:
line = self._multiline.get(line, line)
return line
def first_lines(self, lines):
"""Map the line numbers in `lines` to the correct first line of the
statement.
Returns a set of the first lines.
"""
return {self.first_line(l) for l in lines}
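# Sketch of the normalization (hypothetical statement spanning lines 3-5,
# so self._multiline is {3: 3, 4: 3, 5: 3}):
#
#   self.first_line(4)           # -> 3
#   self.first_lines({4, 5, 9})  # -> {3, 9}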
def translate_lines(self, lines):
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
def translate_arcs(self, arcs):
"""Implement `FileReporter.translate_arcs`."""
return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
Sets the .excluded and .statements attributes, normalized to the first
line of multi-line statements.
"""
try:
self._raw_parse()
except (tokenize.TokenError, IndentationError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
lineno = err.args[1][0] # TokenError
raise NotPython(
f"Couldn't parse '{self.filename}' as Python source: " +
f"{err.args[0]!r} at line {lineno}"
) from err
self.excluded = self.first_lines(self.raw_excluded)
ignore = self.excluded | self.raw_docstrings
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
def arcs(self):
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
to the first line of multi-line statements.
"""
if self._all_arcs is None:
self._analyze_ast()
return self._all_arcs
def _analyze_ast(self):
"""Run the AstArcAnalyzer and save its results.
`_all_arcs` is the set of arcs in the code.
"""
aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
aaa.analyze()
self._all_arcs = set()
for l1, l2 in aaa.arcs:
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
self._all_arcs.add((fl1, fl2))
self._missing_arc_fragments = aaa.missing_arc_fragments
def exit_counts(self):
"""Get a count of exits from that each line.
Excluded lines are excluded.
"""
exit_counts = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
continue
if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
if l2 in self.excluded:
# Arcs to excluded lines shouldn't count.
continue
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
for l in self.raw_classdefs:
# Ensure key is there: class definitions can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
return exit_counts
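# Worked example: an `if` on line 1 with arcs (1, 2) and (1, 4) gives
# exit_counts[1] == 2; any count above 1 marks a branching line for
# branch coverage.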
def missing_arc_description(self, start, end, executed_arcs=None):
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
self._analyze_ast()
actual_start = start
if (
executed_arcs and
end < 0 and end == -start and
(end, start) not in executed_arcs and
(end, start) in self._missing_arc_fragments
):
# It's a one-line callable, and we never even started it,
# and we have a message about not starting it.
start, end = end, start
fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
msgs = []
for smsg, emsg in fragment_pairs:
if emsg is None:
if end < 0:
# Hmm, maybe we have a one-line callable, let's check.
if (-end, end) in self._missing_arc_fragments:
return self.missing_arc_description(-end, end)
emsg = "didn't jump to the function exit"
else:
emsg = "didn't jump to line {lineno}"
emsg = emsg.format(lineno=end)
msg = f"line {actual_start} {emsg}"
if smsg is not None:
msg += f", because {smsg.format(lineno=actual_start)}"
msgs.append(msg)
return " or ".join(msgs)
class ByteParser:
"""Parse bytecode to understand the structure of code."""
@contract(text='unicode')
def __init__(self, text, code=None, filename=None):
self.text = text
if code:
self.code = code
else:
try:
self.code = compile_unicode(text, filename, "exec")
except SyntaxError as synerr:
raise NotPython(
"Couldn't parse '%s' as Python source: '%s' at line %d" % (
filename, synerr.msg, synerr.lineno
)
) from synerr
# Alternative Python implementations don't always provide all the
# attributes on code objects that we need to do the analysis.
for attr in ['co_lnotab', 'co_firstlineno']:
if not hasattr(self.code, attr):
raise StopEverything( # pragma: only jython
"This implementation of Python doesn't support code analysis.\n" +
"Run coverage.py under another Python for this command."
)
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
return (ByteParser(self.text, code=c) for c in code_objects(self.code))
def _line_numbers(self):
"""Yield the line numbers possible in this code object.
Uses co_lnotab described in Python/compile.c to find the
line numbers. Produces a sequence: l0, l1, ...
"""
if hasattr(self.code, "co_lines"):
for _, _, line in self.code.co_lines():
if line is not None:
yield line
else:
# Adapted from dis.py in the standard library.
byte_increments = self.code.co_lnotab[0::2]
line_increments = self.code.co_lnotab[1::2]
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
yield line_num
last_line_num = line_num
byte_num += byte_incr
if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
line_incr -= 0x100
line_num += line_incr
if line_num != last_line_num:
yield line_num
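# Sketch of the co_lnotab path (pre-3.10 interpreters): byte/line increment
# pairs accumulate from co_firstlineno. E.g. with co_firstlineno=1 and
# pairs (4, +1), (6, +2), the generator yields line numbers 1, 2, 4.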
def _find_statements(self):
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
"""
for bp in self.child_parsers():
# Get all of the lineno information from this code.
yield from bp._line_numbers()
#
# AST analysis
#
class BlockBase:
"""
Blocks need to handle various exiting statements in their own ways.
All of these methods take a list of exits, and a callable `add_arc`
function that they can use to add arcs if needed. They return True if the
exits are handled, or False if the search should continue up the block
stack.
"""
# pylint: disable=unused-argument
def process_break_exits(self, exits, add_arc):
"""Process break exits."""
# Because break can only appear in loops, and most subclasses
# implement process_break_exits, this function is never reached.
raise AssertionError
def process_continue_exits(self, exits, add_arc):
"""Process continue exits."""
# Because continue can only appear in loops, and most subclasses
# implement process_continue_exits, this function is never reached.
raise AssertionError
def process_raise_exits(self, exits, add_arc):
"""Process raise exits."""
return False
def process_return_exits(self, exits, add_arc):
"""Process return exits."""
return False
class LoopBlock(BlockBase):
"""A block on the block stack representing a `for` or `while` loop."""
@contract(start=int)
def __init__(self, start):
# The line number where the loop starts.
self.start = start
# A set of ArcStarts, the arcs from break statements exiting this loop.
self.break_exits = set()
def process_break_exits(self, exits, add_arc):
self.break_exits.update(exits)
return True
def process_continue_exits(self, exits, add_arc):
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
return True
class FunctionBlock(BlockBase):
"""A block on the block stack representing a function definition."""
@contract(start=int, name=str)
def __init__(self, start, name):
# The line number where the function starts.
self.start = start
# The name of the function.
self.name = name
def process_raise_exits(self, exits, add_arc):
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
f"didn't except from function {self.name!r}",
)
return True
def process_return_exits(self, exits, add_arc):
for xit in exits:
add_arc(
xit.lineno, -self.start, xit.cause,
f"didn't return from function {self.name!r}",
)
return True
class TryBlock(BlockBase):
"""A block on the block stack representing a `try` block."""
@contract(handler_start='int|None', final_start='int|None')
def __init__(self, handler_start, final_start):
# The line number of the first "except" handler, if any.
self.handler_start = handler_start
# The line number of the "finally:" clause, if any.
self.final_start = final_start
# The ArcStarts for breaks/continues/returns/raises inside the "try:"
# that need to route through the "finally:" clause.
self.break_from = set()
self.continue_from = set()
self.raise_from = set()
self.return_from = set()
def process_break_exits(self, exits, add_arc):
if self.final_start is not None:
self.break_from.update(exits)
return True
return False
def process_continue_exits(self, exits, add_arc):
if self.final_start is not None:
self.continue_from.update(exits)
return True
return False
def process_raise_exits(self, exits, add_arc):
if self.handler_start is not None:
for xit in exits:
add_arc(xit.lineno, self.handler_start, xit.cause)
else:
assert self.final_start is not None
self.raise_from.update(exits)
return True
def process_return_exits(self, exits, add_arc):
if self.final_start is not None:
self.return_from.update(exits)
return True
return False
class WithBlock(BlockBase):
"""A block on the block stack representing a `with` block."""
@contract(start=int)
def __init__(self, start):
# We only ever use this block if it is needed, so that we don't have to
# check this setting in all the methods.
assert env.PYBEHAVIOR.exit_through_with
# The line number of the with statement.
self.start = start
# The ArcStarts for breaks/continues/returns/raises inside the "with:"
# that need to go through the with-statement while exiting.
self.break_from = set()
self.continue_from = set()
self.return_from = set()
def _process_exits(self, exits, add_arc, from_set=None):
"""Helper to process the four kinds of exits."""
for xit in exits:
add_arc(xit.lineno, self.start, xit.cause)
if from_set is not None:
from_set.update(exits)
return True
def process_break_exits(self, exits, add_arc):
return self._process_exits(exits, add_arc, self.break_from)
def process_continue_exits(self, exits, add_arc):
return self._process_exits(exits, add_arc, self.continue_from)
def process_raise_exits(self, exits, add_arc):
return self._process_exits(exits, add_arc)
def process_return_exits(self, exits, add_arc):
return self._process_exits(exits, add_arc, self.return_from)
class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
"""The information needed to start an arc.
`lineno` is the line number the arc starts from.
`cause` is an English text fragment used as the `startmsg` for
AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
arc wasn't executed, so should fit well into a sentence of the form,
"Line 17 didn't run because {cause}." The fragment can include "{lineno}"
to have `lineno` interpolated into it.
"""
def __new__(cls, lineno, cause=None):
return super().__new__(cls, lineno, cause)
# Define contract words that PyContract doesn't have.
# ArcStarts is for a list or set of ArcStart's.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
class NodeList:
"""A synthetic fictitious node, containing a sequence of nodes.
This is used when collapsing optimized if-statements, to represent the
unconditional execution of one of the clauses.
"""
def __init__(self, body):
self.body = body
self.lineno = body[0].lineno
# TODO: some add_arcs methods here don't add arcs, they return them. Rename them.
# TODO: the cause messages have too many commas.
# TODO: Shouldn't the cause messages join with "and" instead of "or"?
def ast_parse(text):
"""How we create an AST parse."""
return ast.parse(neuter_encoding_declaration(text))
class AstArcAnalyzer:
"""Analyze source text with an AST to find executable code paths."""
@contract(text='unicode', statements=set)
def __init__(self, text, statements, multiline):
self.root_node = ast_parse(text)
# TODO: I think this is happening in too many places.
self.statements = {multiline.get(l, l) for l in statements}
self.multiline = multiline
# Turn on AST dumps with an environment variable.
# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
dump_ast = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
if dump_ast: # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print(f"Statements: {self.statements}")
print(f"Multiline map: {self.multiline}")
ast_dump(self.root_node)
self.arcs = set()
# A map from arc pairs to a list of pairs of sentence fragments:
# { (start, end): [(startmsg, endmsg), ...], }
#
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
self.missing_arc_fragments = collections.defaultdict(list)
self.block_stack = []
# $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
def analyze(self):
"""Examine the AST tree from `root_node` to determine possible arcs.
This sets the `arcs` attribute to be a set of (from, to) line number
pairs.
"""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, "_code_object__" + node_name, None)
if code_object_handler is not None:
code_object_handler(node)
@contract(start=int, end=int)
def add_arc(self, start, end, smsg=None, emsg=None):
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
print(f"\nAdding arc: ({start}, {end}): {smsg!r}, {emsg!r}")
print(short_stack(limit=6))
self.arcs.add((start, end))
if smsg is not None or emsg is not None:
self.missing_arc_fragments[(start, end)].append((smsg, emsg))
def nearest_blocks(self):
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
@contract(returns=int)
def line_for_node(self, node):
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_line__" + node_name, None)
if handler is not None:
return handler(node)
else:
return node.lineno
def _line_decorated(self, node):
"""Compute first line number for things that can be decorated (classes and functions)."""
lineno = node.lineno
if env.PYBEHAVIOR.trace_decorated_def:
if node.decorator_list:
lineno = node.decorator_list[0].lineno
return lineno
def _line__Assign(self, node):
return self.line_for_node(node.value)
_line__ClassDef = _line_decorated
def _line__Dict(self, node):
if node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
# Unpacked dict literals `{**{'a':1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
return node.lineno
_line__FunctionDef = _line_decorated
_line__AsyncFunctionDef = _line_decorated
def _line__List(self, node):
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
def _line__Module(self, node):
if env.PYBEHAVIOR.module_firstline_1:
return 1
elif node.body:
return self.line_for_node(node.body[0])
else:
# Empty modules have no line number; they always start at 1.
return 1
# The node types that just flow to the next node with no complications.
OK_TO_DEFAULT = {
"AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global",
"Import", "ImportFrom", "Nonlocal", "Pass",
}
@contract(returns='ArcStarts')
def add_arcs(self, node):
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next. Because a
node represents an entire sub-tree (including its children), the exits
from a node can be arbitrarily complex::
if something(1):
if other(2):
doit(3)
else:
doit(5)
There are two exits from line 1: they start at line 3 and line 5.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_handle__" + node_name, None)
if handler is not None:
return handler(node)
else:
# No handler: either it's something that's ok to default (a simple
# statement), or it's something we overlooked.
if env.TESTING:
if node_name not in self.OK_TO_DEFAULT:
raise Exception(f"*** Unhandled: {node}") # pragma: only failure
# Default for simple statements: one exit from this node.
return {ArcStart(self.line_for_node(node))}
@one_of("from_start, prev_starts")
@contract(returns='ArcStarts')
def add_body_arcs(self, body, from_start=None, prev_starts=None):
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
be the previous line in flow before this body. `prev_starts` is a set
of ArcStarts that can be the previous line. Only one of them should be
given.
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
prev_starts = {from_start}
for body_node in body:
lineno = self.line_for_node(body_node)
first_line = self.multiline.get(lineno, lineno)
if first_line not in self.statements:
body_node = self.find_non_missing_node(body_node)
if body_node is None:
continue
lineno = self.line_for_node(body_node)
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.add_arcs(body_node)
return prev_starts
def find_non_missing_node(self, node):
"""Search `node` looking for a child that has not been optimized away.
This might return the node you started with, or it will work recursively
to find a child node in self.statements.
Returns a node, or None if none of the node remains.
"""
# This repeats work just done in add_body_arcs, but this duplication
# means we can avoid a function call in the 99.9999% case of not
# optimizing away statements.
lineno = self.line_for_node(node)
first_line = self.multiline.get(lineno, lineno)
if first_line in self.statements:
return node
missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
if missing_fn:
node = missing_fn(node)
else:
node = None
return node
# Missing nodes: _missing__*
#
# Entire statements can be optimized away by Python. They will appear in
# the AST, but not the bytecode. These functions are called (by
# find_non_missing_node) to find a node to use instead of the missing
# node. They can return None if the node should truly be gone.
def _missing__If(self, node):
# If the if-node is missing, then one of its children might still be
# here, but not both. So return the first of the two that isn't missing.
# Use a NodeList to hold the clauses as a single node.
non_missing = self.find_non_missing_node(NodeList(node.body))
if non_missing:
return non_missing
if node.orelse:
return self.find_non_missing_node(NodeList(node.orelse))
return None
def _missing__NodeList(self, node):
# A NodeList might be a mixture of missing and present nodes. Find the
# ones that are present.
non_missing_children = []
for child in node.body:
child = self.find_non_missing_node(child)
if child is not None:
non_missing_children.append(child)
# Return the simplest representation of the present children.
if not non_missing_children:
return None
if len(non_missing_children) == 1:
return non_missing_children[0]
return NodeList(non_missing_children)
def _missing__While(self, node):
body_nodes = self.find_non_missing_node(NodeList(node.body))
if not body_nodes:
return None
# Make a synthetic While-true node.
new_while = ast.While()
new_while.lineno = body_nodes.lineno
new_while.test = ast.Name()
new_while.test.lineno = body_nodes.lineno
new_while.test.id = "True"
new_while.body = body_nodes.body
new_while.orelse = None
return new_while
def is_constant_expr(self, node):
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["Constant", "NameConstant", "Num"]:
return "Num"
elif node_name == "Name":
if node.id in ["True", "False", "None", "__debug__"]:
return "Name"
return None
# In the fullness of time, these might be good tests to write:
# while EXPR:
# while False:
# listcomps hidden deep in other expressions
# listcomps hidden in lists: x = [[i for i in range(10)]]
# nested function definitions
# Exit processing: process_*_exits
#
# These functions process the four kinds of jump exits: break, continue,
# raise, and return. To figure out where an exit goes, we have to look at
# the block stack context. For example, a break will jump to the nearest
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
@contract(exits='ArcStarts')
def process_break_exits(self, exits):
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_break_exits(exits, self.add_arc):
break
@contract(exits='ArcStarts')
def process_continue_exits(self, exits):
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_continue_exits(exits, self.add_arc):
break
@contract(exits='ArcStarts')
def process_raise_exits(self, exits):
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if block.process_raise_exits(exits, self.add_arc):
break
@contract(exits='ArcStarts')
def process_return_exits(self, exits):
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_return_exits(exits, self.add_arc):
break
# Handlers: _handle__*
#
# Each handler deals with a specific AST node type, dispatched from
# add_arcs. Handlers return the set of exits from that node, and can
# also call self.add_arc to record arcs they find. These functions mirror
# the Python semantics of each syntactic construct. See the docstring
# for add_arcs to understand the concept of exits from a node.
#
# Every node type that represents a statement should have a handler, or it
# should be listed in OK_TO_DEFAULT.
@contract(returns='ArcStarts')
def _handle__Break(self, node):
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits([break_start])
return set()
@contract(returns='ArcStarts')
def _handle_decorated(self, node):
"""Add arcs for things that can be decorated (classes and functions)."""
main_line = last = node.lineno
if node.decorator_list:
if env.PYBEHAVIOR.trace_decorated_def:
last = None
for dec_node in node.decorator_list:
dec_start = self.line_for_node(dec_node)
if last is not None and dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
if env.PYBEHAVIOR.trace_decorated_def:
self.add_arc(last, main_line)
last = main_line
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
if node.body:
body_start = self.line_for_node(node.body[0])
body_start = self.multiline.get(body_start, body_start)
for lineno in range(last+1, body_start):
if lineno in self.statements:
self.add_arc(last, lineno)
last = lineno
# The body is handled in collect_arcs.
return {ArcStart(last)}
_handle__ClassDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__Continue(self, node):
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits([continue_start])
return set()
@contract(returns='ArcStarts')
def _handle__For(self, node):
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
exits = self.add_body_arcs(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No else clause: exit from the for line.
exits.add(from_start)
return exits
_handle__AsyncFor = _handle__For
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__If(self, node):
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
exits |= self.add_body_arcs(node.orelse, from_start=from_start)
return exits
@contract(returns='ArcStarts')
def _handle__Match(self, node):
start = self.line_for_node(node)
last_start = start
exits = set()
had_wildcard = False
for case in node.cases:
case_start = self.line_for_node(case.pattern)
if isinstance(case.pattern, ast.MatchAs):
had_wildcard = True
self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
from_start = ArcStart(case_start, cause="the pattern on line {lineno} never matched")
exits |= self.add_body_arcs(case.body, from_start=from_start)
last_start = case_start
if not had_wildcard:
exits.add(from_start)
return exits
@contract(returns='ArcStarts')
def _handle__NodeList(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
@contract(returns='ArcStarts')
def _handle__Raise(self, node):
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits([raise_start])
# `raise` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Return(self, node):
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits([return_start])
# `return` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Try(self, node):
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
handler_start = None
if node.finalbody:
final_start = self.line_for_node(node.finalbody[0])
else:
final_start = None
# This is true by virtue of Python syntax: have to have either except
# or finally, or both.
assert handler_start is not None or final_start is not None
try_block = TryBlock(handler_start, final_start)
self.block_stack.append(try_block)
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
if node.handlers:
# If there are `except` clauses, then raises in the try body
# will already jump to them. Start this set over for raises in
# `except` and `else`.
try_block.raise_from = set()
else:
self.block_stack.pop()
handler_exits = set()
if node.handlers:
last_handler_start = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
self.add_arc(last_handler_start, handler_start)
last_handler_start = handler_start
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
if node.orelse:
exits = self.add_body_arcs(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
final_from = ( # You can get to the `finally` clause from:
exits | # the exits of the body or `else` clause,
try_block.break_from | # or a `break`,
try_block.continue_from | # or a `continue`,
try_block.raise_from | # or a `raise`,
try_block.return_from # or a `return`.
)
final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
if try_block.break_from:
if env.PYBEHAVIOR.finally_jumps_back:
for break_line in try_block.break_from:
lineno = break_line.lineno
cause = break_line.cause.format(lineno=lineno)
for final_exit in final_exits:
self.add_arc(final_exit.lineno, lineno, cause)
breaks = try_block.break_from
else:
breaks = self._combine_finally_starts(try_block.break_from, final_exits)
self.process_break_exits(breaks)
if try_block.continue_from:
if env.PYBEHAVIOR.finally_jumps_back:
for continue_line in try_block.continue_from:
lineno = continue_line.lineno
cause = continue_line.cause.format(lineno=lineno)
for final_exit in final_exits:
self.add_arc(final_exit.lineno, lineno, cause)
continues = try_block.continue_from
else:
continues = self._combine_finally_starts(try_block.continue_from, final_exits)
self.process_continue_exits(continues)
if try_block.raise_from:
self.process_raise_exits(
self._combine_finally_starts(try_block.raise_from, final_exits)
)
if try_block.return_from:
if env.PYBEHAVIOR.finally_jumps_back:
for return_line in try_block.return_from:
lineno = return_line.lineno
cause = return_line.cause.format(lineno=lineno)
for final_exit in final_exits:
self.add_arc(final_exit.lineno, lineno, cause)
returns = try_block.return_from
else:
returns = self._combine_finally_starts(try_block.return_from, final_exits)
self.process_return_exits(returns)
if exits:
# The finally clause's exits are only exits for the try block
# as a whole if the try block had some exits to begin with.
exits = final_exits
return exits
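    # To make the `finally` plumbing above concrete, an illustrative sketch:
    #
    #     1  try:
    #     2      f()
    #     3  finally:
    #     4      g()
    #
    # The body exits at line 2, the `finally` clause gets the arc 2->4, and
    # since the body had exits, line 4's exits become the exits of the whole
    # statement.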
@contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
def _combine_finally_starts(self, starts, exits):
"""Helper for building the cause of `finally` branches.
"finally" clauses might not execute their exits, and the causes could
be due to a failure to execute any of the exits in the try block. So
we use the causes from `starts` as the causes for `exits`.
"""
causes = []
for start in sorted(starts):
if start.cause is not None:
causes.append(start.cause.format(lineno=start.lineno))
cause = " or ".join(causes)
exits = {ArcStart(xit.lineno, cause) for xit in exits}
return exits
@contract(returns='ArcStarts')
def _handle__While(self, node):
start = to_top = self.line_for_node(node.test)
constant_test = self.is_constant_expr(node.test)
top_is_body0 = False
if constant_test:
top_is_body0 = True
if env.PYBEHAVIOR.keep_constant_test:
top_is_body0 = False
if top_is_body0:
to_top = self.line_for_node(node.body[0])
self.block_stack.append(LoopBlock(start=to_top))
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
for xit in exits:
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
exits.update(my_block.break_exits)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No `else` clause: you can exit from the start.
if not constant_test:
exits.add(from_start)
return exits
@contract(returns='ArcStarts')
def _handle__With(self, node):
start = self.line_for_node(node)
if env.PYBEHAVIOR.exit_through_with:
self.block_stack.append(WithBlock(start=start))
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
if env.PYBEHAVIOR.exit_through_with:
with_block = self.block_stack.pop()
with_exit = {ArcStart(start)}
if exits:
for xit in exits:
self.add_arc(xit.lineno, start)
exits = with_exit
if with_block.break_from:
self.process_break_exits(
self._combine_finally_starts(with_block.break_from, with_exit)
)
if with_block.continue_from:
self.process_continue_exits(
self._combine_finally_starts(with_block.continue_from, with_exit)
)
if with_block.return_from:
self.process_return_exits(
self._combine_finally_starts(with_block.return_from, with_exit)
)
return exits
_handle__AsyncWith = _handle__With
# Code object dispatchers: _code_object__*
#
# These methods are used by analyze() as the start of the analysis.
# There is one for each construct with a code object.
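    # By convention, a code object's entry and exit are modeled as the
    # negative of its first line number. For example (illustrative), a module
    # whose first statement is on line 1 gets an entry arc (-1, 1) and exit
    # arcs from its last reachable lines back to -1.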
def _code_object__Module(self, node):
start = self.line_for_node(node)
if node.body:
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
for xit in exits:
self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
else:
# Empty module.
self.add_arc(-start, start)
self.add_arc(start, -start)
def _code_object__FunctionDef(self, node):
start = self.line_for_node(node)
self.block_stack.append(FunctionBlock(start=start, name=node.name))
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
self.process_return_exits(exits)
self.block_stack.pop()
_code_object__AsyncFunctionDef = _code_object__FunctionDef
def _code_object__ClassDef(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
for xit in exits:
self.add_arc(
xit.lineno, -start, xit.cause,
f"didn't exit the body of class {node.name!r}",
)
def _make_expression_code_method(noun): # pylint: disable=no-self-argument
"""A function to make methods for expression-based callable _code_object__ methods."""
def _code_object__expression_callable(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}")
self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}")
return _code_object__expression_callable
_code_object__Lambda = _make_expression_code_method("lambda")
_code_object__GeneratorExp = _make_expression_code_method("generator expression")
_code_object__DictComp = _make_expression_code_method("dictionary comprehension")
_code_object__SetComp = _make_expression_code_method("set comprehension")
_code_object__ListComp = _make_expression_code_method("list comprehension")
# Code only used when dumping the AST for debugging.
SKIP_DUMP_FIELDS = ["ctx"]
def _is_simple_value(value):
"""Is `value` simple enough to be displayed on a single line?"""
return (
value in [None, [], (), {}, set()] or
isinstance(value, (bytes, int, float, str))
)
def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin
"""Dump the AST for `node`.
This recursively walks the AST, printing a readable version.
"""
indent = " " * depth
lineno = getattr(node, "lineno", None)
if lineno is not None:
linemark = f" @ {node.lineno},{node.col_offset}"
if hasattr(node, "end_lineno"):
linemark += ":"
if node.end_lineno != node.lineno:
linemark += f"{node.end_lineno},"
linemark += f"{node.end_col_offset}"
else:
linemark = ""
head = f"{indent}<{node.__class__.__name__}{linemark}"
named_fields = [
(name, value)
for name, value in ast.iter_fields(node)
if name not in SKIP_DUMP_FIELDS
]
if not named_fields:
print(f"{head}>")
elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
field_name, value = named_fields[0]
print(f"{head} {field_name}: {value!r}>")
else:
print(head)
if 0:
print("{}# mro: {}".format(
indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
))
next_indent = indent + " "
for field_name, value in named_fields:
prefix = f"{next_indent}{field_name}:"
if _is_simple_value(value):
print(f"{prefix} {value!r}")
elif isinstance(value, list):
print(f"{prefix} [")
for n in value:
if _is_simple_value(n):
print(f"{next_indent} {n!r}")
else:
ast_dump(n, depth + 8, print=print)
print(f"{next_indent}]")
else:
print(prefix)
ast_dump(value, depth + 8, print=print)
print(f"{indent}>")
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/summary.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Summary reporting"""
import sys
from coverage.exceptions import CoverageException
from coverage.misc import human_sorted_items
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
class SummaryReporter:
"""A reporter for writing the summary report."""
def __init__(self, coverage):
self.coverage = coverage
self.config = self.coverage.config
self.branches = coverage.get_data().has_arcs()
self.outfile = None
self.fr_analysis = []
self.skipped_count = 0
self.empty_count = 0
self.total = Numbers(precision=self.config.precision)
self.fmt_err = "%s %s: %s"
def writeout(self, line):
"""Write a line to the output, adding a newline."""
self.outfile.write(line.rstrip())
self.outfile.write("\n")
def report(self, morfs, outfile=None):
"""Writes a report summarizing coverage statistics per module.
        `outfile` is a file object to write the summary to. It must be opened
        for text (`str`) output.
"""
self.outfile = outfile or sys.stdout
self.coverage.get_data().set_query_contexts(self.config.report_contexts)
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.report_one_file(fr, analysis)
# Prepare the formatting strings, header, and column sorting.
max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
fmt_name = "%%- %ds " % max_name
fmt_skip_covered = "\n%s file%s skipped due to complete coverage."
fmt_skip_empty = "\n%s empty file%s skipped."
header = (fmt_name % "Name") + " Stmts Miss"
fmt_coverage = fmt_name + "%6d %6d"
if self.branches:
header += " Branch BrPart"
fmt_coverage += " %6d %6d"
width100 = Numbers(precision=self.config.precision).pc_str_width()
header += "%*s" % (width100+4, "Cover")
fmt_coverage += "%%%ds%%%%" % (width100+3,)
if self.config.show_missing:
header += " Missing"
fmt_coverage += " %s"
rule = "-" * len(header)
column_order = dict(name=0, stmts=1, miss=2, cover=-1)
if self.branches:
column_order.update(dict(branch=3, brpart=4))
# Write the header
self.writeout(header)
self.writeout(rule)
# `lines` is a list of pairs, (line text, line values). The line text
# is a string that will be printed, and line values is a tuple of
# sortable values.
lines = []
for (fr, analysis) in self.fr_analysis:
nums = analysis.numbers
args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
if self.branches:
args += (nums.n_branches, nums.n_partial_branches)
args += (nums.pc_covered_str,)
if self.config.show_missing:
args += (analysis.missing_formatted(branches=True),)
text = fmt_coverage % args
# Add numeric percent coverage so that sorting makes sense.
args += (nums.pc_covered,)
lines.append((text, args))
# Sort the lines and write them out.
sort_option = (self.config.sort or "name").lower()
reverse = False
if sort_option[0] == '-':
reverse = True
sort_option = sort_option[1:]
elif sort_option[0] == '+':
sort_option = sort_option[1:]
if sort_option == "name":
lines = human_sorted_items(lines, reverse=reverse)
else:
position = column_order.get(sort_option)
if position is None:
raise CoverageException(f"Invalid sorting option: {self.config.sort!r}")
lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)
for line in lines:
self.writeout(line[0])
# Write a TOTAL line if we had at least one file.
if self.total.n_files > 0:
self.writeout(rule)
args = ("TOTAL", self.total.n_statements, self.total.n_missing)
if self.branches:
args += (self.total.n_branches, self.total.n_partial_branches)
args += (self.total.pc_covered_str,)
if self.config.show_missing:
args += ("",)
self.writeout(fmt_coverage % args)
# Write other final lines.
if not self.total.n_files and not self.skipped_count:
raise CoverageException("No data to report.")
if self.config.skip_covered and self.skipped_count:
self.writeout(
fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
)
if self.config.skip_empty and self.empty_count:
self.writeout(
fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
)
return self.total.n_statements and self.total.pc_covered
def report_one_file(self, fr, analysis):
"""Report on just one file, the callback from report()."""
nums = analysis.numbers
self.total += nums
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if self.config.skip_covered and no_missing_lines and no_missing_branches:
# Don't report on 100% files.
self.skipped_count += 1
elif self.config.skip_empty and nums.n_statements == 0:
# Don't report on empty files.
self.empty_count += 1
else:
self.fr_analysis.append((fr, analysis))
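# A minimal usage sketch (assuming coverage data has already been collected
# into a .coverage file; normally this reporter is driven by Coverage.report()):
#
#     import coverage
#     cov = coverage.Coverage()
#     cov.load()                         # read the existing data file
#     SummaryReporter(cov).report(None)  # print the text table to stdout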
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/debug.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Control of and utilities for debugging."""
import contextlib
import functools
import inspect
import io
import itertools
import os
import pprint
import reprlib
import sys
import _thread
from coverage.misc import isolate_module
os = isolate_module(os)
# When debugging, it can be helpful to force some options, especially when
# debugging the configuration mechanisms you usually use to control debugging!
# This is a list of forced debugging options.
FORCED_DEBUG = []
FORCED_DEBUG_FILE = None
class DebugControl:
"""Control and output for debugging."""
show_repr_attr = False # For SimpleReprMixin
def __init__(self, options, output):
"""Configure the options and output file for debugging."""
self.options = list(options) + FORCED_DEBUG
self.suppress_callers = False
filters = []
if self.should('pid'):
filters.append(add_pid_and_tid)
self.output = DebugOutputFile.get_one(
output,
show_process=self.should('process'),
filters=filters,
)
self.raw_output = self.output.outfile
def __repr__(self):
return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>"
def should(self, option):
"""Decide whether to output debug information in category `option`."""
if option == "callers" and self.suppress_callers:
return False
return (option in self.options)
@contextlib.contextmanager
def without_callers(self):
"""A context manager to prevent call stacks from being logged."""
old = self.suppress_callers
self.suppress_callers = True
try:
yield
finally:
self.suppress_callers = old
def write(self, msg):
"""Write a line of debug output.
`msg` is the line to write. A newline will be appended.
"""
self.output.write(msg+"\n")
if self.should('self'):
caller_self = inspect.stack()[1][0].f_locals.get('self')
if caller_self is not None:
self.output.write(f"self: {caller_self!r}\n")
if self.should('callers'):
dump_stack_frames(out=self.output, skip=1)
self.output.flush()
class DebugControlString(DebugControl):
"""A `DebugControl` that writes to a StringIO, for testing."""
def __init__(self, options):
super().__init__(options, io.StringIO())
def get_output(self):
"""Get the output text from the `DebugControl`."""
return self.raw_output.getvalue()
class NoDebugging:
"""A replacement for DebugControl that will never try to do anything."""
def should(self, option): # pylint: disable=unused-argument
"""Should we write debug messages? Never."""
return False
def info_header(label):
"""Make a nice header string."""
return "--{:-<60s}".format(" "+label+" ")
def info_formatter(info):
"""Produce a sequence of formatted lines from info.
`info` is a sequence of pairs (label, data). The produced lines are
nicely formatted, ready to print.
"""
info = list(info)
if not info:
return
label_len = 30
assert all(len(l) < label_len for l, _ in info)
for label, data in info:
if data == []:
data = "-none-"
if isinstance(data, (list, set, tuple)):
prefix = "%*s:" % (label_len, label)
for e in data:
yield "%*s %s" % (label_len+1, prefix, e)
prefix = ""
else:
yield "%*s: %s" % (label_len, label, data)
def write_formatted_info(writer, header, info):
"""Write a sequence of (label,data) pairs nicely."""
writer.write(info_header(header))
for line in info_formatter(info):
writer.write(" %s" % line)
def short_stack(limit=None, skip=0):
"""Return a string summarizing the call stack.
The string is multi-line, with one line per stack frame. Each line shows
the function name, the file name, and the line number:
...
start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
...
`limit` is the number of frames to include, defaulting to all of them.
`skip` is the number of frames to skip, so that debugging functions can
call this and not be included in the result.
"""
stack = inspect.stack()[limit:skip:-1]
return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack)
def dump_stack_frames(limit=None, out=None, skip=0):
"""Print a summary of the stack to stdout, or someplace else."""
out = out or sys.stdout
out.write(short_stack(limit=limit, skip=skip+1))
out.write("\n")
def clipped_repr(text, numchars=50):
"""`repr(text)`, but limited to `numchars`."""
r = reprlib.Repr()
r.maxstring = numchars
return r.repr(text)
def short_id(id64):
"""Given a 64-bit id, make a shorter 16-bit one."""
id16 = 0
for offset in range(0, 64, 16):
id16 ^= id64 >> offset
return id16 & 0xFFFF
def add_pid_and_tid(text):
"""A filter to add pid and tid to debug messages."""
# Thread ids are useful, but too long. Make a shorter one.
tid = f"{short_id(_thread.get_ident()):04x}"
text = f"{os.getpid():5d}.{tid}: {text}"
return text
class SimpleReprMixin:
"""A mixin implementing a simple __repr__."""
simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id']
def __repr__(self):
show_attrs = (
(k, v) for k, v in self.__dict__.items()
if getattr(v, "show_repr_attr", True)
and not callable(v)
and k not in self.simple_repr_ignore
)
return "<{klass} @0x{id:x} {attrs}>".format(
klass=self.__class__.__name__,
id=id(self),
attrs=" ".join(f"{k}={v!r}" for k, v in show_attrs),
)
def simplify(v): # pragma: debugging
"""Turn things which are nearly dict/list/etc into dict/list/etc."""
if isinstance(v, dict):
return {k:simplify(vv) for k, vv in v.items()}
elif isinstance(v, (list, tuple)):
return type(v)(simplify(vv) for vv in v)
elif hasattr(v, "__dict__"):
return simplify({'.'+k: v for k, v in v.__dict__.items()})
else:
return v
def pp(v): # pragma: debugging
"""Debug helper to pretty-print data, including SimpleNamespace objects."""
# Might not be needed in 3.9+
pprint.pprint(simplify(v))
def filter_text(text, filters):
"""Run `text` through a series of filters.
`filters` is a list of functions. Each takes a string and returns a
string. Each is run in turn.
Returns: the final string that results after all of the filters have
run.
"""
clean_text = text.rstrip()
ending = text[len(clean_text):]
text = clean_text
for fn in filters:
lines = []
for line in text.splitlines():
lines.extend(fn(line).splitlines())
text = "\n".join(lines)
return text + ending
class CwdTracker: # pragma: debugging
"""A class to add cwd info to debug messages."""
def __init__(self):
self.cwd = None
def filter(self, text):
"""Add a cwd message for each new cwd."""
cwd = os.getcwd()
if cwd != self.cwd:
text = f"cwd is now {cwd!r}\n" + text
self.cwd = cwd
return text
class DebugOutputFile: # pragma: debugging
"""A file-like object that includes pid and cwd information."""
def __init__(self, outfile, show_process, filters):
self.outfile = outfile
self.show_process = show_process
self.filters = list(filters)
if self.show_process:
self.filters.insert(0, CwdTracker().filter)
self.write(f"New process: executable: {sys.executable!r}\n")
self.write("New process: cmd: {!r}\n".format(getattr(sys, 'argv', None)))
if hasattr(os, 'getppid'):
self.write(f"New process: pid: {os.getpid()!r}, parent pid: {os.getppid()!r}\n")
SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
@classmethod
def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
"""Get a DebugOutputFile.
If `fileobj` is provided, then a new DebugOutputFile is made with it.
If `fileobj` isn't provided, then a file is chosen
(COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton
DebugOutputFile is made.
`show_process` controls whether the debug file adds process-level
information, and filters is a list of other message filters to apply.
`filters` are the text filters to apply to the stream to annotate with
pids, etc.
If `interim` is true, then a future `get_one` can replace this one.
"""
if fileobj is not None:
# Make DebugOutputFile around the fileobj passed.
return cls(fileobj, show_process, filters)
# Because of the way igor.py deletes and re-imports modules,
# this class can be defined more than once. But we really want
# a process-wide singleton. So stash it in sys.modules instead of
# on a class attribute. Yes, this is aggressively gross.
the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True))
if the_one is None or is_interim:
if fileobj is None:
debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
if debug_file_name in ("stdout", "stderr"):
fileobj = getattr(sys, debug_file_name)
elif debug_file_name:
fileobj = open(debug_file_name, "a")
else:
fileobj = sys.stderr
the_one = cls(fileobj, show_process, filters)
sys.modules[cls.SYS_MOD_NAME] = (the_one, interim)
return the_one
def write(self, text):
"""Just like file.write, but filter through all our filters."""
self.outfile.write(filter_text(text, self.filters))
self.outfile.flush()
def flush(self):
"""Flush our file."""
self.outfile.flush()
def log(msg, stack=False): # pragma: debugging
"""Write a log message as forcefully as possible."""
out = DebugOutputFile.get_one(interim=True)
out.write(msg+"\n")
if stack:
dump_stack_frames(out=out, skip=1)
def decorate_methods(decorator, butnot=(), private=False): # pragma: debugging
"""A class decorator to apply a decorator to methods."""
def _decorator(cls):
for name, meth in inspect.getmembers(cls, inspect.isroutine):
if name not in cls.__dict__:
continue
if name != "__init__":
if not private and name.startswith("_"):
continue
if name in butnot:
continue
setattr(cls, name, decorator(meth))
return cls
return _decorator
def break_in_pudb(func): # pragma: debugging
"""A function decorator to stop in the debugger for each call."""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
import pudb
sys.stdout = sys.__stdout__
pudb.set_trace()
return func(*args, **kwargs)
return _wrapper
OBJ_IDS = itertools.count()
CALLS = itertools.count()
OBJ_ID_ATTR = "$coverage.object_id"
def show_calls(show_args=True, show_stack=False, show_return=False): # pragma: debugging
"""A method decorator to debug-log each call to the function."""
def _decorator(func):
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
oid = getattr(self, OBJ_ID_ATTR, None)
if oid is None:
oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}"
setattr(self, OBJ_ID_ATTR, oid)
extra = ""
if show_args:
eargs = ", ".join(map(repr, args))
ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
extra += "("
extra += eargs
if eargs and ekwargs:
extra += ", "
extra += ekwargs
extra += ")"
if show_stack:
extra += " @ "
extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines())
callid = next(CALLS)
msg = f"{oid} {callid:04d} {func.__name__}{extra}\n"
DebugOutputFile.get_one(interim=True).write(msg)
ret = func(self, *args, **kwargs)
if show_return:
msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n"
DebugOutputFile.get_one(interim=True).write(msg)
return ret
return _wrapper
return _decorator
def _clean_stack_line(s): # pragma: debugging
"""Simplify some paths in a stack trace, for compactness."""
s = s.strip()
s = s.replace(os.path.dirname(__file__) + '/', '')
s = s.replace(os.path.dirname(os.__file__) + '/', '')
s = s.replace(sys.prefix + '/', '')
return s
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/pytracer.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Raw data collector for coverage.py."""
import atexit
import dis
import sys
from coverage import env
# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
YIELD_VALUE = dis.opmap['YIELD_VALUE']
# When running meta-coverage, this file can try to trace itself, which confuses
# everything. Don't trace ourselves.
THIS_FILE = __file__.rstrip("co")
class PyTracer:
"""Python implementation of the raw data tracer."""
# Because of poor implementations of trace-function-manipulating tools,
# the Python trace function must be kept very simple. In particular, there
# must be only one function ever set as the trace function, both through
# sys.settrace, and as the return value from the trace function. Put
# another way, the trace function must always return itself. It cannot
# swap in other functions, or return None to avoid tracing a particular
# frame.
#
# The trace manipulator that introduced this restriction is DecoratorTools,
# which sets a trace function, and then later restores the pre-existing one
# by calling sys.settrace with a function it found in the current frame.
#
# Systems that use DecoratorTools (or similar trace manipulations) must use
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
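    #
    # An illustrative sketch of the contract this imposes (not actual
    # coverage.py code):
    #
    #     def tracer(frame, event, arg):
    #         ...                   # record the event
    #         return tracer         # always return the same function object
    #
    #     sys.settrace(tracer)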
def __init__(self):
# Attributes set from the collector:
self.data = None
self.trace_arcs = False
self.should_trace = None
self.should_trace_cache = None
self.should_start_context = None
self.warn = None
# The threading module to use, if any.
self.threading = None
self.cur_file_data = None
self.last_line = 0 # int, but uninitialized.
self.cur_file_name = None
self.context = None
self.started_context = False
self.data_stack = []
self.thread = None
self.stopped = False
self._activity = False
self.in_atexit = False
# On exit, self.in_atexit = True
atexit.register(setattr, self, 'in_atexit', True)
def __repr__(self):
return "<PyTracer at {}: {} lines in {} files>".format(
id(self),
sum(len(v) for v in self.data.values()),
len(self.data),
)
def log(self, marker, *args):
"""For hard-core logging of what this tracer is doing."""
with open("/tmp/debug_trace.txt", "a") as f:
f.write("{} {}[{}]".format(
marker,
id(self),
len(self.data_stack),
))
if 0:
f.write(".{:x}.{:x}".format(
self.thread.ident,
self.threading.current_thread().ident,
))
f.write(" {}".format(" ".join(map(str, args))))
if 0:
f.write(" | ")
stack = " / ".join(
(fname or "???").rpartition("/")[-1]
for _, fname, _, _ in self.data_stack
)
f.write(stack)
f.write("\n")
def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""
if THIS_FILE in frame.f_code.co_filename:
return None
#self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event)
if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable
            # The PyTracer.stop() method has been called, possibly by another
# thread, let's deactivate ourselves now.
if 0:
self.log("---\nX", frame.f_code.co_filename, frame.f_lineno)
f = frame
while f:
self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
f = f.f_back
sys.settrace(None)
self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
self.data_stack.pop()
)
return None
# if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
# self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
if event == 'call':
# Should we start a new context?
if self.should_start_context and self.context is None:
context_maybe = self.should_start_context(frame)
if context_maybe is not None:
self.context = context_maybe
self.started_context = True
self.switch_context(self.context)
else:
self.started_context = False
else:
self.started_context = False
# Entering a new frame. Decide if we should trace
# in this file.
self._activity = True
self.data_stack.append(
(
self.cur_file_data,
self.cur_file_name,
self.last_line,
self.started_context,
)
)
filename = frame.f_code.co_filename
self.cur_file_name = filename
disp = self.should_trace_cache.get(filename)
if disp is None:
disp = self.should_trace(filename, frame)
self.should_trace_cache[filename] = disp
self.cur_file_data = None
if disp.trace:
tracename = disp.source_filename
if tracename not in self.data:
self.data[tracename] = set()
self.cur_file_data = self.data[tracename]
# The call event is really a "start frame" event, and happens for
# function calls and re-entering generators. The f_lasti field is
# -1 for calls, and a real offset for generators. Use <0 as the
# line number for calls, and the real line number for generators.
if getattr(frame, 'f_lasti', -1) < 0:
self.last_line = -frame.f_code.co_firstlineno
else:
self.last_line = frame.f_lineno
elif event == 'line':
# Record an executed line.
if self.cur_file_data is not None:
lineno = frame.f_lineno
if self.trace_arcs:
self.cur_file_data.add((self.last_line, lineno))
else:
self.cur_file_data.add(lineno)
self.last_line = lineno
elif event == 'return':
if self.trace_arcs and self.cur_file_data:
# Record an arc leaving the function, but beware that a
# "return" event might just mean yielding from a generator.
# Jython seems to have an empty co_code, so just assume return.
code = frame.f_code.co_code
if (not code) or code[frame.f_lasti] != YIELD_VALUE:
first = frame.f_code.co_firstlineno
self.cur_file_data.add((self.last_line, -first))
# Leaving this function, pop the filename stack.
self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
self.data_stack.pop()
)
# Leaving a context?
if self.started_context:
self.context = None
self.switch_context(None)
return self._trace
def start(self):
"""Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.stopped = False
if self.threading:
if self.thread is None:
self.thread = self.threading.current_thread()
else:
if self.thread.ident != self.threading.current_thread().ident:
# Re-starting from a different thread!? Don't set the trace
# function, but we are marked as running again, so maybe it
# will be ok?
#self.log("~", "starting on different threads")
return self._trace
sys.settrace(self._trace)
return self._trace
def stop(self):
"""Stop this Tracer."""
# Get the active tracer callback before setting the stop flag to be
# able to detect if the tracer was changed prior to stopping it.
tf = sys.gettrace()
# Set the stop flag. The actual call to sys.settrace(None) will happen
# in the self._trace callback itself to make sure to call it from the
# right thread.
self.stopped = True
if self.threading and self.thread.ident != self.threading.current_thread().ident:
# Called on a different thread than started us: we can't unhook
# ourselves, but we've set the flag that we should stop, so we
# won't do any more tracing.
#self.log("~", "stopping on different threads")
return
if self.warn:
# PyPy clears the trace function before running atexit functions,
# so don't warn if we are in atexit on PyPy and the trace function
# has changed to None.
dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable
msg = f"Trace function changed, measurement is likely wrong: {tf!r}"
self.warn(msg, slug="trace-changed")
def activity(self):
"""Has there been any activity?"""
return self._activity
def reset_activity(self):
"""Reset the activity() flag."""
self._activity = False
def get_stats(self):
"""Return a dictionary of statistics, or None."""
return None
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/__init__.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Code coverage measurement for Python.
Ned Batchelder
https://nedbatchelder.com/code/coverage
"""
import sys
from coverage.version import __version__, __url__, version_info
from coverage.control import Coverage, process_startup
from coverage.data import CoverageData
from coverage.exceptions import CoverageException
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
from coverage.pytracer import PyTracer
# Backward compatibility.
coverage = Coverage
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/version.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!
# Same semantics as sys.version_info.
version_info = (6, 1, 2, "final", 0)
def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
version += "%s%d" % (short, serial)
return version
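# For example, _make_version(6, 1, 2, "candidate", 3) returns "6.1.2rc3",
# and _make_version(6, 1, 0, "final", 0) returns "6.1".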
def _make_url(major, minor, micro, releaselevel, serial):
"""Make the URL people should start at for this version of coverage.py."""
url = "https://coverage.readthedocs.io"
if releaselevel != 'final':
# For pre-releases, use a version-specific URL.
url += "/en/" + _make_version(major, minor, micro, releaselevel, serial)
return url
__version__ = _make_version(*version_info)
__url__ = _make_url(*version_info)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/jsonreport.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Json reporting for coverage.py"""
import datetime
import json
import sys
from coverage import __version__
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
class JsonReporter:
"""A reporter for writing JSON coverage results."""
report_type = "JSON report"
def __init__(self, coverage):
self.coverage = coverage
self.config = self.coverage.config
self.total = Numbers(self.config.precision)
self.report_data = {}
def report(self, morfs, outfile=None):
"""Generate a json report for `morfs`.
`morfs` is a list of modules or file names.
`outfile` is a file object to write the json to
"""
outfile = outfile or sys.stdout
coverage_data = self.coverage.get_data()
coverage_data.set_query_contexts(self.config.report_contexts)
self.report_data["meta"] = {
"version": __version__,
"timestamp": datetime.datetime.now().isoformat(),
"branch_coverage": coverage_data.has_arcs(),
"show_contexts": self.config.json_show_contexts,
}
measured_files = {}
for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
measured_files[file_reporter.relative_filename()] = self.report_one_file(
coverage_data,
analysis
)
self.report_data["files"] = measured_files
self.report_data["totals"] = {
'covered_lines': self.total.n_executed,
'num_statements': self.total.n_statements,
'percent_covered': self.total.pc_covered,
'percent_covered_display': self.total.pc_covered_str,
'missing_lines': self.total.n_missing,
'excluded_lines': self.total.n_excluded,
}
if coverage_data.has_arcs():
self.report_data["totals"].update({
'num_branches': self.total.n_branches,
'num_partial_branches': self.total.n_partial_branches,
'covered_branches': self.total.n_executed_branches,
'missing_branches': self.total.n_missing_branches,
})
json.dump(
self.report_data,
outfile,
indent=4 if self.config.json_pretty_print else None
)
return self.total.n_statements and self.total.pc_covered
def report_one_file(self, coverage_data, analysis):
"""Extract the relevant report data for a single file"""
nums = analysis.numbers
self.total += nums
summary = {
'covered_lines': nums.n_executed,
'num_statements': nums.n_statements,
'percent_covered': nums.pc_covered,
'percent_covered_display': nums.pc_covered_str,
'missing_lines': nums.n_missing,
'excluded_lines': nums.n_excluded,
}
reported_file = {
'executed_lines': sorted(analysis.executed),
'summary': summary,
'missing_lines': sorted(analysis.missing),
'excluded_lines': sorted(analysis.excluded),
}
if self.config.json_show_contexts:
reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename)
if coverage_data.has_arcs():
reported_file['summary'].update({
'num_branches': nums.n_branches,
'num_partial_branches': nums.n_partial_branches,
'covered_branches': nums.n_executed_branches,
'missing_branches': nums.n_missing_branches,
})
return reported_file
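# The emitted JSON has roughly this shape (abridged sketch):
#
#     {
#         "meta": {"version": ..., "timestamp": ..., ...},
#         "files": {"mod.py": {"executed_lines": [...], "summary": {...}}},
#         "totals": {"covered_lines": ..., "percent_covered": ..., ...}
#     }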
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/disposition.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Simple value objects for tracking what to do with files."""
class FileDisposition:
"""A simple value type for recording what to do with a file."""
pass
# FileDisposition "methods": FileDisposition is a pure value object, so it can
# be implemented in either C or Python. Acting on them is done with these
# functions.
def disposition_init(cls, original_filename):
"""Construct and initialize a new FileDisposition object."""
disp = cls()
disp.original_filename = original_filename
disp.canonical_filename = original_filename
disp.source_filename = None
disp.trace = False
disp.reason = ""
disp.file_tracer = None
disp.has_dynamic_filename = False
return disp
def disposition_debug_msg(disp):
"""Make a nice debug message of what the FileDisposition is doing."""
if disp.trace:
msg = f"Tracing {disp.original_filename!r}"
if disp.original_filename != disp.source_filename:
msg += f" as {disp.source_filename!r}"
if disp.file_tracer:
msg += ": will be traced by %r" % disp.file_tracer
else:
msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
return msg
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/bytecode.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Bytecode manipulation for coverage.py"""
import types
def code_objects(code):
"""Iterate over all the code objects in `code`."""
stack = [code]
while stack:
# We're going to return the code object on the stack, but first
# push its children for later returning.
code = stack.pop()
for c in code.co_consts:
if isinstance(c, types.CodeType):
stack.append(c)
yield code
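# For example (illustrative):
#
#     code = compile("def f():\n    def g(): pass\n", "<src>", "exec")
#     [c.co_name for c in code_objects(code)]   # ['<module>', 'f', 'g']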
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/files.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""File wrangling."""
import hashlib
import fnmatch
import ntpath
import os
import os.path
import posixpath
import re
import sys
from coverage import env
from coverage.exceptions import CoverageException
from coverage.misc import contract, human_sorted, isolate_module, join_regex
os = isolate_module(os)
def set_relative_directory():
"""Set the directory that `relative_filename` will be relative to."""
global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
# The absolute path to our current directory.
RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
CANONICAL_FILENAME_CACHE = {}
def relative_directory():
"""Return the directory that `relative_filename` is relative to."""
return RELATIVE_DIR
@contract(returns='unicode')
def relative_filename(filename):
"""Return the relative form of `filename`.
    The file name will be relative to the directory that was current when
    `set_relative_directory` was called.
"""
fnorm = os.path.normcase(filename)
if fnorm.startswith(RELATIVE_DIR):
filename = filename[len(RELATIVE_DIR):]
return filename
@contract(returns='unicode')
def canonical_filename(filename):
"""Return a canonical file name for `filename`.
An absolute path with no redundant components and normalized case.
"""
if filename not in CANONICAL_FILENAME_CACHE:
cf = filename
if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
f = os.path.join(path, filename)
try:
exists = os.path.exists(f)
except UnicodeError:
exists = False
if exists:
cf = f
break
cf = abs_file(cf)
CANONICAL_FILENAME_CACHE[filename] = cf
return CANONICAL_FILENAME_CACHE[filename]
MAX_FLAT = 100
@contract(filename='unicode', returns='unicode')
def flat_rootname(filename):
"""A base for a flat file name to correspond to this file.
Useful for writing files about the code where you want all the files in
the same directory, but need to differentiate same-named files from
different directories.
For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py'
"""
dirname, basename = ntpath.split(filename)
if dirname:
fp = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()[:16]
prefix = f"d_{fp}_"
else:
prefix = ""
return prefix + basename.replace(".", "_")
if env.WINDOWS:
_ACTUAL_PATH_CACHE = {}
_ACTUAL_PATH_LIST_CACHE = {}
def actual_path(path):
"""Get the actual path of `path`, including the correct case."""
if path in _ACTUAL_PATH_CACHE:
return _ACTUAL_PATH_CACHE[path]
head, tail = os.path.split(path)
if not tail:
# This means head is the drive spec: normalize it.
actpath = head.upper()
elif not head:
actpath = tail
else:
head = actual_path(head)
if head in _ACTUAL_PATH_LIST_CACHE:
files = _ACTUAL_PATH_LIST_CACHE[head]
else:
try:
files = os.listdir(head)
except Exception:
# This will raise OSError, or this bizarre TypeError:
# https://bugs.python.org/issue1776160
files = []
_ACTUAL_PATH_LIST_CACHE[head] = files
normtail = os.path.normcase(tail)
for f in files:
if os.path.normcase(f) == normtail:
tail = f
break
actpath = os.path.join(head, tail)
_ACTUAL_PATH_CACHE[path] = actpath
return actpath
else:
def actual_path(filename):
"""The actual path for non-Windows platforms."""
return filename
@contract(returns='unicode')
def abs_file(path):
"""Return the absolute normalized form of `path`."""
try:
path = os.path.realpath(path)
except UnicodeError:
pass
path = os.path.abspath(path)
path = actual_path(path)
return path
def python_reported_file(filename):
"""Return the string as Python would describe this file name."""
if env.PYBEHAVIOR.report_absolute_files:
filename = os.path.abspath(filename)
return filename
RELATIVE_DIR = None
CANONICAL_FILENAME_CACHE = None
set_relative_directory()
def isabs_anywhere(filename):
"""Is `filename` an absolute path on any OS?"""
return ntpath.isabs(filename) or posixpath.isabs(filename)
def prep_patterns(patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
prepped = []
for p in patterns or []:
if p.startswith(("*", "?")):
prepped.append(p)
else:
prepped.append(abs_file(p))
return prepped
class TreeMatcher:
"""A matcher for files in a tree.
Construct with a list of paths, either files or directories. Paths match
with the `match` method if they are one of the files, or if they are
somewhere in a subtree rooted at one of the directories.
"""
def __init__(self, paths, name="unknown"):
self.original_paths = human_sorted(paths)
self.paths = list(map(os.path.normcase, paths))
self.name = name
def __repr__(self):
return f"<TreeMatcher {self.name} {self.original_paths!r}>"
def info(self):
"""A list of strings for displaying when dumping state."""
return self.original_paths
def match(self, fpath):
"""Does `fpath` indicate a file in one of our trees?"""
fpath = os.path.normcase(fpath)
for p in self.paths:
if fpath.startswith(p):
if fpath == p:
# This is the same file!
return True
if fpath[len(p)] == os.sep:
# This is a file in the directory
return True
return False
class ModuleMatcher:
"""A matcher for modules in a tree."""
def __init__(self, module_names, name="unknown"):
self.modules = list(module_names)
self.name = name
def __repr__(self):
return f"<ModuleMatcher {self.name} {self.modules!r}>"
def info(self):
"""A list of strings for displaying when dumping state."""
return self.modules
def match(self, module_name):
"""Does `module_name` indicate a module in one of our packages?"""
if not module_name:
return False
for m in self.modules:
if module_name.startswith(m):
if module_name == m:
return True
if module_name[len(m)] == '.':
# This is a module in the package
return True
return False
class FnmatchMatcher:
"""A matcher for files by file name pattern."""
def __init__(self, pats, name="unknown"):
self.pats = list(pats)
self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS)
self.name = name
def __repr__(self):
return f"<FnmatchMatcher {self.name} {self.pats!r}>"
def info(self):
"""A list of strings for displaying when dumping state."""
return self.pats
def match(self, fpath):
"""Does `fpath` match one of our file name patterns?"""
return self.re.match(fpath) is not None
def sep(s):
"""Find the path separator used in this string, or os.sep if none."""
sep_match = re.search(r"[\\/]", s)
if sep_match:
the_sep = sep_match.group(0)
else:
the_sep = os.sep
return the_sep
def fnmatches_to_regex(patterns, case_insensitive=False, partial=False):
"""Convert fnmatch patterns to a compiled regex that matches any of them.
Slashes are always converted to match either slash or backslash, for
Windows support, even when running elsewhere.
If `partial` is true, then the pattern will match if the target string
starts with the pattern. Otherwise, it must match the entire string.
Returns: a compiled regex object. Use the .match method to compare target
strings.
"""
regexes = (fnmatch.translate(pattern) for pattern in patterns)
    # Python 3.7's fnmatch translates "/" as "/". Before that, it translated
    # it as "\/", so we have to handle a possible backslash.
regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes)
if partial:
# fnmatch always adds a \Z to match the whole string, which we don't
# want, so we remove the \Z. While removing it, we only replace \Z if
# followed by paren (introducing flags), or at end, to keep from
# destroying a literal \Z in the pattern.
regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes)
flags = 0
if case_insensitive:
flags |= re.IGNORECASE
compiled = re.compile(join_regex(regexes), flags=flags)
return compiled
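# For example (illustrative), fnmatches_to_regex(["*/foo.py"]) produces a
# regex matching both "a/foo.py" and r"a\foo.py", since slashes are widened
# to match either separator.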
class PathAliases:
"""A collection of aliases for paths.
When combining data files from remote machines, often the paths to source
code are different, for example, due to OS differences, or because of
serialized checkouts on continuous integration machines.
A `PathAliases` object tracks a list of pattern/result pairs, and can
map a path through those aliases to produce a unified path.
"""
def __init__(self, relative=False):
self.aliases = []
self.relative = relative
def pprint(self): # pragma: debugging
"""Dump the important parts of the PathAliases, for debugging."""
print(f"Aliases (relative={self.relative}):")
for regex, result in self.aliases:
print(f"{regex.pattern!r} --> {result!r}")
def add(self, pattern, result):
"""Add the `pattern`/`result` pair to the list of aliases.
`pattern` is an `fnmatch`-style pattern. `result` is a simple
string. When mapping paths, if a path starts with a match against
`pattern`, then that match is replaced with `result`. This models
isomorphic source trees being rooted at different places on two
different machines.
`pattern` can't end with a wildcard component, since that would
match an entire tree, and not just its root.
"""
pattern_sep = sep(pattern)
if len(pattern) > 1:
pattern = pattern.rstrip(r"\/")
# The pattern can't end with a wildcard component.
if pattern.endswith("*"):
raise CoverageException("Pattern must not end with wildcards.")
# The pattern is meant to match a filepath. Let's make it absolute
# unless it already is, or is meant to match any prefix.
if not pattern.startswith('*') and not isabs_anywhere(pattern +
pattern_sep):
pattern = abs_file(pattern)
if not pattern.endswith(pattern_sep):
pattern += pattern_sep
# Make a regex from the pattern.
regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True)
# Normalize the result: it must end with a path separator.
result_sep = sep(result)
result = result.rstrip(r"\/") + result_sep
self.aliases.append((regex, result))
def map(self, path):
"""Map `path` through the aliases.
`path` is checked against all of the patterns. The first pattern to
match is used to replace the root of the path with the result root.
Only one pattern is ever used. If no patterns match, `path` is
returned unchanged.
The separator style in the result is made to match that of the result
in the alias.
Returns the mapped path. If a mapping has happened, this is a
canonical path. If no mapping has happened, it is the original value
of `path` unchanged.
"""
for regex, result in self.aliases:
m = regex.match(path)
if m:
new = path.replace(m.group(0), result)
new = new.replace(sep(path), sep(result))
if not self.relative:
new = canonical_filename(new)
return new
return path
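# A minimal usage sketch (the paths are hypothetical):
#
#     aliases = PathAliases()
#     aliases.add("/ci/build/src", "./src")
#     aliases.map("/ci/build/src/pkg/mod.py")  # "./src/pkg/mod.py", canonicalized
#
# Only the first matching pattern is applied; the result is canonicalized
# unless the PathAliases was created with relative=True.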
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but sub-directories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/data.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Coverage data for coverage.py.
This file had the 4.x JSON data support, which is now gone. This file still
has storage-agnostic helpers, and is kept to avoid changing too many imports.
CoverageData is now defined in sqldata.py, and imported here to keep the
imports working.
"""
import glob
import os.path
from coverage.exceptions import CoverageException
from coverage.misc import file_be_gone
from coverage.sqldata import CoverageData
def line_counts(data, fullpath=False):
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
Returns a dict mapping file names to counts of lines.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename in data.measured_files():
summ[filename_fn(filename)] = len(data.lines(filename))
return summ
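# Illustrative sketch (not part of the original file), assuming a populated
# data file:
#
#     data = CoverageData()
#     data.read()
#     line_counts(data)                 # e.g. {'mod.py': 17, ...}
#     line_counts(data, fullpath=True)  # e.g. {'/src/pkg/mod.py': 17, ...}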
def add_data_to_hash(data, filename, hasher):
"""Contribute `filename`'s data to the `hasher`.
`hasher` is a `coverage.misc.Hasher` instance to be updated with
the file's data. It should only get the results data, not the run
data.
"""
if data.has_arcs():
hasher.update(sorted(data.arcs(filename) or []))
else:
hasher.update(sorted(data.lines(filename) or []))
hasher.update(data.file_tracer(filename))
def combine_parallel_data(
data, aliases=None, data_paths=None, strict=False, keep=False, message=None,
):
"""Combine a number of data files together.
Treat `data.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
If `data_paths` is provided, it is a list of directories or files to
combine. Directories are searched for files that start with
`data.filename` plus dot as a prefix, and those files are combined.
If `data_paths` is not provided, then the directory portion of
`data.filename` is used as the directory to search for data files.
    Unless `keep` is True, every data file found and combined is then
    deleted from disk. If a file cannot be read, a warning will be issued,
    and the file will not be deleted.
If `strict` is true, and no files are found to combine, an error is
raised.
"""
# Because of the os.path.abspath in the constructor, data_dir will
# never be an empty string.
data_dir, local = os.path.split(data.base_filename())
localdot = local + '.*'
data_paths = data_paths or [data_dir]
files_to_combine = []
for p in data_paths:
if os.path.isfile(p):
files_to_combine.append(os.path.abspath(p))
elif os.path.isdir(p):
pattern = os.path.join(os.path.abspath(p), localdot)
files_to_combine.extend(glob.glob(pattern))
else:
raise CoverageException(f"Couldn't combine from non-existent path '{p}'")
if strict and not files_to_combine:
raise CoverageException("No data to combine")
files_combined = 0
for f in files_to_combine:
if f == data.data_filename():
# Sometimes we are combining into a file which is one of the
# parallel files. Skip that file.
if data._debug.should('dataio'):
data._debug.write(f"Skipping combining ourself: {f!r}")
continue
if data._debug.should('dataio'):
data._debug.write(f"Combining data file {f!r}")
try:
new_data = CoverageData(f, debug=data._debug)
new_data.read()
except CoverageException as exc:
if data._warn:
# The CoverageException has the file name in it, so just
# use the message as the warning.
data._warn(str(exc))
else:
data.update(new_data, aliases=aliases)
files_combined += 1
if message:
message(f"Combined data file {os.path.relpath(f)}")
if not keep:
if data._debug.should('dataio'):
data._debug.write(f"Deleting combined data file {f!r}")
file_be_gone(f)
if strict and not files_combined:
raise CoverageException("No usable data files")
| 4,680 | Python | 34.732824 | 97 | 0.648932 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/env.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Determine facts about the environment."""
import os
import platform
import sys
# Operating systems.
WINDOWS = sys.platform == "win32"
LINUX = sys.platform.startswith("linux")
# Python implementations.
CPYTHON = (platform.python_implementation() == "CPython")
PYPY = (platform.python_implementation() == "PyPy")
JYTHON = (platform.python_implementation() == "Jython")
IRONPYTHON = (platform.python_implementation() == "IronPython")
# Python versions. We amend version_info with one more value, a zero if an
# official version, or 1 if built from source beyond an official version.
PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
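# For example (illustrative): an official CPython 3.9.7 build gives
# PYVERSION == (3, 9, 7, 'final', 0, 0); a "3.9.7+" build from source ends in 1.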
if PYPY:
PYPYVERSION = sys.pypy_version_info
# Python behavior.
class PYBEHAVIOR:
"""Flags indicating this Python's behavior."""
# Does Python conform to PEP626, Precise line numbers for debugging and other tools.
# https://www.python.org/dev/peps/pep-0626
pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4))
# Is "if __debug__" optimized away?
if PYPY:
optimize_if_debug = True
else:
optimize_if_debug = not pep626
# Is "if not __debug__" optimized away?
optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4))
if pep626:
optimize_if_not_debug = False
if PYPY:
optimize_if_not_debug = True
# Is "if not __debug__" optimized away even better?
optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1))
if pep626:
optimize_if_not_debug2 = False
# Yet another way to optimize "if not __debug__"?
optimize_if_not_debug3 = (PYPY and PYVERSION >= (3, 8))
# Can co_lnotab have negative deltas?
negative_lnotab = not (PYPY and PYPYVERSION < (7, 2))
# Do .pyc files conform to PEP 552? Hash-based pyc's.
hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4))
# Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
# used to be an empty string (meaning the current directory). It changed
# to be the actual path to the current directory, so that os.chdir wouldn't
# affect the outcome.
actual_syspath0_dash_m = (
(CPYTHON and (PYVERSION >= (3, 7, 0, 'beta', 3))) or
(PYPY and (PYPYVERSION >= (7, 3, 4)))
)
# 3.7 changed how functions with only docstrings are numbered.
docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10))
# When a break/continue/return statement in a try block jumps to a finally
# block, does the finally block do the break/continue/return (pre-3.8), or
# does the finally jump back to the break/continue/return (3.8) to do the
# work?
finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10))
# When a function is decorated, does the trace function get called for the
# @-line and also the def-line (new behavior in 3.8)? Or just the @-line
# (old behavior)?
trace_decorated_def = (CPYTHON and PYVERSION >= (3, 8))
# Are while-true loops optimized into absolute jumps with no loop setup?
nix_while_true = (PYVERSION >= (3, 8))
# Python 3.9a1 made sys.argv[0] and other reported files absolute paths.
report_absolute_files = (PYVERSION >= (3, 9))
# Lines after break/continue/return/raise are no longer compiled into the
# bytecode. They used to be marked as missing, now they aren't executable.
omit_after_jump = pep626
# PyPy has always omitted statements after return.
omit_after_return = omit_after_jump or PYPY
# Modules used to have firstlineno equal to the line number of the first
# real line of code. Now they always start at 1.
module_firstline_1 = pep626
# Are "if 0:" lines (and similar) kept in the compiled code?
keep_constant_test = pep626
# When leaving a with-block, do we visit the with-line again for the exit?
exit_through_with = (PYVERSION >= (3, 10, 0, 'beta'))
# Match-case construct.
match_case = (PYVERSION >= (3, 10))
# Some words are keywords in some places, identifiers in other places.
soft_keywords = (PYVERSION >= (3, 10))
# Coverage.py specifics.
# Are we using the C-implemented trace function?
C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
# Are we coverage-measuring ourselves?
METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
# Are we running our test suite?
# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
# test-specific behavior like contracts.
TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging
# tests to remove noise from stack traces.
# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces.
USE_CONTRACTS = TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
| 5,011 | Python | 36.969697 | 91 | 0.672121 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/html.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""HTML reporting for coverage.py."""
import datetime
import json
import os
import re
import shutil
import types
import coverage
from coverage.data import add_data_to_hash
from coverage.exceptions import CoverageException
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime
from coverage.misc import human_sorted
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
from coverage.templite import Templite
os = isolate_module(os)
def data_filename(fname):
"""Return the path to an "htmlfiles" data file of ours.
"""
static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles")
static_filename = os.path.join(static_dir, fname)
return static_filename
def read_data(fname):
"""Return the contents of a data file of ours."""
with open(data_filename(fname)) as data_file:
return data_file.read()
def write_html(fname, html):
"""Write `html` to `fname`, properly encoded."""
html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
with open(fname, "wb") as fout:
fout.write(html.encode('ascii', 'xmlcharrefreplace'))
class HtmlDataGeneration:
"""Generate structured data to be turned into HTML reports."""
EMPTY = "(empty)"
def __init__(self, cov):
self.coverage = cov
self.config = self.coverage.config
data = self.coverage.get_data()
self.has_arcs = data.has_arcs()
if self.config.show_contexts:
if data.measured_contexts() == {""}:
self.coverage._warn("No contexts were measured")
data.set_query_contexts(self.config.report_contexts)
def data_for_file(self, fr, analysis):
"""Produce the data needed for one file's report."""
if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
arcs_executed = analysis.arcs_executed()
if self.config.show_contexts:
contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
lines = []
for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
# Figure out how to mark this line.
category = None
short_annotations = []
long_annotations = []
if lineno in analysis.excluded:
category = 'exc'
elif lineno in analysis.missing:
category = 'mis'
elif self.has_arcs and lineno in missing_branch_arcs:
category = 'par'
for b in missing_branch_arcs[lineno]:
if b < 0:
short_annotations.append("exit")
else:
short_annotations.append(b)
long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
elif lineno in analysis.statements:
category = 'run'
contexts = contexts_label = None
context_list = None
if category and self.config.show_contexts:
contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ()))
if contexts == [self.EMPTY]:
contexts_label = self.EMPTY
else:
contexts_label = f"{len(contexts)} ctx"
context_list = contexts
lines.append(types.SimpleNamespace(
tokens=tokens,
number=lineno,
category=category,
statement=(lineno in analysis.statements),
contexts=contexts,
contexts_label=contexts_label,
context_list=context_list,
short_annotations=short_annotations,
long_annotations=long_annotations,
))
file_data = types.SimpleNamespace(
relative_filename=fr.relative_filename(),
nums=analysis.numbers,
lines=lines,
)
return file_data
class HtmlReporter:
"""HTML reporting."""
# These files will be copied from the htmlfiles directory to the output
# directory.
STATIC_FILES = [
"style.css",
"coverage_html.js",
"keybd_closed.png",
"keybd_open.png",
"favicon_32.png",
]
def __init__(self, cov):
self.coverage = cov
self.config = self.coverage.config
self.directory = self.config.html_dir
self.skip_covered = self.config.html_skip_covered
if self.skip_covered is None:
self.skip_covered = self.config.skip_covered
self.skip_empty = self.config.html_skip_empty
if self.skip_empty is None:
self.skip_empty = self.config.skip_empty
self.skipped_covered_count = 0
self.skipped_empty_count = 0
title = self.config.html_title
if self.config.extra_css:
self.extra_css = os.path.basename(self.config.extra_css)
else:
self.extra_css = None
self.data = self.coverage.get_data()
self.has_arcs = self.data.has_arcs()
self.file_summaries = []
self.all_files_nums = []
self.incr = IncrementalChecker(self.directory)
self.datagen = HtmlDataGeneration(self.coverage)
self.totals = Numbers(precision=self.config.precision)
self.template_globals = {
# Functions available in the templates.
'escape': escape,
'pair': pair,
'len': len,
# Constants for this report.
'__url__': coverage.__url__,
'__version__': coverage.__version__,
'title': title,
'time_stamp': format_local_datetime(datetime.datetime.now()),
'extra_css': self.extra_css,
'has_arcs': self.has_arcs,
'show_contexts': self.config.show_contexts,
# Constants for all reports.
# These css classes determine which lines are highlighted by default.
'category': {
'exc': 'exc show_exc',
'mis': 'mis show_mis',
'par': 'par run show_par',
'run': 'run',
}
}
self.pyfile_html_source = read_data("pyfile.html")
self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
def report(self, morfs):
"""Generate an HTML report for `morfs`.
`morfs` is a list of modules or file names.
"""
# Read the status data and check that this run used the same
# global data as the last run.
self.incr.read()
self.incr.check_global_data(self.config, self.pyfile_html_source)
# Process all the files.
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.html_file(fr, analysis)
if not self.all_files_nums:
raise CoverageException("No data to report.")
self.totals = sum(self.all_files_nums)
# Write the index file.
self.index_file()
self.make_local_static_report_files()
return self.totals.n_statements and self.totals.pc_covered
def make_local_static_report_files(self):
"""Make local instances of static files for HTML report."""
# The files we provide must always be copied.
for static in self.STATIC_FILES:
shutil.copyfile(data_filename(static), os.path.join(self.directory, static))
# .gitignore can't be copied from the source tree because it would
# prevent the static files from being checked in.
with open(os.path.join(self.directory, ".gitignore"), "w") as fgi:
fgi.write("# Created by coverage.py\n*\n")
# The user may have extra CSS they want copied.
if self.extra_css:
shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css))
def html_file(self, fr, analysis):
"""Generate an HTML file for one source file."""
rootname = flat_rootname(fr.relative_filename())
html_filename = rootname + ".html"
ensure_dir(self.directory)
html_path = os.path.join(self.directory, html_filename)
# Get the numbers for this file.
nums = analysis.numbers
self.all_files_nums.append(nums)
if self.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
# If there's an existing file, remove it.
file_be_gone(html_path)
self.skipped_covered_count += 1
return
if self.skip_empty:
# Don't report on empty files.
if nums.n_statements == 0:
file_be_gone(html_path)
self.skipped_empty_count += 1
return
# Find out if the file on disk is already correct.
if self.incr.can_skip_file(self.data, fr, rootname):
self.file_summaries.append(self.incr.index_info(rootname))
return
# Write the HTML page for this file.
file_data = self.datagen.data_for_file(fr, analysis)
for ldata in file_data.lines:
# Build the HTML for the line.
html = []
for tok_type, tok_text in ldata.tokens:
if tok_type == "ws":
html.append(escape(tok_text))
else:
                    tok_html = escape(tok_text) or '&nbsp;'
html.append(
f'<span class="{tok_type}">{tok_html}</span>'
)
ldata.html = ''.join(html)
if ldata.short_annotations:
# 202F is NARROW NO-BREAK SPACE.
# 219B is RIGHTWARDS ARROW WITH STROKE.
ldata.annotate = ", ".join(
f"{ldata.number} ↛ {d}"
for d in ldata.short_annotations
)
else:
ldata.annotate = None
if ldata.long_annotations:
longs = ldata.long_annotations
if len(longs) == 1:
ldata.annotate_long = longs[0]
else:
ldata.annotate_long = "{:d} missed branches: {}".format(
len(longs),
", ".join(
f"{num:d}) {ann_long}"
for num, ann_long in enumerate(longs, start=1)
),
)
else:
ldata.annotate_long = None
css_classes = []
if ldata.category:
css_classes.append(self.template_globals['category'][ldata.category])
ldata.css_class = ' '.join(css_classes) or "pln"
html = self.source_tmpl.render(file_data.__dict__)
write_html(html_path, html)
# Save this file's information for the index file.
index_info = {
'nums': nums,
'html_filename': html_filename,
'relative_filename': fr.relative_filename(),
}
self.file_summaries.append(index_info)
self.incr.set_index_info(rootname, index_info)
def index_file(self):
"""Write the index.html file for this report."""
index_tmpl = Templite(read_data("index.html"), self.template_globals)
skipped_covered_msg = skipped_empty_msg = ""
if self.skipped_covered_count:
msg = "{} {} skipped due to complete coverage."
skipped_covered_msg = msg.format(
self.skipped_covered_count,
"file" if self.skipped_covered_count == 1 else "files",
)
if self.skipped_empty_count:
msg = "{} empty {} skipped."
skipped_empty_msg = msg.format(
self.skipped_empty_count,
"file" if self.skipped_empty_count == 1 else "files",
)
html = index_tmpl.render({
'files': self.file_summaries,
'totals': self.totals,
'skipped_covered_msg': skipped_covered_msg,
'skipped_empty_msg': skipped_empty_msg,
})
index_file = os.path.join(self.directory, "index.html")
write_html(index_file, html)
self.coverage._message(f"Wrote HTML report to {index_file}")
# Write the latest hashes for next time.
self.incr.write()
class IncrementalChecker:
"""Logic and data to support incremental reporting."""
STATUS_FILE = "status.json"
STATUS_FORMAT = 2
# pylint: disable=wrong-spelling-in-comment,useless-suppression
# The data looks like:
#
# {
# "format": 2,
# "globals": "540ee119c15d52a68a53fe6f0897346d",
# "version": "4.0a1",
# "files": {
# "cogapp___init__": {
# "hash": "e45581a5b48f879f301c0f30bf77a50c",
# "index": {
# "html_filename": "cogapp___init__.html",
# "relative_filename": "cogapp/__init__",
# "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
# }
# },
# ...
# "cogapp_whiteutils": {
# "hash": "8504bb427fc488c4176809ded0277d51",
# "index": {
# "html_filename": "cogapp_whiteutils.html",
# "relative_filename": "cogapp/whiteutils",
# "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
# }
# }
# }
# }
def __init__(self, directory):
self.directory = directory
self.reset()
def reset(self):
"""Initialize to empty. Causes all files to be reported."""
self.globals = ''
self.files = {}
def read(self):
"""Read the information we stored last time."""
usable = False
try:
status_file = os.path.join(self.directory, self.STATUS_FILE)
with open(status_file) as fstatus:
status = json.load(fstatus)
except (OSError, ValueError):
usable = False
else:
usable = True
if status['format'] != self.STATUS_FORMAT:
usable = False
elif status['version'] != coverage.__version__:
usable = False
if usable:
self.files = {}
for filename, fileinfo in status['files'].items():
fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
self.files[filename] = fileinfo
self.globals = status['globals']
else:
self.reset()
def write(self):
"""Write the current status."""
status_file = os.path.join(self.directory, self.STATUS_FILE)
files = {}
for filename, fileinfo in self.files.items():
fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
files[filename] = fileinfo
status = {
'format': self.STATUS_FORMAT,
'version': coverage.__version__,
'globals': self.globals,
'files': files,
}
with open(status_file, "w") as fout:
json.dump(status, fout, separators=(',', ':'))
def check_global_data(self, *data):
"""Check the global data that can affect incremental reporting."""
m = Hasher()
for d in data:
m.update(d)
these_globals = m.hexdigest()
if self.globals != these_globals:
self.reset()
self.globals = these_globals
def can_skip_file(self, data, fr, rootname):
"""Can we skip reporting this file?
`data` is a CoverageData object, `fr` is a `FileReporter`, and
`rootname` is the name being used for the file.
"""
m = Hasher()
m.update(fr.source().encode('utf-8'))
add_data_to_hash(data, fr.filename, m)
this_hash = m.hexdigest()
that_hash = self.file_hash(rootname)
if this_hash == that_hash:
# Nothing has changed to require the file to be reported again.
return True
else:
self.set_file_hash(rootname, this_hash)
return False
def file_hash(self, fname):
"""Get the hash of `fname`'s contents."""
return self.files.get(fname, {}).get('hash', '')
def set_file_hash(self, fname, val):
"""Set the hash of `fname`'s contents."""
self.files.setdefault(fname, {})['hash'] = val
def index_info(self, fname):
"""Get the information for index.html for `fname`."""
return self.files.get(fname, {}).get('index', {})
def set_index_info(self, fname, info):
"""Set the information for index.html for `fname`."""
self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
"""HTML-escape the text in `t`.
This is only suitable for HTML text, not attributes.
"""
# Convert HTML special chars into HTML entities.
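    # The order matters: "&" must be replaced first, or the "&" introduced by
    # escaping "<" would itself be escaped again.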
    return t.replace("&", "&amp;").replace("<", "&lt;")
def pair(ratio):
"""Format a pair of numbers so JavaScript can read them in an attribute."""
return "%s %s" % ratio
| 17,694 | Python | 33.970356 | 100 | 0.549452 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/numbits.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""
Functions to manipulate packed binary representations of number sets.
To save space, coverage stores sets of line numbers in SQLite using a packed
binary representation called a numbits. A numbits is a set of positive
integers.
A numbits is stored as a blob in the database. The exact meaning of the bytes
in the blobs should be considered an implementation detail that might change in
the future. Use these functions to work with those binary blobs of data.
"""
import json
from itertools import zip_longest
from coverage.misc import contract, new_contract
def _to_blob(b):
"""Convert a bytestring into a type SQLite will accept for a blob."""
return b
new_contract('blob', lambda v: isinstance(v, bytes))
@contract(nums='Iterable', returns='blob')
def nums_to_numbits(nums):
"""Convert `nums` into a numbits.
Arguments:
nums: a reusable iterable of integers, the line numbers to store.
Returns:
A binary blob.
"""
try:
nbytes = max(nums) // 8 + 1
except ValueError:
# nums was empty.
return _to_blob(b'')
b = bytearray(nbytes)
for num in nums:
b[num//8] |= 1 << num % 8
return _to_blob(bytes(b))
@contract(numbits='blob', returns='list[int]')
def numbits_to_nums(numbits):
"""Convert a numbits into a list of numbers.
Arguments:
numbits: a binary blob, the packed number set.
Returns:
A list of ints.
When registered as a SQLite function by :func:`register_sqlite_functions`,
this returns a string, a JSON-encoded list of ints.
"""
nums = []
for byte_i, byte in enumerate(numbits):
for bit_i in range(8):
if (byte & (1 << bit_i)):
nums.append(byte_i * 8 + bit_i)
return nums
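# Round-trip sketch (illustrative, not part of the original file):
#
#     blob = nums_to_numbits([1, 2, 3, 17])
#     numbits_to_nums(blob)       # -> [1, 2, 3, 17]
#     num_in_numbits(17, blob)    # -> True (num_in_numbits is defined below)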
@contract(numbits1='blob', numbits2='blob', returns='blob')
def numbits_union(numbits1, numbits2):
"""Compute the union of two numbits.
Returns:
A new numbits, the union of `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
return _to_blob(bytes(b1 | b2 for b1, b2 in byte_pairs))
@contract(numbits1='blob', numbits2='blob', returns='blob')
def numbits_intersection(numbits1, numbits2):
"""Compute the intersection of two numbits.
Returns:
        A new numbits, the intersection of `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs)
return _to_blob(intersection_bytes.rstrip(b'\0'))
@contract(numbits1='blob', numbits2='blob', returns='bool')
def numbits_any_intersection(numbits1, numbits2):
"""Is there any number that appears in both numbits?
Determine whether two number sets have a non-empty intersection. This is
faster than computing the intersection.
Returns:
A bool, True if there is any number in both `numbits1` and `numbits2`.
"""
byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
return any(b1 & b2 for b1, b2 in byte_pairs)
@contract(num='int', numbits='blob', returns='bool')
def num_in_numbits(num, numbits):
"""Does the integer `num` appear in `numbits`?
Returns:
A bool, True if `num` is a member of `numbits`.
"""
nbyte, nbit = divmod(num, 8)
if nbyte >= len(numbits):
return False
return bool(numbits[nbyte] & (1 << nbit))
def register_sqlite_functions(connection):
"""
Define numbits functions in a SQLite connection.
This defines these functions for use in SQLite statements:
* :func:`numbits_union`
* :func:`numbits_intersection`
* :func:`numbits_any_intersection`
* :func:`num_in_numbits`
* :func:`numbits_to_nums`
`connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
object. After creating the connection, pass it to this function to
register the numbits functions. Then you can use numbits functions in your
queries::
import sqlite3
from coverage.numbits import register_sqlite_functions
conn = sqlite3.connect('example.db')
register_sqlite_functions(conn)
c = conn.cursor()
# Kind of a nonsense query: find all the files and contexts that
# executed line 47 in any file:
c.execute(
"select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
(47,)
)
"""
connection.create_function("numbits_union", 2, numbits_union)
connection.create_function("numbits_intersection", 2, numbits_intersection)
connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
connection.create_function("num_in_numbits", 2, num_in_numbits)
connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
| 4,995 | Python | 30.821656 | 94 | 0.669469 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/execfile.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Execute files of Python code."""
import importlib.machinery
import importlib.util
import inspect
import marshal
import os
import struct
import sys
import types
from coverage import env
from coverage.exceptions import CoverageException, ExceptionDuringRun, NoCode, NoSource
from coverage.files import canonical_filename, python_reported_file
from coverage.misc import isolate_module
from coverage.phystokens import compile_unicode
from coverage.python import get_python_source
os = isolate_module(os)
PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
class DummyLoader:
"""A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
    Currently only implements the .fullname attribute.
"""
def __init__(self, fullname, *_args):
self.fullname = fullname
def find_module(modulename):
"""Find the module named `modulename`.
Returns the file path of the module, the name of the enclosing
package, and the spec.
"""
try:
spec = importlib.util.find_spec(modulename)
except ImportError as err:
raise NoSource(str(err)) from err
if not spec:
raise NoSource(f"No module named {modulename!r}")
pathname = spec.origin
packagename = spec.name
if spec.submodule_search_locations:
mod_main = modulename + ".__main__"
spec = importlib.util.find_spec(mod_main)
if not spec:
raise NoSource(
f"No module named {mod_main}; " +
f"{modulename!r} is a package and cannot be directly executed"
)
pathname = spec.origin
packagename = spec.name
    packagename = packagename.rpartition(".")[0]
return pathname, packagename, spec
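# Illustrative sketch (not part of the original file):
#
#     pathname, package, spec = find_module("json.tool")
#     # pathname -> ".../json/tool.py", package -> "json"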
class PyRunner:
"""Multi-stage execution of Python code.
This is meant to emulate real Python execution as closely as possible.
"""
def __init__(self, args, as_module=False):
self.args = args
self.as_module = as_module
self.arg0 = args[0]
self.package = self.modulename = self.pathname = self.loader = self.spec = None
def prepare(self):
"""Set sys.path properly.
This needs to happen before any importing, and without importing anything.
"""
if self.as_module:
if env.PYBEHAVIOR.actual_syspath0_dash_m:
path0 = os.getcwd()
else:
path0 = ""
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
path0 = self.arg0
else:
path0 = os.path.abspath(os.path.dirname(self.arg0))
if os.path.isdir(sys.path[0]):
# sys.path fakery. If we are being run as a command, then sys.path[0]
# is the directory of the "coverage" script. If this is so, replace
# sys.path[0] with the directory of the file we're running, or the
# current directory when running modules. If it isn't so, then we
# don't know what's going on, and just leave it alone.
top_file = inspect.stack()[-1][0].f_code.co_filename
sys_path_0_abs = os.path.abspath(sys.path[0])
top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
sys_path_0_abs = canonical_filename(sys_path_0_abs)
top_file_dir_abs = canonical_filename(top_file_dir_abs)
if sys_path_0_abs != top_file_dir_abs:
path0 = None
else:
# sys.path[0] is a file. Is the next entry the directory containing
# that file?
if sys.path[1] == os.path.dirname(sys.path[0]):
# Can it be right to always remove that?
del sys.path[1]
if path0 is not None:
sys.path[0] = python_reported_file(path0)
def _prepare2(self):
"""Do more preparation to run Python code.
Includes finding the module to run and adjusting sys.argv[0].
This method is allowed to import code.
"""
if self.as_module:
self.modulename = self.arg0
pathname, self.package, self.spec = find_module(self.modulename)
if self.spec is not None:
self.modulename = self.spec.name
self.loader = DummyLoader(self.modulename)
self.pathname = os.path.abspath(pathname)
self.args[0] = self.arg0 = self.pathname
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
for ext in [".py", ".pyc", ".pyo"]:
try_filename = os.path.join(self.arg0, "__main__" + ext)
# 3.8.10 changed how files are reported when running a
# directory. But I'm not sure how far this change is going to
# spread, so I'll just hard-code it here for now.
if env.PYVERSION >= (3, 8, 10):
try_filename = os.path.abspath(try_filename)
if os.path.exists(try_filename):
self.arg0 = try_filename
break
else:
raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
# Make a spec. I don't know if this is the right way to do it.
try_filename = python_reported_file(try_filename)
self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
self.spec.has_location = True
self.package = ""
self.loader = DummyLoader("__main__")
else:
self.loader = DummyLoader("__main__")
self.arg0 = python_reported_file(self.arg0)
def run(self):
"""Run the Python code!"""
self._prepare2()
# Create a module to serve as __main__
main_mod = types.ModuleType('__main__')
from_pyc = self.arg0.endswith((".pyc", ".pyo"))
main_mod.__file__ = self.arg0
if from_pyc:
main_mod.__file__ = main_mod.__file__[:-1]
if self.package is not None:
main_mod.__package__ = self.package
main_mod.__loader__ = self.loader
if self.spec is not None:
main_mod.__spec__ = self.spec
main_mod.__builtins__ = sys.modules['builtins']
sys.modules['__main__'] = main_mod
# Set sys.argv properly.
sys.argv = self.args
try:
# Make a code object somehow.
if from_pyc:
code = make_code_from_pyc(self.arg0)
else:
code = make_code_from_py(self.arg0)
except CoverageException:
raise
except Exception as exc:
msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}"
raise CoverageException(msg) from exc
# Execute the code object.
# Return to the original directory in case the test code exits in
# a non-existent directory.
cwd = os.getcwd()
try:
exec(code, main_mod.__dict__)
except SystemExit: # pylint: disable=try-except-raise
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except Exception:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel one layer off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
# PyPy3 weirdness. If I don't access __context__, then somehow it
# is non-None when the exception is reported at the upper layer,
# and a nested exception is shown to the user. This getattr fixes
# it somehow? https://bitbucket.org/pypy/pypy/issue/1903
getattr(err, '__context__', None)
# Call the excepthook.
try:
err.__traceback__ = err.__traceback__.tb_next
sys.excepthook(typ, err, tb.tb_next)
except SystemExit: # pylint: disable=try-except-raise
raise
except Exception as exc:
# Getting the output right in the case of excepthook
# shenanigans is kind of involved.
sys.stderr.write("Error in sys.excepthook:\n")
typ2, err2, tb2 = sys.exc_info()
err2.__suppress_context__ = True
err2.__traceback__ = err2.__traceback__.tb_next
sys.__excepthook__(typ2, err2, tb2.tb_next)
sys.stderr.write("\nOriginal exception was:\n")
raise ExceptionDuringRun(typ, err, tb.tb_next) from exc
else:
sys.exit(1)
finally:
os.chdir(cwd)
def run_python_module(args):
"""Run a Python module, as though with ``python -m name args...``.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
This is a helper for tests, to encapsulate how to use PyRunner.
"""
runner = PyRunner(args, as_module=True)
runner.prepare()
runner.run()
def run_python_file(args):
"""Run a Python file as if it were the main program on the command line.
    `args` is the argument array to present as sys.argv, including the first
    element naming the file being executed.
This is a helper for tests, to encapsulate how to use PyRunner.
"""
runner = PyRunner(args, as_module=False)
runner.prepare()
runner.run()
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
source = get_python_source(filename)
except (OSError, NoSource) as exc:
raise NoSource(f"No file to run: '{filename}'") from exc
code = compile_unicode(source, filename, "exec")
return code
def make_code_from_pyc(filename):
"""Get a code object from a .pyc file."""
try:
fpyc = open(filename, "rb")
except OSError as exc:
raise NoCode(f"No file to run: '{filename}'") from exc
with fpyc:
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
if magic != PYC_MAGIC_NUMBER:
raise NoCode(f"Bad magic number in .pyc file: {magic} != {PYC_MAGIC_NUMBER}")
date_based = True
if env.PYBEHAVIOR.hashed_pyc_pep552:
flags = struct.unpack('<L', fpyc.read(4))[0]
hash_based = flags & 0x01
if hash_based:
fpyc.read(8) # Skip the hash.
date_based = False
if date_based:
# Skip the junk in the header that we don't need.
fpyc.read(4) # Skip the moddate.
# 3.3 added another long to the header (size), skip it.
fpyc.read(4)
# The rest of the file is the code object we want.
code = marshal.load(fpyc)
return code
| 11,487 | Python | 35.469841 | 95 | 0.581527 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/coverage/collector.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Raw data collector for coverage.py."""
import os
import sys
from coverage import env
from coverage.debug import short_stack
from coverage.disposition import FileDisposition
from coverage.exceptions import CoverageException
from coverage.misc import human_sorted, isolate_module
from coverage.pytracer import PyTracer
os = isolate_module(os)
try:
# Use the C extension code when we can, for speed.
from coverage.tracer import CTracer, CFileDisposition
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
if os.getenv('COVERAGE_TEST_TRACER') == 'c': # pragma: part covered
# During testing, we use the COVERAGE_TEST_TRACER environment variable
# to indicate that we've fiddled with the environment to test this
# fallback code. If we thought we had a C tracer, but couldn't import
# it, then exit quickly and clearly instead of dribbling confusing
# errors. I'm using sys.exit here instead of an exception because an
# exception here causes all sorts of other noise in unittest.
sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
sys.exit(1)
CTracer = None
class Collector:
"""Collects trace data.
Creates a Tracer object for each thread, since they track stack
information. Each Tracer points to the same shared data, contributing
traced data points.
When the Collector is started, it creates a Tracer for the current thread,
and installs a function to create Tracers for each new thread started.
When the Collector is stopped, all active Tracers are stopped.
Threads started while the Collector is stopped will never have Tracers
associated with them.
"""
# The stack of active Collectors. Collectors are added here when started,
# and popped when stopped. Collectors on the stack are paused when not
# the top, and resumed when they become the top again.
_collectors = []
# The concurrency settings we support here.
SUPPORTED_CONCURRENCIES = {"greenlet", "eventlet", "gevent", "thread"}
def __init__(
self, should_trace, check_include, should_start_context, file_mapper,
timid, branch, warn, concurrency,
):
"""Create a collector.
`should_trace` is a function, taking a file name and a frame, and
returning a `coverage.FileDisposition object`.
`check_include` is a function taking a file name and a frame. It returns
a boolean: True if the file should be traced, False if not.
`should_start_context` is a function taking a frame, and returning a
string. If the frame should be the start of a new context, the string
is the new context. If the frame should not be the start of a new
context, return None.
`file_mapper` is a function taking a filename, and returning a Unicode
filename. The result is the name that will be recorded in the data
file.
If `timid` is true, then a slower simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions make the faster more sophisticated trace function not
operate properly.
If `branch` is true, then branches will be measured. This involves
collecting data on which statements followed each other (arcs). Use
`get_arc_data` to get the arc data.
`warn` is a warning function, taking a single string message argument
and an optional slug argument which will be a string or None, to be
used if a warning needs to be issued.
`concurrency` is a list of strings indicating the concurrency libraries
in use. Valid values are "greenlet", "eventlet", "gevent", or "thread"
(the default). Of these four values, only one can be supplied. Other
values are ignored.
"""
self.should_trace = should_trace
self.check_include = check_include
self.should_start_context = should_start_context
self.file_mapper = file_mapper
self.warn = warn
self.branch = branch
self.threading = None
self.covdata = None
self.static_context = None
self.origin = short_stack()
self.concur_id_func = None
self.mapped_file_cache = {}
# We can handle a few concurrency options here, but only one at a time.
these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
if len(these_concurrencies) > 1:
raise CoverageException(f"Conflicting concurrency settings: {concurrency}")
self.concurrency = these_concurrencies.pop() if these_concurrencies else ''
try:
if self.concurrency == "greenlet":
import greenlet
self.concur_id_func = greenlet.getcurrent
elif self.concurrency == "eventlet":
import eventlet.greenthread # pylint: disable=import-error,useless-suppression
self.concur_id_func = eventlet.greenthread.getcurrent
elif self.concurrency == "gevent":
import gevent # pylint: disable=import-error,useless-suppression
self.concur_id_func = gevent.getcurrent
elif self.concurrency == "thread" or not self.concurrency:
# It's important to import threading only if we need it. If
# it's imported early, and the program being measured uses
# gevent, then gevent's monkey-patching won't work properly.
import threading
self.threading = threading
else:
raise CoverageException(f"Don't understand concurrency={concurrency}")
except ImportError as ex:
raise CoverageException(
"Couldn't trace with concurrency={}, the module isn't installed.".format(
self.concurrency,
)
) from ex
self.reset()
if timid:
# Being timid: use the simple Python trace function.
self._trace_class = PyTracer
else:
# Being fast: use the C Tracer if it is available, else the Python
# trace function.
self._trace_class = CTracer or PyTracer
if self._trace_class is CTracer:
self.file_disposition_class = CFileDisposition
self.supports_plugins = True
self.packed_arcs = True
else:
self.file_disposition_class = FileDisposition
self.supports_plugins = False
self.packed_arcs = False
def __repr__(self):
return f"<Collector at 0x{id(self):x}: {self.tracer_name()}>"
def use_data(self, covdata, context):
"""Use `covdata` for recording data."""
self.covdata = covdata
self.static_context = context
self.covdata.set_context(self.static_context)
def tracer_name(self):
"""Return the class name of the tracer we're using."""
return self._trace_class.__name__
def _clear_data(self):
"""Clear out existing data, but stay ready for more collection."""
        # We used to use self.data.clear(), but that would remove filename
# keys and data values that were still in use higher up the stack
# when we are called as part of switch_context.
for d in self.data.values():
d.clear()
for tracer in self.tracers:
tracer.reset_activity()
def reset(self):
"""Clear collected data, and prepare to collect more."""
# A dictionary mapping file names to dicts with line number keys (if not
# branch coverage), or mapping file names to dicts with line number
# pairs as keys (if branch coverage).
self.data = {}
# A dictionary mapping file names to file tracer plugin names that will
# handle them.
self.file_tracers = {}
self.disabled_plugins = set()
# The .should_trace_cache attribute is a cache from file names to
# coverage.FileDisposition objects, or None. When a file is first
# considered for tracing, a FileDisposition is obtained from
# Coverage.should_trace. Its .trace attribute indicates whether the
# file should be traced or not. If it should be, a plugin with dynamic
# file names can decide not to trace it based on the dynamic file name
# being excluded by the inclusion rules, in which case the
# FileDisposition will be replaced by None in the cache.
if env.PYPY:
import __pypy__ # pylint: disable=import-error
# Alex Gaynor said:
# should_trace_cache is a strictly growing key: once a key is in
# it, it never changes. Further, the keys used to access it are
# generally constant, given sufficient context. That is to say, at
# any given point _trace() is called, pypy is able to know the key.
# This is because the key is determined by the physical source code
# line, and that's invariant with the call site.
#
# This property of a dict with immutable keys, combined with
# call-site-constant keys is a match for PyPy's module dict,
# which is optimized for such workloads.
#
# This gives a 20% benefit on the workload described at
# https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
self.should_trace_cache = __pypy__.newdict("module")
else:
self.should_trace_cache = {}
# Our active Tracers.
self.tracers = []
self._clear_data()
def _start_tracer(self):
"""Start a new Tracer object, and store it in self.tracers."""
tracer = self._trace_class()
tracer.data = self.data
tracer.trace_arcs = self.branch
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn
if hasattr(tracer, 'concur_id_func'):
tracer.concur_id_func = self.concur_id_func
elif self.concur_id_func:
raise CoverageException(
"Can't support concurrency={} with {}, only threads are supported".format(
self.concurrency, self.tracer_name(),
)
)
if hasattr(tracer, 'file_tracers'):
tracer.file_tracers = self.file_tracers
if hasattr(tracer, 'threading'):
tracer.threading = self.threading
if hasattr(tracer, 'check_include'):
tracer.check_include = self.check_include
if hasattr(tracer, 'should_start_context'):
tracer.should_start_context = self.should_start_context
tracer.switch_context = self.switch_context
if hasattr(tracer, 'disable_plugin'):
tracer.disable_plugin = self.disable_plugin
fn = tracer.start()
self.tracers.append(tracer)
return fn
# The trace function has to be set individually on each thread before
# execution begins. Ironically, the only support the threading module has
# for running code before the thread main is the tracing function. So we
# install this as a trace function, and the first time it's called, it does
# the real trace installation.
def _installation_trace(self, frame, event, arg):
"""Called on new threads, installs the real tracer."""
# Remove ourselves as the trace function.
sys.settrace(None)
# Install the real tracer.
fn = self._start_tracer()
# Invoke the real trace function with the current event, to be sure
# not to lose an event.
if fn:
fn = fn(frame, event, arg)
# Return the new trace function to continue tracing in this scope.
return fn
def start(self):
"""Start collecting trace information."""
if self._collectors:
self._collectors[-1].pause()
self.tracers = []
# Check to see whether we had a fullcoverage tracer installed. If so,
# get the stack frames it stashed away for us.
traces0 = []
fn0 = sys.gettrace()
if fn0:
tracer0 = getattr(fn0, '__self__', None)
if tracer0:
traces0 = getattr(tracer0, 'traces', [])
try:
# Install the tracer on this thread.
fn = self._start_tracer()
except:
if self._collectors:
self._collectors[-1].resume()
raise
# If _start_tracer succeeded, then we add ourselves to the global
# stack of collectors.
self._collectors.append(self)
# Replay all the events from fullcoverage into the new trace function.
for args in traces0:
(frame, event, arg), lineno = args
try:
fn(frame, event, arg, lineno=lineno)
except TypeError as ex:
raise Exception("fullcoverage must be run with the C trace function.") from ex
# Install our installation tracer in threading, to jump-start other
# threads.
if self.threading:
self.threading.settrace(self._installation_trace)
def stop(self):
"""Stop collecting trace information."""
assert self._collectors
if self._collectors[-1] is not self:
print("self._collectors:")
for c in self._collectors:
print(f" {c!r}\n{c.origin}")
assert self._collectors[-1] is self, (
f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}"
)
self.pause()
# Remove this Collector from the stack, and resume the one underneath
# (if any).
self._collectors.pop()
if self._collectors:
self._collectors[-1].resume()
def pause(self):
"""Pause tracing, but be prepared to `resume`."""
for tracer in self.tracers:
tracer.stop()
stats = tracer.get_stats()
if stats:
print("\nCoverage.py tracer stats:")
for k in human_sorted(stats.keys()):
print(f"{k:>20}: {stats[k]}")
if self.threading:
self.threading.settrace(None)
def resume(self):
"""Resume tracing after a `pause`."""
for tracer in self.tracers:
tracer.start()
if self.threading:
self.threading.settrace(self._installation_trace)
else:
self._start_tracer()
def _activity(self):
"""Has any activity been traced?
Returns a boolean, True if any trace function was invoked.
"""
return any(tracer.activity() for tracer in self.tracers)
def switch_context(self, new_context):
"""Switch to a new dynamic context."""
self.flush_data()
if self.static_context:
context = self.static_context
if new_context:
context += "|" + new_context
else:
context = new_context
self.covdata.set_context(context)
def disable_plugin(self, disposition):
"""Disable the plugin mentioned in `disposition`."""
file_tracer = disposition.file_tracer
plugin = file_tracer._coverage_plugin
plugin_name = plugin._coverage_plugin_name
self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception")
plugin._coverage_enabled = False
disposition.trace = False
def cached_mapped_file(self, filename):
"""A locally cached version of file names mapped through file_mapper."""
key = (type(filename), filename)
try:
return self.mapped_file_cache[key]
except KeyError:
return self.mapped_file_cache.setdefault(key, self.file_mapper(filename))
def mapped_file_dict(self, d):
"""Return a dict like d, but with keys modified by file_mapper."""
# The call to list(items()) ensures that the GIL protects the dictionary
# iterator against concurrent modifications by tracers running
# in other threads. We try three times in case of concurrent
# access, hoping to get a clean copy.
runtime_err = None
for _ in range(3): # pragma: part covered
try:
items = list(d.items())
except RuntimeError as ex: # pragma: cant happen
runtime_err = ex
else:
break
else:
raise runtime_err # pragma: cant happen
return {self.cached_mapped_file(k): v for k, v in items if v}
def plugin_was_disabled(self, plugin):
"""Record that `plugin` was disabled during the run."""
self.disabled_plugins.add(plugin._coverage_plugin_name)
def flush_data(self):
"""Save the collected data to our associated `CoverageData`.
Data may have also been saved along the way. This forces the
last of the data to be saved.
Returns True if there was data to save, False if not.
"""
if not self._activity():
return False
if self.branch:
if self.packed_arcs:
# Unpack the line number pairs packed into integers. See
# tracer.c:CTracer_record_pair for the C code that creates
# these packed ints.
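                # For example (illustrative): the arc (3, -1) -- line 3 to a
                # function exit -- arrives packed as 3 | (1 << 20) | (1 << 41):
                # bits 0-19 hold abs(l1), bits 20-39 hold abs(l2), and bits
                # 40 and 41 flag negative l1 and l2 respectively.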
data = {}
for fname, packeds in self.data.items():
tuples = []
for packed in packeds:
l1 = packed & 0xFFFFF
l2 = (packed & (0xFFFFF << 20)) >> 20
if packed & (1 << 40):
l1 *= -1
if packed & (1 << 41):
l2 *= -1
tuples.append((l1, l2))
data[fname] = tuples
else:
data = self.data
self.covdata.add_arcs(self.mapped_file_dict(data))
else:
self.covdata.add_lines(self.mapped_file_dict(self.data))
file_tracers = {
k: v for k, v in self.file_tracers.items()
if v not in self.disabled_plugins
}
self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers))
self._clear_data()
return True
| 18,826 | Python | 38.887712 | 98 | 0.60358 |