omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/_queue.py

"""
A fork of Python 3.6's stdlib queue with Lock swapped out for RLock to avoid a
deadlock while garbage collecting.
See
https://codewithoutrules.com/2017/08/16/concurrency-python/
https://bugs.python.org/issue14976
https://github.com/sqlalchemy/sqlalchemy/blob/4eb747b61f0c1b1c25bdee3856d7195d10a0c227/lib/sqlalchemy/queue.py#L1
We also vendor the code to evade eventlet's broken monkeypatching, see
https://github.com/getsentry/sentry-python/pull/484
"""
import threading
from collections import deque
from time import time
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
__all__ = ["EmptyError", "FullError", "Queue"]
class EmptyError(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class FullError(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue(object):
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError("task_done() called too many times")
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
with self.mutex:
return self._qsize()
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
"""
with self.mutex:
return not self._qsize()
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
"""
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
        If optional arg 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the FullError exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the FullError exception ('timeout'
is ignored in that case).
"""
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise FullError()
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise FullError()
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
        If optional arg 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the EmptyError exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the EmptyError exception ('timeout' is ignored
in that case).
"""
with self.not_empty:
if not block:
if not self._qsize():
raise EmptyError()
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise EmptyError()
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the FullError exception.
"""
return self.put(item, block=False)
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the EmptyError exception.
"""
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque() # type: Any
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
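# --- Example (not part of the vendored module) ---
# A minimal producer/consumer sketch, assuming this RLock-based Queue behaves
# like stdlib queue.Queue (which the fork preserves). The worker logic and
# item values below are illustrative only.
if __name__ == "__main__":
    q = Queue(maxsize=2)

    def _worker():
        while True:
            item = q.get()
            if item is None:  # sentinel: stop the worker
                q.task_done()
                break
            # ... process item ...
            q.task_done()

    t = threading.Thread(target=_worker)
    t.start()
    for i in range(5):
        q.put(i)    # blocks while more than maxsize items are pending
    q.put(None)     # sentinel
    q.join()        # returns once task_done() was called for every put()
    t.join()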

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/serializer.py

import sys
import math
from datetime import datetime
from sentry_sdk.utils import (
AnnotatedValue,
capture_internal_exception,
disable_capture_event,
format_timestamp,
json_dumps,
safe_repr,
strip_string,
)
import sentry_sdk.utils
from sentry_sdk._compat import (
text_type,
PY2,
string_types,
number_types,
iteritems,
binary_sequence_types,
)
from sentry_sdk._types import MYPY
if MYPY:
from datetime import timedelta
from types import TracebackType
from typing import Any
from typing import Callable
from typing import ContextManager
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from sentry_sdk._types import NotImplementedType, Event
Span = Dict[str, Any]
ReprProcessor = Callable[[Any, Dict[str, Any]], Union[NotImplementedType, str]]
Segment = Union[str, int]
if PY2:
# Importing ABCs from collections is deprecated, and will stop working in 3.8
# https://github.com/python/cpython/blob/master/Lib/collections/__init__.py#L49
from collections import Mapping, Sequence, Set
serializable_str_types = string_types + binary_sequence_types
else:
# New in 3.3
# https://docs.python.org/3/library/collections.abc.html
from collections.abc import Mapping, Sequence, Set
# Bytes are technically not strings in Python 3, but we can serialize them
serializable_str_types = string_types + binary_sequence_types
# Maximum length of JSON-serialized event payloads that can be safely sent
# before the server may reject the event due to its size. This is not intended
# to reflect actual values defined server-side, but rather only be an upper
# bound for events sent by the SDK.
#
# Can be overwritten if wanting to send more bytes, e.g. with a custom server.
# When changing this, keep in mind that events may be a little bit larger than
# this value due to attached metadata, so keep the number conservative.
MAX_EVENT_BYTES = 10**6
MAX_DATABAG_DEPTH = 5
MAX_DATABAG_BREADTH = 10
CYCLE_MARKER = "<cyclic>"
global_repr_processors = [] # type: List[ReprProcessor]
def add_global_repr_processor(processor):
# type: (ReprProcessor) -> None
global_repr_processors.append(processor)
class Memo(object):
__slots__ = ("_ids", "_objs")
def __init__(self):
# type: () -> None
self._ids = {} # type: Dict[int, Any]
self._objs = [] # type: List[Any]
def memoize(self, obj):
# type: (Any) -> ContextManager[bool]
self._objs.append(obj)
return self
def __enter__(self):
# type: () -> bool
obj = self._objs[-1]
if id(obj) in self._ids:
return True
else:
self._ids[id(obj)] = obj
return False
def __exit__(
self,
ty, # type: Optional[Type[BaseException]]
value, # type: Optional[BaseException]
tb, # type: Optional[TracebackType]
):
# type: (...) -> None
self._ids.pop(id(self._objs.pop()), None)
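# A hedged sketch of how Memo detects cycles: re-entering memoize() for an
# object already on the stack yields True, signalling the caller to emit
# CYCLE_MARKER instead of recursing forever.
#
#     memo = Memo()
#     lst = []
#     lst.append(lst)                      # self-referencing list
#     with memo.memoize(lst) as seen:      # False on first entry
#         with memo.memoize(lst) as again:
#             assert again is True         # the cycle is detected here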
def serialize(event, smart_transaction_trimming=False, **kwargs):
# type: (Event, bool, **Any) -> Event
memo = Memo()
path = [] # type: List[Segment]
meta_stack = [] # type: List[Dict[str, Any]]
span_description_bytes = [] # type: List[int]
def _annotate(**meta):
# type: (**Any) -> None
while len(meta_stack) <= len(path):
try:
segment = path[len(meta_stack) - 1]
node = meta_stack[-1].setdefault(text_type(segment), {})
except IndexError:
node = {}
meta_stack.append(node)
meta_stack[-1].setdefault("", {}).update(meta)
def _should_repr_strings():
# type: () -> Optional[bool]
"""
By default non-serializable objects are going through
safe_repr(). For certain places in the event (local vars) we
want to repr() even things that are JSON-serializable to
make their type more apparent. For example, it's useful to
see the difference between a unicode-string and a bytestring
when viewing a stacktrace.
For container-types we still don't do anything different.
Generally we just try to make the Sentry UI present exactly
what a pretty-printed repr would look like.
:returns: `True` if we are somewhere in frame variables, and `False` if
we are in a position where we will never encounter frame variables
when recursing (for example, we're in `event.extra`). `None` if we
are not (yet) in frame variables, but might encounter them when
recursing (e.g. we're in `event.exception`)
"""
try:
p0 = path[0]
if p0 == "stacktrace" and path[1] == "frames" and path[3] == "vars":
return True
if (
p0 in ("threads", "exception")
and path[1] == "values"
and path[3] == "stacktrace"
and path[4] == "frames"
and path[6] == "vars"
):
return True
except IndexError:
return None
return False
def _is_databag():
# type: () -> Optional[bool]
"""
A databag is any value that we need to trim.
:returns: Works like `_should_repr_strings()`. `True` for "yes",
        `False` for "no", `None` for "maybe soon".
"""
try:
rv = _should_repr_strings()
if rv in (True, None):
return rv
p0 = path[0]
if p0 == "request" and path[1] == "data":
return True
if p0 == "breadcrumbs" and path[1] == "values":
path[2]
return True
if p0 == "extra":
return True
except IndexError:
return None
return False
def _serialize_node(
obj, # type: Any
is_databag=None, # type: Optional[bool]
should_repr_strings=None, # type: Optional[bool]
segment=None, # type: Optional[Segment]
remaining_breadth=None, # type: Optional[int]
remaining_depth=None, # type: Optional[int]
):
# type: (...) -> Any
if segment is not None:
path.append(segment)
try:
with memo.memoize(obj) as result:
if result:
return CYCLE_MARKER
return _serialize_node_impl(
obj,
is_databag=is_databag,
should_repr_strings=should_repr_strings,
remaining_depth=remaining_depth,
remaining_breadth=remaining_breadth,
)
except BaseException:
capture_internal_exception(sys.exc_info())
if is_databag:
return "<failed to serialize, use init(debug=True) to see error logs>"
return None
finally:
if segment is not None:
path.pop()
del meta_stack[len(path) + 1 :]
def _flatten_annotated(obj):
# type: (Any) -> Any
if isinstance(obj, AnnotatedValue):
_annotate(**obj.metadata)
obj = obj.value
return obj
def _serialize_node_impl(
obj, is_databag, should_repr_strings, remaining_depth, remaining_breadth
):
# type: (Any, Optional[bool], Optional[bool], Optional[int], Optional[int]) -> Any
if should_repr_strings is None:
should_repr_strings = _should_repr_strings()
if is_databag is None:
is_databag = _is_databag()
if is_databag and remaining_depth is None:
remaining_depth = MAX_DATABAG_DEPTH
if is_databag and remaining_breadth is None:
remaining_breadth = MAX_DATABAG_BREADTH
obj = _flatten_annotated(obj)
if remaining_depth is not None and remaining_depth <= 0:
_annotate(rem=[["!limit", "x"]])
if is_databag:
return _flatten_annotated(strip_string(safe_repr(obj)))
return None
if is_databag and global_repr_processors:
hints = {"memo": memo, "remaining_depth": remaining_depth}
for processor in global_repr_processors:
result = processor(obj, hints)
if result is not NotImplemented:
return _flatten_annotated(result)
sentry_repr = getattr(type(obj), "__sentry_repr__", None)
if obj is None or isinstance(obj, (bool, number_types)):
if should_repr_strings or (
isinstance(obj, float) and (math.isinf(obj) or math.isnan(obj))
):
return safe_repr(obj)
else:
return obj
elif callable(sentry_repr):
return sentry_repr(obj)
elif isinstance(obj, datetime):
return (
text_type(format_timestamp(obj))
if not should_repr_strings
else safe_repr(obj)
)
elif isinstance(obj, Mapping):
# Create temporary copy here to avoid calling too much code that
# might mutate our dictionary while we're still iterating over it.
obj = dict(iteritems(obj))
rv_dict = {} # type: Dict[str, Any]
i = 0
for k, v in iteritems(obj):
if remaining_breadth is not None and i >= remaining_breadth:
_annotate(len=len(obj))
break
str_k = text_type(k)
v = _serialize_node(
v,
segment=str_k,
should_repr_strings=should_repr_strings,
is_databag=is_databag,
remaining_depth=remaining_depth - 1
if remaining_depth is not None
else None,
remaining_breadth=remaining_breadth,
)
rv_dict[str_k] = v
i += 1
return rv_dict
elif not isinstance(obj, serializable_str_types) and isinstance(
obj, (Set, Sequence)
):
rv_list = []
for i, v in enumerate(obj):
if remaining_breadth is not None and i >= remaining_breadth:
_annotate(len=len(obj))
break
rv_list.append(
_serialize_node(
v,
segment=i,
should_repr_strings=should_repr_strings,
is_databag=is_databag,
remaining_depth=remaining_depth - 1
if remaining_depth is not None
else None,
remaining_breadth=remaining_breadth,
)
)
return rv_list
if should_repr_strings:
obj = safe_repr(obj)
else:
if isinstance(obj, bytes) or isinstance(obj, bytearray):
obj = obj.decode("utf-8", "replace")
if not isinstance(obj, string_types):
obj = safe_repr(obj)
# Allow span descriptions to be longer than other strings.
#
# For database auto-instrumented spans, the description contains
# potentially long SQL queries that are most useful when not truncated.
# Because arbitrarily large events may be discarded by the server as a
# protection mechanism, we dynamically limit the description length
# later in _truncate_span_descriptions.
if (
smart_transaction_trimming
and len(path) == 3
and path[0] == "spans"
and path[-1] == "description"
):
span_description_bytes.append(len(obj))
return obj
return _flatten_annotated(strip_string(obj))
def _truncate_span_descriptions(serialized_event, event, excess_bytes):
# type: (Event, Event, int) -> None
"""
Modifies serialized_event in-place trying to remove excess_bytes from
span descriptions. The original event is used read-only to access the
        span timestamps (represented as RFC3339-formatted strings in
serialized_event).
It uses heuristics to prioritize preserving the description of spans
that might be the most interesting ones in terms of understanding and
optimizing performance.
"""
# When truncating a description, preserve a small prefix.
min_length = 10
def shortest_duration_longest_description_first(args):
# type: (Tuple[int, Span]) -> Tuple[timedelta, int]
i, serialized_span = args
span = event["spans"][i]
now = datetime.utcnow()
start = span.get("start_timestamp") or now
end = span.get("timestamp") or now
duration = end - start
description = serialized_span.get("description") or ""
return (duration, -len(description))
# Note: for simplicity we sort spans by exact duration and description
# length. If ever needed, we could have a more involved heuristic, e.g.
# replacing exact durations with "buckets" and/or looking at other span
# properties.
path.append("spans")
for i, span in sorted(
enumerate(serialized_event.get("spans") or []),
key=shortest_duration_longest_description_first,
):
description = span.get("description") or ""
if len(description) <= min_length:
continue
excess_bytes -= len(description) - min_length
path.extend([i, "description"])
# Note: the last time we call strip_string we could preserve a few
# more bytes up to a total length of MAX_EVENT_BYTES. Since that's
# not strictly required, we leave it out for now for simplicity.
span["description"] = _flatten_annotated(
strip_string(description, max_length=min_length)
)
del path[-2:]
del meta_stack[len(path) + 1 :]
if excess_bytes <= 0:
break
path.pop()
del meta_stack[len(path) + 1 :]
disable_capture_event.set(True)
try:
rv = _serialize_node(event, **kwargs)
if meta_stack and isinstance(rv, dict):
rv["_meta"] = meta_stack[0]
sum_span_description_bytes = sum(span_description_bytes)
if smart_transaction_trimming and sum_span_description_bytes > 0:
span_count = len(event.get("spans") or [])
# This is an upper bound of how many bytes all descriptions would
# consume if the usual string truncation in _serialize_node_impl
# would have taken place, not accounting for the metadata attached
# as event["_meta"].
descriptions_budget_bytes = span_count * sentry_sdk.utils.MAX_STRING_LENGTH
# If by not truncating descriptions we ended up with more bytes than
# per the usual string truncation, check if the event is too large
# and we need to truncate some descriptions.
#
# This is guarded with an if statement to avoid JSON-encoding the
# event unnecessarily.
if sum_span_description_bytes > descriptions_budget_bytes:
original_bytes = len(json_dumps(rv))
excess_bytes = original_bytes - MAX_EVENT_BYTES
if excess_bytes > 0:
# Event is too large, will likely be discarded by the
# server. Trim it down before sending.
_truncate_span_descriptions(rv, event, excess_bytes)
# Span descriptions truncated, set or reset _meta.
#
# We run the same code earlier because we want to account
# for _meta when calculating original_bytes, the number of
# bytes in the JSON-encoded event.
if meta_stack and isinstance(rv, dict):
rv["_meta"] = meta_stack[0]
return rv
finally:
disable_capture_event.set(False)
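# --- Example (not part of the vendored module) ---
# A minimal sketch of databag trimming, assuming the illustrative event
# shape below; "extra" is one of the paths _is_databag() treats as a databag.
if __name__ == "__main__":
    deep = {"a": {"b": {"c": {"d": {"e": {"f": "too deep"}}}}}}
    event = {"extra": {"nested": deep}}
    serialized = serialize(event)
    # Depth beyond MAX_DATABAG_DEPTH is replaced by a repr() string, and the
    # trimming is recorded under the "_meta" key of the serialized event.
    print(serialized["extra"]["nested"])
    print(serialized.get("_meta"))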

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/_types.py

try:
from typing import TYPE_CHECKING as MYPY
except ImportError:
MYPY = False
if MYPY:
from types import TracebackType
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from typing_extensions import Literal
ExcInfo = Tuple[
Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]
]
Event = Dict[str, Any]
Hint = Dict[str, Any]
Breadcrumb = Dict[str, Any]
BreadcrumbHint = Dict[str, Any]
SamplingContext = Dict[str, Any]
EventProcessor = Callable[[Event, Hint], Optional[Event]]
ErrorProcessor = Callable[[Event, ExcInfo], Optional[Event]]
BreadcrumbProcessor = Callable[[Breadcrumb, BreadcrumbHint], Optional[Breadcrumb]]
TransactionProcessor = Callable[[Event, Hint], Optional[Event]]
TracesSampler = Callable[[SamplingContext], Union[float, int, bool]]
# https://github.com/python/mypy/issues/5710
NotImplementedType = Any
EventDataCategory = Literal[
"default",
"error",
"crash",
"transaction",
"security",
"attachment",
"session",
"internal",
"profile",
]
SessionStatus = Literal["ok", "exited", "crashed", "abnormal"]
EndpointType = Literal["store", "envelope"]
DurationUnit = Literal[
"nanosecond",
"microsecond",
"millisecond",
"second",
"minute",
"hour",
"day",
"week",
]
InformationUnit = Literal[
"bit",
"byte",
"kilobyte",
"kibibyte",
"megabyte",
"mebibyte",
"gigabyte",
"gibibyte",
"terabyte",
"tebibyte",
"petabyte",
"pebibyte",
"exabyte",
"exbibyte",
]
FractionUnit = Literal["ratio", "percent"]
MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str]
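# A hedged sketch of a function matching the EventProcessor alias above:
# it takes (Event, Hint) and returns Optional[Event]. The filtering rule
# is illustrative only.
#
#     def drop_transactions(event, hint):
#         # type: (Event, Hint) -> Optional[Event]
#         if event.get("type") == "transaction":
#             return None  # returning None drops the event
#         return event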

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/debug.py

import sys
import logging
from sentry_sdk import utils
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.client import _client_init_debug
from logging import LogRecord
class _HubBasedClientFilter(logging.Filter):
def filter(self, record):
# type: (LogRecord) -> bool
if _client_init_debug.get(False):
return True
hub = Hub.current
if hub is not None and hub.client is not None:
return hub.client.options["debug"]
return False
def init_debug_support():
# type: () -> None
if not logger.handlers:
configure_logger()
configure_debug_hub()
def configure_logger():
# type: () -> None
_handler = logging.StreamHandler(sys.stderr)
_handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s"))
logger.addHandler(_handler)
logger.setLevel(logging.DEBUG)
logger.addFilter(_HubBasedClientFilter())
def configure_debug_hub():
# type: () -> None
def _get_debug_hub():
# type: () -> Hub
return Hub.current
utils._get_debug_hub = _get_debug_hub
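# --- Example (not part of the vendored module) ---
# A minimal sketch: after init_debug_support(), records on the
# "sentry_sdk.errors" logger pass _HubBasedClientFilter only while a client
# with debug=True is bound (or during client init). The DSN is a placeholder.
#
#     import sentry_sdk
#     sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#                     debug=True)
#     # init_debug_support() runs on import of sentry_sdk, so internal debug
#     # messages now appear on stderr, prefixed " [sentry]".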

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/_compat.py

import sys
from sentry_sdk._types import MYPY
if MYPY:
from typing import Optional
from typing import Tuple
from typing import Any
from typing import Type
from typing import TypeVar
T = TypeVar("T")
PY2 = sys.version_info[0] == 2
PY33 = sys.version_info[0] == 3 and sys.version_info[1] >= 3
PY37 = sys.version_info[0] == 3 and sys.version_info[1] >= 7
PY310 = sys.version_info[0] == 3 and sys.version_info[1] >= 10
PY311 = sys.version_info[0] == 3 and sys.version_info[1] >= 11
if PY2:
import urlparse
text_type = unicode # noqa
string_types = (str, text_type)
number_types = (int, long, float) # noqa
int_types = (int, long) # noqa
iteritems = lambda x: x.iteritems() # noqa: B301
binary_sequence_types = (bytearray, memoryview)
def implements_str(cls):
# type: (T) -> T
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: unicode(x).encode("utf-8") # noqa
return cls
exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
else:
import urllib.parse as urlparse # noqa
text_type = str
string_types = (text_type,) # type: Tuple[type]
number_types = (int, float) # type: Tuple[type, type]
int_types = (int,)
iteritems = lambda x: x.items()
binary_sequence_types = (bytes, bytearray, memoryview)
def implements_str(x):
# type: (T) -> T
return x
def reraise(tp, value, tb=None):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[Any]) -> None
assert value is not None
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def with_metaclass(meta, *bases):
# type: (Any, *Any) -> Any
class MetaClass(type):
def __new__(metacls, name, this_bases, d):
# type: (Any, Any, Any, Any) -> Any
return meta(name, bases, d)
return type.__new__(MetaClass, "temporary_class", (), {})
def check_thread_support():
# type: () -> None
try:
from uwsgi import opt # type: ignore
except ImportError:
return
# When `threads` is passed in as a uwsgi option,
# `enable-threads` is implied on.
if "threads" in opt:
return
if str(opt.get("enable-threads", "0")).lower() in ("false", "off", "no", "0"):
from warnings import warn
warn(
Warning(
"We detected the use of uwsgi with disabled threads. "
"This will cause issues with the transport you are "
"trying to use. Please enable threading for uwsgi. "
'(Add the "enable-threads" flag).'
)
)
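# --- Example (not part of the vendored module) ---
# A hedged sketch of with_metaclass(): it builds a temporary class whose
# metaclass re-creates the real class under `meta`, so the same source works
# on Python 2 and 3. The Registry/Plugin names below are illustrative.
if __name__ == "__main__":
    class Registry(type):
        classes = []

        def __new__(mcs, name, bases, d):
            cls = type.__new__(mcs, name, bases, d)
            Registry.classes.append(cls)
            return cls

    class Plugin(with_metaclass(Registry)):
        pass

    assert type(Plugin) is Registry      # Plugin was created by Registry
    assert Plugin in Registry.classes    # and recorded by its __new__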

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/scope.py

from copy import copy
from collections import deque
from itertools import chain
from sentry_sdk._functools import wraps
from sentry_sdk._types import MYPY
from sentry_sdk.utils import logger, capture_internal_exceptions
from sentry_sdk.tracing import Transaction
from sentry_sdk.attachments import Attachment
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
from typing import Deque
from typing import List
from typing import Callable
from typing import TypeVar
from sentry_sdk._types import (
Breadcrumb,
Event,
EventProcessor,
ErrorProcessor,
ExcInfo,
Hint,
Type,
)
from sentry_sdk.profiler import Profile
from sentry_sdk.tracing import Span
from sentry_sdk.session import Session
F = TypeVar("F", bound=Callable[..., Any])
T = TypeVar("T")
global_event_processors = [] # type: List[EventProcessor]
def add_global_event_processor(processor):
# type: (EventProcessor) -> None
global_event_processors.append(processor)
def _attr_setter(fn):
# type: (Any) -> Any
return property(fset=fn, doc=fn.__doc__)
def _disable_capture(fn):
# type: (F) -> F
@wraps(fn)
def wrapper(self, *args, **kwargs):
# type: (Any, *Dict[str, Any], **Any) -> Any
if not self._should_capture:
return
try:
self._should_capture = False
return fn(self, *args, **kwargs)
finally:
self._should_capture = True
return wrapper # type: ignore
class Scope(object):
"""The scope holds extra information that should be sent with all
events that belong to it.
"""
# NOTE: Even though it should not happen, the scope needs to not crash when
# accessed by multiple threads. It's fine if it's full of races, but those
# races should never make the user application crash.
#
# The same needs to hold for any accesses of the scope the SDK makes.
__slots__ = (
"_level",
"_name",
"_fingerprint",
# note that for legacy reasons, _transaction is the transaction *name*,
# not a Transaction object (the object is stored in _span)
"_transaction",
"_transaction_info",
"_user",
"_tags",
"_contexts",
"_extras",
"_breadcrumbs",
"_event_processors",
"_error_processors",
"_should_capture",
"_span",
"_session",
"_attachments",
"_force_auto_session_tracking",
"_profile",
)
def __init__(self):
# type: () -> None
self._event_processors = [] # type: List[EventProcessor]
self._error_processors = [] # type: List[ErrorProcessor]
self._name = None # type: Optional[str]
self.clear()
def clear(self):
# type: () -> None
"""Clears the entire scope."""
self._level = None # type: Optional[str]
self._fingerprint = None # type: Optional[List[str]]
self._transaction = None # type: Optional[str]
self._transaction_info = {} # type: Dict[str, str]
self._user = None # type: Optional[Dict[str, Any]]
self._tags = {} # type: Dict[str, Any]
self._contexts = {} # type: Dict[str, Dict[str, Any]]
self._extras = {} # type: Dict[str, Any]
self._attachments = [] # type: List[Attachment]
self.clear_breadcrumbs()
self._should_capture = True
self._span = None # type: Optional[Span]
self._session = None # type: Optional[Session]
self._force_auto_session_tracking = None # type: Optional[bool]
self._profile = None # type: Optional[Profile]
@_attr_setter
def level(self, value):
# type: (Optional[str]) -> None
"""When set this overrides the level. Deprecated in favor of set_level."""
self._level = value
def set_level(self, value):
# type: (Optional[str]) -> None
"""Sets the level for the scope."""
self._level = value
@_attr_setter
def fingerprint(self, value):
# type: (Optional[List[str]]) -> None
"""When set this overrides the default fingerprint."""
self._fingerprint = value
@property
def transaction(self):
# type: () -> Any
# would be type: () -> Optional[Transaction], see https://github.com/python/mypy/issues/3004
"""Return the transaction (root span) in the scope, if any."""
# there is no span/transaction on the scope
if self._span is None:
return None
# there is an orphan span on the scope
if self._span.containing_transaction is None:
return None
# there is either a transaction (which is its own containing
# transaction) or a non-orphan span on the scope
return self._span.containing_transaction
@transaction.setter
def transaction(self, value):
# type: (Any) -> None
# would be type: (Optional[str]) -> None, see https://github.com/python/mypy/issues/3004
"""When set this forces a specific transaction name to be set.
Deprecated: use set_transaction_name instead."""
# XXX: the docstring above is misleading. The implementation of
# apply_to_event prefers an existing value of event.transaction over
# anything set in the scope.
# XXX: note that with the introduction of the Scope.transaction getter,
# there is a semantic and type mismatch between getter and setter. The
# getter returns a Transaction, the setter sets a transaction name.
# Without breaking version compatibility, we could make the setter set a
# transaction name or transaction (self._span) depending on the type of
# the value argument.
logger.warning(
"Assigning to scope.transaction directly is deprecated: use scope.set_transaction_name() instead."
)
self._transaction = value
if self._span and self._span.containing_transaction:
self._span.containing_transaction.name = value
def set_transaction_name(self, name, source=None):
# type: (str, Optional[str]) -> None
"""Set the transaction name and optionally the transaction source."""
self._transaction = name
if self._span and self._span.containing_transaction:
self._span.containing_transaction.name = name
if source:
self._span.containing_transaction.source = source
if source:
self._transaction_info["source"] = source
@_attr_setter
def user(self, value):
# type: (Optional[Dict[str, Any]]) -> None
"""When set a specific user is bound to the scope. Deprecated in favor of set_user."""
self.set_user(value)
def set_user(self, value):
# type: (Optional[Dict[str, Any]]) -> None
"""Sets a user for the scope."""
self._user = value
if self._session is not None:
self._session.update(user=value)
@property
def span(self):
# type: () -> Optional[Span]
"""Get/set current tracing span or transaction."""
return self._span
@span.setter
def span(self, span):
# type: (Optional[Span]) -> None
self._span = span
# XXX: this differs from the implementation in JS, there Scope.setSpan
# does not set Scope._transactionName.
if isinstance(span, Transaction):
transaction = span
if transaction.name:
self._transaction = transaction.name
@property
def profile(self):
# type: () -> Optional[Profile]
return self._profile
@profile.setter
def profile(self, profile):
# type: (Optional[Profile]) -> None
self._profile = profile
def set_tag(
self,
key, # type: str
value, # type: Any
):
# type: (...) -> None
"""Sets a tag for a key to a specific value."""
self._tags[key] = value
def remove_tag(
self, key # type: str
):
# type: (...) -> None
"""Removes a specific tag."""
self._tags.pop(key, None)
def set_context(
self,
key, # type: str
value, # type: Dict[str, Any]
):
# type: (...) -> None
"""Binds a context at a certain key to a specific value."""
self._contexts[key] = value
def remove_context(
self, key # type: str
):
# type: (...) -> None
"""Removes a context."""
self._contexts.pop(key, None)
def set_extra(
self,
key, # type: str
value, # type: Any
):
# type: (...) -> None
"""Sets an extra key to a specific value."""
self._extras[key] = value
def remove_extra(
self, key # type: str
):
# type: (...) -> None
"""Removes a specific extra key."""
self._extras.pop(key, None)
def clear_breadcrumbs(self):
# type: () -> None
"""Clears breadcrumb buffer."""
self._breadcrumbs = deque() # type: Deque[Breadcrumb]
def add_attachment(
self,
bytes=None, # type: Optional[bytes]
filename=None, # type: Optional[str]
path=None, # type: Optional[str]
content_type=None, # type: Optional[str]
add_to_transactions=False, # type: bool
):
# type: (...) -> None
"""Adds an attachment to future events sent."""
self._attachments.append(
Attachment(
bytes=bytes,
path=path,
filename=filename,
content_type=content_type,
add_to_transactions=add_to_transactions,
)
)
def add_event_processor(
self, func # type: EventProcessor
):
# type: (...) -> None
"""Register a scope local event processor on the scope.
        :param func: This function behaves like `before_send`.
"""
if len(self._event_processors) > 20:
logger.warning(
"Too many event processors on scope! Clearing list to free up some memory: %r",
self._event_processors,
)
del self._event_processors[:]
self._event_processors.append(func)
def add_error_processor(
self,
func, # type: ErrorProcessor
cls=None, # type: Optional[Type[BaseException]]
):
# type: (...) -> None
"""Register a scope local error processor on the scope.
        :param func: A callback that works like an event processor but is invoked with the original exception info triple as its second argument.
:param cls: Optionally, only process exceptions of this type.
"""
if cls is not None:
cls_ = cls # For mypy.
real_func = func
def func(event, exc_info):
# type: (Event, ExcInfo) -> Optional[Event]
try:
is_inst = isinstance(exc_info[1], cls_)
except Exception:
is_inst = False
if is_inst:
return real_func(event, exc_info)
return event
self._error_processors.append(func)
@_disable_capture
def apply_to_event(
self,
event, # type: Event
hint, # type: Hint
):
# type: (...) -> Optional[Event]
"""Applies the information contained on the scope to the given event."""
def _drop(event, cause, ty):
# type: (Dict[str, Any], Any, str) -> Optional[Any]
logger.info("%s (%s) dropped event (%s)", ty, cause, event)
return None
is_transaction = event.get("type") == "transaction"
# put all attachments into the hint. This lets callbacks play around
# with attachments. We also later pull this out of the hint when we
# create the envelope.
attachments_to_send = hint.get("attachments") or []
for attachment in self._attachments:
if not is_transaction or attachment.add_to_transactions:
attachments_to_send.append(attachment)
hint["attachments"] = attachments_to_send
if self._level is not None:
event["level"] = self._level
if not is_transaction:
event.setdefault("breadcrumbs", {}).setdefault("values", []).extend(
self._breadcrumbs
)
if event.get("user") is None and self._user is not None:
event["user"] = self._user
if event.get("transaction") is None and self._transaction is not None:
event["transaction"] = self._transaction
if event.get("transaction_info") is None and self._transaction_info is not None:
event["transaction_info"] = self._transaction_info
if event.get("fingerprint") is None and self._fingerprint is not None:
event["fingerprint"] = self._fingerprint
if self._extras:
event.setdefault("extra", {}).update(self._extras)
if self._tags:
event.setdefault("tags", {}).update(self._tags)
if self._contexts:
event.setdefault("contexts", {}).update(self._contexts)
if self._span is not None:
contexts = event.setdefault("contexts", {})
if not contexts.get("trace"):
contexts["trace"] = self._span.get_trace_context()
exc_info = hint.get("exc_info")
if exc_info is not None:
for error_processor in self._error_processors:
new_event = error_processor(event, exc_info)
if new_event is None:
return _drop(event, error_processor, "error processor")
event = new_event
for event_processor in chain(global_event_processors, self._event_processors):
new_event = event
with capture_internal_exceptions():
new_event = event_processor(event, hint)
if new_event is None:
return _drop(event, event_processor, "event processor")
event = new_event
return event
def update_from_scope(self, scope):
# type: (Scope) -> None
if scope._level is not None:
self._level = scope._level
if scope._fingerprint is not None:
self._fingerprint = scope._fingerprint
if scope._transaction is not None:
self._transaction = scope._transaction
if scope._transaction_info is not None:
self._transaction_info.update(scope._transaction_info)
if scope._user is not None:
self._user = scope._user
if scope._tags:
self._tags.update(scope._tags)
if scope._contexts:
self._contexts.update(scope._contexts)
if scope._extras:
self._extras.update(scope._extras)
if scope._breadcrumbs:
self._breadcrumbs.extend(scope._breadcrumbs)
if scope._span:
self._span = scope._span
if scope._attachments:
self._attachments.extend(scope._attachments)
if scope._profile:
self._profile = scope._profile
def update_from_kwargs(
self,
user=None, # type: Optional[Any]
level=None, # type: Optional[str]
extras=None, # type: Optional[Dict[str, Any]]
contexts=None, # type: Optional[Dict[str, Any]]
tags=None, # type: Optional[Dict[str, str]]
fingerprint=None, # type: Optional[List[str]]
):
# type: (...) -> None
if level is not None:
self._level = level
if user is not None:
self._user = user
if extras is not None:
self._extras.update(extras)
if contexts is not None:
self._contexts.update(contexts)
if tags is not None:
self._tags.update(tags)
if fingerprint is not None:
self._fingerprint = fingerprint
def __copy__(self):
# type: () -> Scope
rv = object.__new__(self.__class__) # type: Scope
rv._level = self._level
rv._name = self._name
rv._fingerprint = self._fingerprint
rv._transaction = self._transaction
rv._transaction_info = dict(self._transaction_info)
rv._user = self._user
rv._tags = dict(self._tags)
rv._contexts = dict(self._contexts)
rv._extras = dict(self._extras)
rv._breadcrumbs = copy(self._breadcrumbs)
rv._event_processors = list(self._event_processors)
rv._error_processors = list(self._error_processors)
rv._should_capture = self._should_capture
rv._span = self._span
rv._session = self._session
rv._force_auto_session_tracking = self._force_auto_session_tracking
rv._attachments = list(self._attachments)
rv._profile = self._profile
return rv
def __repr__(self):
# type: () -> str
return "<%s id=%s name=%s>" % (
self.__class__.__name__,
hex(id(self)),
self._name,
)
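# --- Example (not part of the vendored module) ---
# A minimal sketch of typical Scope usage through the public API; the tag
# and processor below are illustrative.
#
#     import sentry_sdk
#     with sentry_sdk.configure_scope() as scope:
#         scope.set_tag("payment_provider", "stripe")
#         scope.add_event_processor(
#             lambda event, hint: event if event.get("level") != "debug" else None
#         )
#     # apply_to_event() later copies the tag onto every event and runs the
#     # processor; returning None from a processor drops the event.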

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/__init__.py

from sentry_sdk.hub import Hub, init
from sentry_sdk.scope import Scope
from sentry_sdk.transport import Transport, HttpTransport
from sentry_sdk.client import Client
from sentry_sdk.api import * # noqa
from sentry_sdk.consts import VERSION # noqa
__all__ = [ # noqa
"Hub",
"Scope",
"Client",
"Transport",
"HttpTransport",
"init",
"integrations",
# From sentry_sdk.api
"capture_event",
"capture_message",
"capture_exception",
"add_breadcrumb",
"configure_scope",
"push_scope",
"flush",
"last_event_id",
"start_span",
"start_transaction",
"set_tag",
"set_context",
"set_extra",
"set_user",
"set_level",
]
# Initialize the debug support after everything is loaded
from sentry_sdk.debug import init_debug_support
init_debug_support()
del init_debug_support
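# --- Example (not part of the vendored module) ---
# The canonical two-line setup using the names exported above; the DSN is a
# placeholder.
#
#     import sentry_sdk
#     sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")
#     sentry_sdk.capture_message("Hello from the SDK")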

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/utils.py

import base64
import json
import linecache
import logging
import os
import re
import subprocess
import sys
import threading
import time
from datetime import datetime
from functools import partial
try:
from functools import partialmethod
_PARTIALMETHOD_AVAILABLE = True
except ImportError:
_PARTIALMETHOD_AVAILABLE = False
import sentry_sdk
from sentry_sdk._compat import PY2, PY33, PY37, implements_str, text_type, urlparse
from sentry_sdk._types import MYPY
if MYPY:
from types import FrameType, TracebackType
from typing import (
Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
Union,
)
from sentry_sdk._types import EndpointType, ExcInfo
epoch = datetime(1970, 1, 1)
# The logger is created here but initialized in the debug support module
logger = logging.getLogger("sentry_sdk.errors")
MAX_STRING_LENGTH = 1024
BASE64_ALPHABET = re.compile(r"^[a-zA-Z0-9/+=]*$")
def json_dumps(data):
# type: (Any) -> bytes
"""Serialize data into a compact JSON representation encoded as UTF-8."""
return json.dumps(data, allow_nan=False, separators=(",", ":")).encode("utf-8")
def _get_debug_hub():
# type: () -> Optional[sentry_sdk.Hub]
# This function is replaced by debug.py
pass
def get_default_release():
# type: () -> Optional[str]
"""Try to guess a default release."""
release = os.environ.get("SENTRY_RELEASE")
if release:
return release
with open(os.path.devnull, "w+") as null:
try:
release = (
subprocess.Popen(
["git", "rev-parse", "HEAD"],
stdout=subprocess.PIPE,
stderr=null,
stdin=null,
)
.communicate()[0]
.strip()
.decode("utf-8")
)
except (OSError, IOError):
pass
if release:
return release
for var in (
"HEROKU_SLUG_COMMIT",
"SOURCE_VERSION",
"CODEBUILD_RESOLVED_SOURCE_VERSION",
"CIRCLE_SHA1",
"GAE_DEPLOYMENT_ID",
):
release = os.environ.get(var)
if release:
return release
return None
def get_sdk_name(installed_integrations):
# type: (List[str]) -> str
"""Return the SDK name including the name of the used web framework."""
    # Note: we cannot use, for example, sentry_sdk.integrations.django.DjangoIntegration.identifier
    # here because if Django is not installed the integration is not accessible.
framework_integrations = [
"django",
"flask",
"fastapi",
"bottle",
"falcon",
"quart",
"sanic",
"starlette",
"chalice",
"serverless",
"pyramid",
"tornado",
"aiohttp",
"aws_lambda",
"gcp",
"beam",
"asgi",
"wsgi",
]
for integration in framework_integrations:
if integration in installed_integrations:
return "sentry.python.{}".format(integration)
return "sentry.python"
class CaptureInternalException(object):
__slots__ = ()
def __enter__(self):
# type: () -> ContextManager[Any]
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> bool
if ty is not None and value is not None:
capture_internal_exception((ty, value, tb))
return True
_CAPTURE_INTERNAL_EXCEPTION = CaptureInternalException()
def capture_internal_exceptions():
# type: () -> ContextManager[Any]
return _CAPTURE_INTERNAL_EXCEPTION
def capture_internal_exception(exc_info):
# type: (ExcInfo) -> None
hub = _get_debug_hub()
if hub is not None:
hub._capture_internal_exception(exc_info)
def to_timestamp(value):
# type: (datetime) -> float
return (value - epoch).total_seconds()
def format_timestamp(value):
# type: (datetime) -> str
return value.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def event_hint_with_exc_info(exc_info=None):
# type: (Optional[ExcInfo]) -> Dict[str, Optional[ExcInfo]]
"""Creates a hint with the exc info filled in."""
if exc_info is None:
exc_info = sys.exc_info()
else:
exc_info = exc_info_from_error(exc_info)
if exc_info[0] is None:
exc_info = None
return {"exc_info": exc_info}
class BadDsn(ValueError):
"""Raised on invalid DSNs."""
@implements_str
class Dsn(object):
"""Represents a DSN."""
def __init__(self, value):
# type: (Union[Dsn, str]) -> None
if isinstance(value, Dsn):
self.__dict__ = dict(value.__dict__)
return
parts = urlparse.urlsplit(text_type(value))
if parts.scheme not in ("http", "https"):
raise BadDsn("Unsupported scheme %r" % parts.scheme)
self.scheme = parts.scheme
if parts.hostname is None:
raise BadDsn("Missing hostname")
self.host = parts.hostname
if parts.port is None:
self.port = self.scheme == "https" and 443 or 80 # type: int
else:
self.port = parts.port
if not parts.username:
raise BadDsn("Missing public key")
self.public_key = parts.username
self.secret_key = parts.password
path = parts.path.rsplit("/", 1)
try:
self.project_id = text_type(int(path.pop()))
except (ValueError, TypeError):
raise BadDsn("Invalid project in DSN (%r)" % (parts.path or "")[1:])
self.path = "/".join(path) + "/"
@property
def netloc(self):
# type: () -> str
"""The netloc part of a DSN."""
rv = self.host
if (self.scheme, self.port) not in (("http", 80), ("https", 443)):
rv = "%s:%s" % (rv, self.port)
return rv
def to_auth(self, client=None):
# type: (Optional[Any]) -> Auth
"""Returns the auth info object for this dsn."""
return Auth(
scheme=self.scheme,
host=self.netloc,
path=self.path,
project_id=self.project_id,
public_key=self.public_key,
secret_key=self.secret_key,
client=client,
)
def __str__(self):
# type: () -> str
return "%s://%s%s@%s%s%s" % (
self.scheme,
self.public_key,
self.secret_key and "@" + self.secret_key or "",
self.netloc,
self.path,
self.project_id,
)
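# A hedged sketch of Dsn parsing (hostname and key are placeholders):
#
#     dsn = Dsn("https://publickey@sentry.example.com/42")
#     dsn.public_key   # "publickey"
#     dsn.project_id   # "42"
#     dsn.netloc       # "sentry.example.com" (default port 443 is omitted)
#     dsn.to_auth().get_api_url(type="envelope")
#     # "https://sentry.example.com/api/42/envelope/"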
class Auth(object):
"""Helper object that represents the auth info."""
def __init__(
self,
scheme,
host,
project_id,
public_key,
secret_key=None,
version=7,
client=None,
path="/",
):
# type: (str, str, str, str, Optional[str], int, Optional[Any], str) -> None
self.scheme = scheme
self.host = host
self.path = path
self.project_id = project_id
self.public_key = public_key
self.secret_key = secret_key
self.version = version
self.client = client
@property
def store_api_url(self):
# type: () -> str
"""Returns the API url for storing events.
Deprecated: use get_api_url instead.
"""
return self.get_api_url(type="store")
def get_api_url(
self, type="store" # type: EndpointType
):
# type: (...) -> str
"""Returns the API url for storing events."""
return "%s://%s%sapi/%s/%s/" % (
self.scheme,
self.host,
self.path,
self.project_id,
type,
)
def to_header(self):
# type: () -> str
"""Returns the auth header a string."""
rv = [("sentry_key", self.public_key), ("sentry_version", self.version)]
if self.client is not None:
rv.append(("sentry_client", self.client))
if self.secret_key is not None:
rv.append(("sentry_secret", self.secret_key))
return "Sentry " + ", ".join("%s=%s" % (key, value) for key, value in rv)
class AnnotatedValue(object):
"""
Meta information for a data field in the event payload.
This is to tell Relay that we have tampered with the fields value.
See:
https://github.com/getsentry/relay/blob/be12cd49a0f06ea932ed9b9f93a655de5d6ad6d1/relay-general/src/types/meta.rs#L407-L423
"""
__slots__ = ("value", "metadata")
def __init__(self, value, metadata):
# type: (Optional[Any], Dict[str, Any]) -> None
self.value = value
self.metadata = metadata
@classmethod
def removed_because_raw_data(cls):
# type: () -> AnnotatedValue
"""The value was removed because it could not be parsed. This is done for request body values that are not json nor a form."""
return AnnotatedValue(
value="",
metadata={
"rem": [ # Remark
[
"!raw", # Unparsable raw data
"x", # The fields original value was removed
]
]
},
)
@classmethod
def removed_because_over_size_limit(cls):
# type: () -> AnnotatedValue
"""The actual value was removed because the size of the field exceeded the configured maximum size (specified with the request_bodies sdk option)"""
return AnnotatedValue(
value="",
metadata={
"rem": [ # Remark
[
"!config", # Because of configured maximum size
"x", # The fields original value was removed
]
]
},
)
@classmethod
def substituted_because_contains_sensitive_data(cls):
# type: () -> AnnotatedValue
"""The actual value was removed because it contained sensitive information."""
from sentry_sdk.consts import SENSITIVE_DATA_SUBSTITUTE
return AnnotatedValue(
value=SENSITIVE_DATA_SUBSTITUTE,
metadata={
"rem": [ # Remark
[
"!config", # Because of SDK configuration (in this case the config is the hard coded removal of certain django cookies)
"s", # The fields original value was substituted
]
]
},
)
if MYPY:
from typing import TypeVar
T = TypeVar("T")
Annotated = Union[AnnotatedValue, T]
def get_type_name(cls):
# type: (Optional[type]) -> Optional[str]
return getattr(cls, "__qualname__", None) or getattr(cls, "__name__", None)
def get_type_module(cls):
# type: (Optional[type]) -> Optional[str]
mod = getattr(cls, "__module__", None)
if mod not in (None, "builtins", "__builtins__"):
return mod
return None
def should_hide_frame(frame):
# type: (FrameType) -> bool
try:
mod = frame.f_globals["__name__"]
if mod.startswith("sentry_sdk."):
return True
except (AttributeError, KeyError):
pass
for flag_name in "__traceback_hide__", "__tracebackhide__":
try:
if frame.f_locals[flag_name]:
return True
except Exception:
pass
return False
def iter_stacks(tb):
# type: (Optional[TracebackType]) -> Iterator[TracebackType]
tb_ = tb # type: Optional[TracebackType]
while tb_ is not None:
if not should_hide_frame(tb_.tb_frame):
yield tb_
tb_ = tb_.tb_next
def get_lines_from_file(
filename, # type: str
lineno, # type: int
loader=None, # type: Optional[Any]
module=None, # type: Optional[str]
):
# type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
context_lines = 5
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source_str = loader.get_source(module) # type: Optional[str]
except (ImportError, IOError):
source_str = None
if source_str is not None:
source = source_str.splitlines()
if source is None:
try:
source = linecache.getlines(filename)
except (OSError, IOError):
return [], None, []
if not source:
return [], None, []
lower_bound = max(0, lineno - context_lines)
upper_bound = min(lineno + 1 + context_lines, len(source))
try:
pre_context = [
strip_string(line.strip("\r\n")) for line in source[lower_bound:lineno]
]
context_line = strip_string(source[lineno].strip("\r\n"))
post_context = [
strip_string(line.strip("\r\n"))
for line in source[(lineno + 1) : upper_bound]
]
return pre_context, context_line, post_context
except IndexError:
# the file may have changed since it was loaded into memory
return [], None, []
def get_source_context(
frame, # type: FrameType
tb_lineno, # type: int
):
# type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
try:
abs_path = frame.f_code.co_filename # type: Optional[str]
except Exception:
abs_path = None
try:
module = frame.f_globals["__name__"]
except Exception:
return [], None, []
try:
loader = frame.f_globals["__loader__"]
except Exception:
loader = None
lineno = tb_lineno - 1
if lineno is not None and abs_path:
return get_lines_from_file(abs_path, lineno, loader, module)
return [], None, []
def safe_str(value):
# type: (Any) -> str
try:
return text_type(value)
except Exception:
return safe_repr(value)
if PY2:
def safe_repr(value):
# type: (Any) -> str
try:
rv = repr(value).decode("utf-8", "replace")
# At this point `rv` contains a bunch of literal escape codes, like
# this (exaggerated example):
#
# u"\\x2f"
#
# But we want to show this string as:
#
# u"/"
try:
# unicode-escape does this job, but can only decode latin1. So we
# attempt to encode in latin1.
return rv.encode("latin1").decode("unicode-escape")
except Exception:
# Since usually strings aren't latin1 this can break. In those
# cases we just give up.
return rv
except Exception:
# If e.g. the call to `repr` already fails
return "<broken repr>"
else:
def safe_repr(value):
# type: (Any) -> str
try:
return repr(value)
except Exception:
return "<broken repr>"
def filename_for_module(module, abs_path):
# type: (Optional[str], Optional[str]) -> Optional[str]
if not abs_path or not module:
return abs_path
try:
if abs_path.endswith(".pyc"):
abs_path = abs_path[:-1]
base_module = module.split(".", 1)[0]
if base_module == module:
return os.path.basename(abs_path)
base_module_path = sys.modules[base_module].__file__
if not base_module_path:
return abs_path
return abs_path.split(base_module_path.rsplit(os.sep, 2)[0], 1)[-1].lstrip(
os.sep
)
except Exception:
return abs_path
def serialize_frame(frame, tb_lineno=None, with_locals=True):
# type: (FrameType, Optional[int], bool) -> Dict[str, Any]
f_code = getattr(frame, "f_code", None)
if not f_code:
abs_path = None
function = None
else:
abs_path = frame.f_code.co_filename
function = frame.f_code.co_name
try:
module = frame.f_globals["__name__"]
except Exception:
module = None
if tb_lineno is None:
tb_lineno = frame.f_lineno
pre_context, context_line, post_context = get_source_context(frame, tb_lineno)
rv = {
"filename": filename_for_module(module, abs_path) or None,
"abs_path": os.path.abspath(abs_path) if abs_path else None,
"function": function or "<unknown>",
"module": module,
"lineno": tb_lineno,
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
} # type: Dict[str, Any]
if with_locals:
rv["vars"] = frame.f_locals
return rv
def current_stacktrace(with_locals=True):
# type: (bool) -> Any
__tracebackhide__ = True
frames = []
f = sys._getframe() # type: Optional[FrameType]
while f is not None:
if not should_hide_frame(f):
frames.append(serialize_frame(f, with_locals=with_locals))
f = f.f_back
frames.reverse()
return {"frames": frames}
def get_errno(exc_value):
# type: (BaseException) -> Optional[Any]
return getattr(exc_value, "errno", None)
def single_exception_from_error_tuple(
exc_type, # type: Optional[type]
exc_value, # type: Optional[BaseException]
tb, # type: Optional[TracebackType]
client_options=None, # type: Optional[Dict[str, Any]]
mechanism=None, # type: Optional[Dict[str, Any]]
):
# type: (...) -> Dict[str, Any]
if exc_value is not None:
errno = get_errno(exc_value)
else:
errno = None
if errno is not None:
mechanism = mechanism or {"type": "generic"}
mechanism.setdefault("meta", {}).setdefault("errno", {}).setdefault(
"number", errno
)
if client_options is None:
with_locals = True
else:
with_locals = client_options["with_locals"]
frames = [
serialize_frame(tb.tb_frame, tb_lineno=tb.tb_lineno, with_locals=with_locals)
for tb in iter_stacks(tb)
]
rv = {
"module": get_type_module(exc_type),
"type": get_type_name(exc_type),
"value": safe_str(exc_value),
"mechanism": mechanism,
}
if frames:
rv["stacktrace"] = {"frames": frames}
return rv
HAS_CHAINED_EXCEPTIONS = hasattr(Exception, "__suppress_context__")
if HAS_CHAINED_EXCEPTIONS:
def walk_exception_chain(exc_info):
# type: (ExcInfo) -> Iterator[ExcInfo]
exc_type, exc_value, tb = exc_info
seen_exceptions = []
seen_exception_ids = set() # type: Set[int]
while (
exc_type is not None
and exc_value is not None
and id(exc_value) not in seen_exception_ids
):
yield exc_type, exc_value, tb
# Avoid hashing random types we don't know anything
# about. Use the list to keep a ref so that the `id` is
# not used for another object.
seen_exceptions.append(exc_value)
seen_exception_ids.add(id(exc_value))
if exc_value.__suppress_context__:
cause = exc_value.__cause__
else:
cause = exc_value.__context__
if cause is None:
break
exc_type = type(cause)
exc_value = cause
tb = getattr(cause, "__traceback__", None)
else:
def walk_exception_chain(exc_info):
# type: (ExcInfo) -> Iterator[ExcInfo]
yield exc_info
def exceptions_from_error_tuple(
exc_info, # type: ExcInfo
client_options=None, # type: Optional[Dict[str, Any]]
mechanism=None, # type: Optional[Dict[str, Any]]
):
# type: (...) -> List[Dict[str, Any]]
exc_type, exc_value, tb = exc_info
rv = []
for exc_type, exc_value, tb in walk_exception_chain(exc_info):
rv.append(
single_exception_from_error_tuple(
exc_type, exc_value, tb, client_options, mechanism
)
)
rv.reverse()
return rv
def to_string(value):
# type: (str) -> str
try:
return text_type(value)
except UnicodeDecodeError:
return repr(value)[1:-1]
def iter_event_stacktraces(event):
# type: (Dict[str, Any]) -> Iterator[Dict[str, Any]]
if "stacktrace" in event:
yield event["stacktrace"]
if "threads" in event:
for thread in event["threads"].get("values") or ():
if "stacktrace" in thread:
yield thread["stacktrace"]
if "exception" in event:
for exception in event["exception"].get("values") or ():
if "stacktrace" in exception:
yield exception["stacktrace"]
def iter_event_frames(event):
# type: (Dict[str, Any]) -> Iterator[Dict[str, Any]]
for stacktrace in iter_event_stacktraces(event):
for frame in stacktrace.get("frames") or ():
yield frame
def handle_in_app(event, in_app_exclude=None, in_app_include=None):
# type: (Dict[str, Any], Optional[List[str]], Optional[List[str]]) -> Dict[str, Any]
for stacktrace in iter_event_stacktraces(event):
handle_in_app_impl(
stacktrace.get("frames"),
in_app_exclude=in_app_exclude,
in_app_include=in_app_include,
)
return event
def handle_in_app_impl(frames, in_app_exclude, in_app_include):
# type: (Any, Optional[List[str]], Optional[List[str]]) -> Optional[Any]
if not frames:
return None
any_in_app = False
for frame in frames:
in_app = frame.get("in_app")
if in_app is not None:
if in_app:
any_in_app = True
continue
module = frame.get("module")
if not module:
continue
elif _module_in_set(module, in_app_include):
frame["in_app"] = True
any_in_app = True
elif _module_in_set(module, in_app_exclude):
frame["in_app"] = False
if not any_in_app:
for frame in frames:
if frame.get("in_app") is None:
frame["in_app"] = True
return frames
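# --- Illustrative example (not part of the SDK source) ---
# Frames matching `in_app_exclude` are marked as library code; because no
# frame ended up explicitly in-app, the unmarked frames default to in-app.
example_frames = [
    {"module": "myapp.views"},
    {"module": "django.core.handlers"},
]
handle_in_app_impl(example_frames, in_app_exclude=["django"], in_app_include=None)
# example_frames[0]["in_app"] is True, example_frames[1]["in_app"] is False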
def exc_info_from_error(error):
# type: (Union[BaseException, ExcInfo]) -> ExcInfo
if isinstance(error, tuple) and len(error) == 3:
exc_type, exc_value, tb = error
elif isinstance(error, BaseException):
tb = getattr(error, "__traceback__", None)
if tb is not None:
exc_type = type(error)
exc_value = error
else:
exc_type, exc_value, tb = sys.exc_info()
if exc_value is not error:
tb = None
exc_value = error
exc_type = type(error)
else:
raise ValueError("Expected Exception object to report, got %s!" % type(error))
return exc_type, exc_value, tb
def event_from_exception(
exc_info, # type: Union[BaseException, ExcInfo]
client_options=None, # type: Optional[Dict[str, Any]]
mechanism=None, # type: Optional[Dict[str, Any]]
):
# type: (...) -> Tuple[Dict[str, Any], Dict[str, Any]]
exc_info = exc_info_from_error(exc_info)
hint = event_hint_with_exc_info(exc_info)
return (
{
"level": "error",
"exception": {
"values": exceptions_from_error_tuple(
exc_info, client_options, mechanism
)
},
},
hint,
)
def _module_in_set(name, set):
# type: (str, Optional[List[str]]) -> bool
if not set:
return False
for item in set or ():
if item == name or name.startswith(item + "."):
return True
return False
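# --- Illustrative example (not part of the SDK source) ---
# Matching is by exact module name or dotted-path prefix, so a bare prefix
# like "django" does not accidentally match "djangorestframework".
assert _module_in_set("django.core.handlers", ["django"]) is True
assert _module_in_set("djangorestframework", ["django"]) is False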
def strip_string(value, max_length=None):
# type: (str, Optional[int]) -> Union[AnnotatedValue, str]
# TODO: read max_length from config
if not value:
return value
if max_length is None:
        # This is intentionally not the parameter default, so that one can
        # patch `MAX_STRING_LENGTH` and still affect `strip_string`.
max_length = MAX_STRING_LENGTH
length = len(value.encode("utf-8"))
if length > max_length:
return AnnotatedValue(
value=value[: max_length - 3] + "...",
metadata={
"len": length,
"rem": [["!limit", "x", max_length - 3, max_length]],
},
)
return value
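# --- Illustrative example (not part of the SDK source) ---
# Over-long strings come back wrapped in AnnotatedValue with truncation
# metadata (a 10-byte limit is far below the real default, for brevity).
example_stripped = strip_string("a" * 20, max_length=10)
# example_stripped.value == "aaaaaaa..."
# example_stripped.metadata["len"] == 20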
def _is_contextvars_broken():
# type: () -> bool
"""
Returns whether gevent/eventlet have patched the stdlib in a way where thread locals are now more "correct" than contextvars.
"""
try:
import gevent # type: ignore
from gevent.monkey import is_object_patched # type: ignore
# Get the MAJOR and MINOR version numbers of Gevent
version_tuple = tuple(
[int(part) for part in re.split(r"a|b|rc|\.", gevent.__version__)[:2]]
)
if is_object_patched("threading", "local"):
            # Gevent 20.9.0 depends on Greenlet 0.4.17, which natively handles
            # switching context vars when greenlets are switched, so Gevent
            # 20.9.0+ is fine.
            # Ref: https://github.com/gevent/gevent/blob/83c9e2ae5b0834b8f84233760aabe82c3ba065b4/src/gevent/monkey.py#L604-L609
            # Gevent 20.5, which doesn't depend on Greenlet 0.4.17 and its
            # native contextvars support, can patch both thread locals and
            # contextvars; in that case, check whether contextvars are
            # effectively patched.
if (
# Gevent 20.9.0+
(sys.version_info >= (3, 7) and version_tuple >= (20, 9))
# Gevent 20.5.0+ or Python < 3.7
or (is_object_patched("contextvars", "ContextVar"))
):
return False
return True
except ImportError:
pass
try:
from eventlet.patcher import is_monkey_patched # type: ignore
if is_monkey_patched("thread"):
return True
except ImportError:
pass
return False
def _make_threadlocal_contextvars(local):
# type: (type) -> type
class ContextVar(object):
# Super-limited impl of ContextVar
def __init__(self, name):
# type: (str) -> None
self._name = name
self._local = local()
def get(self, default):
# type: (Any) -> Any
return getattr(self._local, "value", default)
def set(self, value):
# type: (Any) -> None
self._local.value = value
return ContextVar
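# --- Illustrative example (not part of the SDK source) ---
# The fallback class stores one value per thread, which is the desired
# behavior when gevent/eventlet have patched thread locals.
import threading
ExampleFallbackVar = _make_threadlocal_contextvars(threading.local)
example_var = ExampleFallbackVar("example")
example_var.get(None)    # None until set in this thread
example_var.set("hello")
example_var.get(None)    # "hello"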
def _get_contextvars():
# type: () -> Tuple[bool, type]
"""
Figure out the "right" contextvars installation to use. Returns a
`contextvars.ContextVar`-like class with a limited API.
See https://docs.sentry.io/platforms/python/contextvars/ for more information.
"""
if not _is_contextvars_broken():
# aiocontextvars is a PyPI package that ensures that the contextvars
# backport (also a PyPI package) works with asyncio under Python 3.6
#
# Import it if available.
if sys.version_info < (3, 7):
# `aiocontextvars` is absolutely required for functional
# contextvars on Python 3.6.
try:
from aiocontextvars import ContextVar
return True, ContextVar
except ImportError:
pass
else:
# On Python 3.7 contextvars are functional.
try:
from contextvars import ContextVar
return True, ContextVar
except ImportError:
pass
# Fall back to basic thread-local usage.
from threading import local
return False, _make_threadlocal_contextvars(local)
HAS_REAL_CONTEXTVARS, ContextVar = _get_contextvars()
CONTEXTVARS_ERROR_MESSAGE = """
With asyncio/ASGI applications, the Sentry SDK requires a functional
installation of `contextvars` to avoid leaking scope/context data across
requests.
Please refer to https://docs.sentry.io/platforms/python/contextvars/ for more information.
"""
def qualname_from_function(func):
# type: (Callable[..., Any]) -> Optional[str]
"""Return the qualified name of func. Works with regular function, lambda, partial and partialmethod."""
func_qualname = None # type: Optional[str]
# Python 2
try:
return "%s.%s.%s" % (
func.im_class.__module__, # type: ignore
func.im_class.__name__, # type: ignore
func.__name__,
)
except Exception:
pass
prefix, suffix = "", ""
if (
_PARTIALMETHOD_AVAILABLE
and hasattr(func, "_partialmethod")
and isinstance(func._partialmethod, partialmethod) # type: ignore
):
prefix, suffix = "partialmethod(<function ", ">)"
func = func._partialmethod.func # type: ignore
elif isinstance(func, partial) and hasattr(func.func, "__name__"):
prefix, suffix = "partial(<function ", ">)"
func = func.func
if hasattr(func, "__qualname__"):
func_qualname = func.__qualname__
elif hasattr(func, "__name__"): # Python 2.7 has no __qualname__
func_qualname = func.__name__
# Python 3: methods, functions, classes
if func_qualname is not None:
if hasattr(func, "__module__"):
func_qualname = func.__module__ + "." + func_qualname
func_qualname = prefix + func_qualname + suffix
return func_qualname
def transaction_from_function(func):
# type: (Callable[..., Any]) -> Optional[str]
return qualname_from_function(func)
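# --- Illustrative example (not part of the SDK source) ---
# Partials are unwrapped and labeled, and the module path is prepended when
# available (the exact prefix depends on where the function is defined).
from functools import partial
def example_greet(name, punctuation):
    return name + punctuation
qualname_from_function(partial(example_greet, punctuation="!"))
# -> e.g. "partial(<function sentry_sdk.utils.example_greet>)"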
disable_capture_event = ContextVar("disable_capture_event")
class ServerlessTimeoutWarning(Exception): # noqa: N818
"""Raised when a serverless method is about to reach its timeout."""
pass
class TimeoutThread(threading.Thread):
"""Creates a Thread which runs (sleeps) for a time duration equal to
waiting_time and raises a custom ServerlessTimeout exception.
"""
def __init__(self, waiting_time, configured_timeout):
# type: (float, int) -> None
threading.Thread.__init__(self)
self.waiting_time = waiting_time
self.configured_timeout = configured_timeout
self._stop_event = threading.Event()
def stop(self):
# type: () -> None
self._stop_event.set()
def run(self):
# type: () -> None
self._stop_event.wait(self.waiting_time)
if self._stop_event.is_set():
return
integer_configured_timeout = int(self.configured_timeout)
        # Round the configured timeout (in seconds) up to the next whole integer
if integer_configured_timeout < self.configured_timeout:
integer_configured_timeout = integer_configured_timeout + 1
        # Raise the warning once the timeout duration has been reached
raise ServerlessTimeoutWarning(
"WARNING : Function is expected to get timed out. Configured timeout duration = {} seconds.".format(
integer_configured_timeout
)
)
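# --- Illustrative usage sketch (not part of the SDK source) ---
# Warn roughly half a second before a 3-second budget elapses. The warning
# is raised inside the timeout thread, so it surfaces via the thread's
# excepthook rather than in the calling thread.
import time
example_timeout_thread = TimeoutThread(waiting_time=2.5, configured_timeout=3)
example_timeout_thread.start()
try:
    time.sleep(0.1)  # stand-in for the real workload
finally:
    example_timeout_thread.stop()  # cancel the pending warning if done in time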
def to_base64(original):
# type: (str) -> Optional[str]
"""
Convert a string to base64, via UTF-8. Returns None on invalid input.
"""
base64_string = None
try:
utf8_bytes = original.encode("UTF-8")
base64_bytes = base64.b64encode(utf8_bytes)
base64_string = base64_bytes.decode("UTF-8")
    except Exception as err:
        logger.warning(
            "Unable to encode {orig} to base64: {err}".format(orig=original, err=err)
        )
return base64_string
def from_base64(base64_string):
# type: (str) -> Optional[str]
"""
Convert a string from base64, via UTF-8. Returns None on invalid input.
"""
utf8_string = None
try:
only_valid_chars = BASE64_ALPHABET.match(base64_string)
assert only_valid_chars
base64_bytes = base64_string.encode("UTF-8")
utf8_bytes = base64.b64decode(base64_bytes)
utf8_string = utf8_bytes.decode("UTF-8")
    except Exception as err:
        logger.warning(
            "Unable to decode {b64} from base64: {err}".format(b64=base64_string, err=err)
        )
return utf8_string
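# --- Illustrative example (not part of the SDK source) ---
# The two helpers round-trip, and both return None instead of raising on
# invalid input (BASE64_ALPHABET is defined earlier in this module).
example_encoded = to_base64("dogs are great")  # "ZG9ncyBhcmUgZ3JlYXQ="
assert from_base64(example_encoded) == "dogs are great"
assert from_base64("not base64!?") is None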
if PY37:
def nanosecond_time():
# type: () -> int
return time.perf_counter_ns()
elif PY33:
def nanosecond_time():
# type: () -> int
return int(time.perf_counter() * 1e9)
else:
def nanosecond_time():
# type: () -> int
        raise AttributeError("nanosecond_time is not available on this Python version")
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/consts.py

from sentry_sdk._types import MYPY
if MYPY:
import sentry_sdk
from typing import Optional
from typing import Callable
from typing import Union
from typing import List
from typing import Type
from typing import Dict
from typing import Any
from typing import Sequence
from typing_extensions import TypedDict
from sentry_sdk.integrations import Integration
from sentry_sdk._types import (
BreadcrumbProcessor,
Event,
EventProcessor,
TracesSampler,
TransactionProcessor,
)
# Experiments are feature flags to enable and disable certain unstable SDK
# functionality. Changing them from the defaults (`None`) in production
# code is highly discouraged. They are not subject to any stability
# guarantees such as the ones from semantic versioning.
Experiments = TypedDict(
"Experiments",
{
"max_spans": Optional[int],
"record_sql_params": Optional[bool],
"smart_transaction_trimming": Optional[bool],
"propagate_tracestate": Optional[bool],
"custom_measurements": Optional[bool],
"profiles_sample_rate": Optional[float],
"profiler_mode": Optional[str],
},
total=False,
)
DEFAULT_QUEUE_SIZE = 100
DEFAULT_MAX_BREADCRUMBS = 100
SENSITIVE_DATA_SUBSTITUTE = "[Filtered]"
class INSTRUMENTER:
SENTRY = "sentry"
OTEL = "otel"
class OP:
DB = "db"
DB_REDIS = "db.redis"
EVENT_DJANGO = "event.django"
FUNCTION = "function"
FUNCTION_AWS = "function.aws"
FUNCTION_GCP = "function.gcp"
HTTP_CLIENT = "http.client"
HTTP_CLIENT_STREAM = "http.client.stream"
HTTP_SERVER = "http.server"
MIDDLEWARE_DJANGO = "middleware.django"
MIDDLEWARE_STARLETTE = "middleware.starlette"
MIDDLEWARE_STARLETTE_RECEIVE = "middleware.starlette.receive"
MIDDLEWARE_STARLETTE_SEND = "middleware.starlette.send"
MIDDLEWARE_STARLITE = "middleware.starlite"
MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
QUEUE_SUBMIT_CELERY = "queue.submit.celery"
QUEUE_TASK_CELERY = "queue.task.celery"
QUEUE_TASK_RQ = "queue.task.rq"
SUBPROCESS = "subprocess"
SUBPROCESS_WAIT = "subprocess.wait"
SUBPROCESS_COMMUNICATE = "subprocess.communicate"
TEMPLATE_RENDER = "template.render"
VIEW_RENDER = "view.render"
VIEW_RESPONSE_RENDER = "view.response.render"
WEBSOCKET_SERVER = "websocket.server"
# This type exists to trick mypy and PyCharm into thinking `init` and `Client`
# take these arguments (even though they take opaque **kwargs)
class ClientConstructor(object):
def __init__(
self,
dsn=None, # type: Optional[str]
with_locals=True, # type: bool
max_breadcrumbs=DEFAULT_MAX_BREADCRUMBS, # type: int
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
server_name=None, # type: Optional[str]
shutdown_timeout=2, # type: float
integrations=[], # type: Sequence[Integration] # noqa: B006
in_app_include=[], # type: List[str] # noqa: B006
in_app_exclude=[], # type: List[str] # noqa: B006
default_integrations=True, # type: bool
dist=None, # type: Optional[str]
transport=None, # type: Optional[Union[sentry_sdk.transport.Transport, Type[sentry_sdk.transport.Transport], Callable[[Event], None]]]
transport_queue_size=DEFAULT_QUEUE_SIZE, # type: int
sample_rate=1.0, # type: float
send_default_pii=False, # type: bool
http_proxy=None, # type: Optional[str]
https_proxy=None, # type: Optional[str]
ignore_errors=[], # type: List[Union[type, str]] # noqa: B006
request_bodies="medium", # type: str
before_send=None, # type: Optional[EventProcessor]
before_breadcrumb=None, # type: Optional[BreadcrumbProcessor]
debug=False, # type: bool
attach_stacktrace=False, # type: bool
ca_certs=None, # type: Optional[str]
propagate_traces=True, # type: bool
traces_sample_rate=None, # type: Optional[float]
traces_sampler=None, # type: Optional[TracesSampler]
auto_enabling_integrations=True, # type: bool
auto_session_tracking=True, # type: bool
send_client_reports=True, # type: bool
_experiments={}, # type: Experiments # noqa: B006
proxy_headers=None, # type: Optional[Dict[str, str]]
instrumenter=INSTRUMENTER.SENTRY, # type: Optional[str]
before_send_transaction=None, # type: Optional[TransactionProcessor]
):
# type: (...) -> None
pass
def _get_default_options():
# type: () -> Dict[str, Any]
import inspect
if hasattr(inspect, "getfullargspec"):
getargspec = inspect.getfullargspec
else:
getargspec = inspect.getargspec # type: ignore
a = getargspec(ClientConstructor.__init__)
defaults = a.defaults or ()
return dict(zip(a.args[-len(defaults) :], defaults))
DEFAULT_OPTIONS = _get_default_options()
del _get_default_options
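# Illustrative check (not part of the SDK source): DEFAULT_OPTIONS maps every
# ClientConstructor keyword to its default value.
assert DEFAULT_OPTIONS["sample_rate"] == 1.0
assert DEFAULT_OPTIONS["max_breadcrumbs"] == DEFAULT_MAX_BREADCRUMBS
assert DEFAULT_OPTIONS["instrumenter"] == INSTRUMENTER.SENTRY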
VERSION = "1.14.0"
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/attachments.py

import os
import mimetypes
from sentry_sdk._types import MYPY
from sentry_sdk.envelope import Item, PayloadRef
if MYPY:
from typing import Optional, Union, Callable
class Attachment(object):
def __init__(
self,
bytes=None, # type: Union[None, bytes, Callable[[], bytes]]
filename=None, # type: Optional[str]
path=None, # type: Optional[str]
content_type=None, # type: Optional[str]
add_to_transactions=False, # type: bool
):
# type: (...) -> None
if bytes is None and path is None:
raise TypeError("path or raw bytes required for attachment")
if filename is None and path is not None:
filename = os.path.basename(path)
if filename is None:
raise TypeError("filename is required for attachment")
if content_type is None:
content_type = mimetypes.guess_type(filename)[0]
self.bytes = bytes
self.filename = filename
self.path = path
self.content_type = content_type
self.add_to_transactions = add_to_transactions
def to_envelope_item(self):
# type: () -> Item
"""Returns an envelope item for this attachment."""
payload = None # type: Union[None, PayloadRef, bytes]
if self.bytes is not None:
if callable(self.bytes):
payload = self.bytes()
else:
payload = self.bytes
else:
payload = PayloadRef(path=self.path)
return Item(
payload=payload,
type="attachment",
content_type=self.content_type,
filename=self.filename,
)
def __repr__(self):
# type: () -> str
return "<Attachment %r>" % (self.filename,)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/tracing_utils.py

import re
import contextlib
import json
import math
from numbers import Real
from decimal import Decimal
import sentry_sdk
from sentry_sdk.consts import OP
from sentry_sdk.utils import (
capture_internal_exceptions,
Dsn,
logger,
safe_str,
to_base64,
to_string,
from_base64,
)
from sentry_sdk._compat import PY2, iteritems
from sentry_sdk._types import MYPY
if PY2:
from collections import Mapping
from urllib import quote, unquote
else:
from collections.abc import Mapping
from urllib.parse import quote, unquote
if MYPY:
import typing
from typing import Generator
from typing import Optional
from typing import Any
from typing import Dict
from typing import Union
SENTRY_TRACE_REGEX = re.compile(
"^[ \t]*" # whitespace
"([0-9a-f]{32})?" # trace_id
"-?([0-9a-f]{16})?" # span_id
"-?([01])?" # sampled
"[ \t]*$" # whitespace
)
# This is a normal base64 regex, modified to reflect the fact that we strip
# the trailing = or == off
base64_stripped = (
# any of the characters in the base64 "alphabet", in multiples of 4
"([a-zA-Z0-9+/]{4})*"
# either nothing or 2 or 3 base64-alphabet characters (see
# https://en.wikipedia.org/wiki/Base64#Decoding_Base64_without_padding for
# why there's never only 1 extra character)
"([a-zA-Z0-9+/]{2,3})?"
)
# comma-delimited list of entries of the form `xxx=yyy`
tracestate_entry = "[^=]+=[^=]+"
TRACESTATE_ENTRIES_REGEX = re.compile(
# one or more xxxxx=yyyy entries
"^({te})+"
# each entry except the last must be followed by a comma
"(,|$)".format(te=tracestate_entry)
)
# this doesn't check that the value is valid, just that there's something there
# of the form `sentry=xxxx`
SENTRY_TRACESTATE_ENTRY_REGEX = re.compile(
# either sentry is the first entry or there's stuff immediately before it,
# ending in a comma (this prevents matching something like `coolsentry=xxx`)
"(?:^|.+,)"
# sentry's part, not including the potential comma
"(sentry=[^,]*)"
# either there's a comma and another vendor's entry or we end
"(?:,.+|$)"
)
class EnvironHeaders(Mapping): # type: ignore
def __init__(
self,
environ, # type: typing.Mapping[str, str]
prefix="HTTP_", # type: str
):
# type: (...) -> None
self.environ = environ
self.prefix = prefix
def __getitem__(self, key):
# type: (str) -> Optional[Any]
return self.environ[self.prefix + key.replace("-", "_").upper()]
def __len__(self):
# type: () -> int
return sum(1 for _ in iter(self))
def __iter__(self):
# type: () -> Generator[str, None, None]
for k in self.environ:
if not isinstance(k, str):
continue
k = k.replace("-", "_").upper()
if not k.startswith(self.prefix):
continue
yield k[len(self.prefix) :]
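# --- Illustrative example (not part of the SDK source) ---
# WSGI-style environ keys are exposed with the prefix stripped, and lookups
# normalize header-style names to the environ convention.
example_headers = EnvironHeaders({"HTTP_SENTRY_TRACE": "abc", "PATH_INFO": "/"})
example_headers["sentry-trace"]  # "abc"
list(example_headers)            # ["SENTRY_TRACE"]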
def has_tracing_enabled(options):
# type: (Dict[str, Any]) -> bool
"""
Returns True if either traces_sample_rate or traces_sampler is
defined, False otherwise.
"""
return bool(
options.get("traces_sample_rate") is not None
or options.get("traces_sampler") is not None
)
def is_valid_sample_rate(rate):
# type: (Any) -> bool
"""
    Checks the given sample rate to make sure it has a valid type and value (a
    boolean or a number between 0 and 1, inclusive).
"""
    # Booleans are instances of Real, so checking for Real covers booleans too.
    # NaN is also a Real, so it has to be checked for separately; Decimal does
    # not derive from Real, so it needs its own isinstance check.
if not isinstance(rate, (Real, Decimal)) or math.isnan(rate):
logger.warning(
"[Tracing] Given sample rate is invalid. Sample rate must be a boolean or a number between 0 and 1. Got {rate} of type {type}.".format(
rate=rate, type=type(rate)
)
)
return False
# in case rate is a boolean, it will get cast to 1 if it's True and 0 if it's False
rate = float(rate)
if rate < 0 or rate > 1:
logger.warning(
"[Tracing] Given sample rate is invalid. Sample rate must be between 0 and 1. Got {rate}.".format(
rate=rate
)
)
return False
return True
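# --- Illustrative example (not part of the SDK source) ---
# Booleans and numbers in [0, 1] are accepted; everything else logs a
# warning and is rejected.
assert is_valid_sample_rate(0.25) is True
assert is_valid_sample_rate(True) is True
assert is_valid_sample_rate("0.5") is False
assert is_valid_sample_rate(1.5) is False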
@contextlib.contextmanager
def record_sql_queries(
hub, # type: sentry_sdk.Hub
cursor, # type: Any
query, # type: Any
params_list, # type: Any
paramstyle, # type: Optional[str]
executemany, # type: bool
):
# type: (...) -> Generator[Span, None, None]
# TODO: Bring back capturing of params by default
if hub.client and hub.client.options["_experiments"].get(
"record_sql_params", False
):
if not params_list or params_list == [None]:
params_list = None
if paramstyle == "pyformat":
paramstyle = "format"
else:
params_list = None
paramstyle = None
query = _format_sql(cursor, query)
data = {}
if params_list is not None:
data["db.params"] = params_list
if paramstyle is not None:
data["db.paramstyle"] = paramstyle
if executemany:
data["db.executemany"] = True
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", data=data)
with hub.start_span(op=OP.DB, description=query) as span:
for k, v in data.items():
span.set_data(k, v)
yield span
def maybe_create_breadcrumbs_from_span(hub, span):
# type: (sentry_sdk.Hub, Span) -> None
if span.op == OP.DB_REDIS:
hub.add_breadcrumb(
message=span.description, type="redis", category="redis", data=span._tags
)
elif span.op == OP.HTTP_CLIENT:
hub.add_breadcrumb(type="http", category="httplib", data=span._data)
elif span.op == "subprocess":
hub.add_breadcrumb(
type="subprocess",
category="subprocess",
message=span.description,
data=span._data,
)
def extract_sentrytrace_data(header):
# type: (Optional[str]) -> Optional[typing.Mapping[str, Union[str, bool, None]]]
"""
Given a `sentry-trace` header string, return a dictionary of data.
"""
if not header:
return None
if header.startswith("00-") and header.endswith("-00"):
header = header[3:-3]
match = SENTRY_TRACE_REGEX.match(header)
if not match:
return None
trace_id, parent_span_id, sampled_str = match.groups()
parent_sampled = None
if trace_id:
trace_id = "{:032x}".format(int(trace_id, 16))
if parent_span_id:
parent_span_id = "{:016x}".format(int(parent_span_id, 16))
if sampled_str:
parent_sampled = sampled_str != "0"
return {
"trace_id": trace_id,
"parent_span_id": parent_span_id,
"parent_sampled": parent_sampled,
}
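# --- Illustrative example (not part of the SDK source) ---
# Parsing a complete `sentry-trace` header:
extract_sentrytrace_data("771a43a4192642f0b136d5159a501700-1234567890abcdef-1")
# -> {"trace_id": "771a43a4192642f0b136d5159a501700",
#     "parent_span_id": "1234567890abcdef",
#     "parent_sampled": True}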
def extract_tracestate_data(header):
# type: (Optional[str]) -> typing.Mapping[str, Optional[str]]
"""
Extracts the sentry tracestate value and any third-party data from the given
tracestate header, returning a dictionary of data.
"""
sentry_entry = third_party_entry = None
before = after = ""
if header:
# find sentry's entry, if any
sentry_match = SENTRY_TRACESTATE_ENTRY_REGEX.search(header)
if sentry_match:
sentry_entry = sentry_match.group(1)
# remove the commas after the split so we don't end up with
# `xxx=yyy,,zzz=qqq` (double commas) when we put them back together
before, after = map(lambda s: s.strip(","), header.split(sentry_entry))
# extract sentry's value from its entry and test to make sure it's
# valid; if it isn't, discard the entire entry so that a new one
# will be created
sentry_value = sentry_entry.replace("sentry=", "")
if not re.search("^{b64}$".format(b64=base64_stripped), sentry_value):
sentry_entry = None
else:
after = header
# if either part is invalid or empty, remove it before gluing them together
third_party_entry = (
",".join(filter(TRACESTATE_ENTRIES_REGEX.search, [before, after])) or None
)
return {
"sentry_tracestate": sentry_entry,
"third_party_tracestate": third_party_entry,
}
def compute_tracestate_value(data):
# type: (typing.Mapping[str, str]) -> str
"""
Computes a new tracestate value using the given data.
Note: Returns just the base64-encoded data, NOT the full `sentry=...`
tracestate entry.
"""
tracestate_json = json.dumps(data, default=safe_str)
# Base64-encoded strings always come out with a length which is a multiple
# of 4. In order to achieve this, the end is padded with one or more `=`
# signs. Because the tracestate standard calls for using `=` signs between
# vendor name and value (`sentry=xxx,dogsaregreat=yyy`), to avoid confusion
# we strip the `=`
return (to_base64(tracestate_json) or "").rstrip("=")
def compute_tracestate_entry(span):
# type: (Span) -> Optional[str]
"""
Computes a new sentry tracestate for the span. Includes the `sentry=`.
Will return `None` if there's no client and/or no DSN.
"""
data = {}
hub = span.hub or sentry_sdk.Hub.current
client = hub.client
scope = hub.scope
if client and client.options.get("dsn"):
options = client.options
user = scope._user
data = {
"trace_id": span.trace_id,
"environment": options["environment"],
"release": options.get("release"),
"public_key": Dsn(options["dsn"]).public_key,
}
if user and (user.get("id") or user.get("segment")):
user_data = {}
if user.get("id"):
user_data["id"] = user["id"]
if user.get("segment"):
user_data["segment"] = user["segment"]
data["user"] = user_data
if span.containing_transaction:
data["transaction"] = span.containing_transaction.name
return "sentry=" + compute_tracestate_value(data)
return None
def reinflate_tracestate(encoded_tracestate):
# type: (str) -> typing.Optional[Mapping[str, str]]
"""
Given a sentry tracestate value in its encoded form, translate it back into
a dictionary of data.
"""
inflated_tracestate = None
if encoded_tracestate:
# Base64-encoded strings always come out with a length which is a
# multiple of 4. In order to achieve this, the end is padded with one or
# more `=` signs. Because the tracestate standard calls for using `=`
# signs between vendor name and value (`sentry=xxx,dogsaregreat=yyy`),
# to avoid confusion we strip the `=` when the data is initially
# encoded. Python's decoding function requires they be put back.
# Fortunately, it doesn't complain if there are too many, so we just
# attach two `=` on spec (there will never be more than 2, see
# https://en.wikipedia.org/wiki/Base64#Decoding_Base64_without_padding).
tracestate_json = from_base64(encoded_tracestate + "==")
try:
assert tracestate_json is not None
inflated_tracestate = json.loads(tracestate_json)
except Exception as err:
logger.warning(
(
"Unable to attach tracestate data to envelope header: {err}"
+ "\nTracestate value is {encoded_tracestate}"
).format(err=err, encoded_tracestate=encoded_tracestate),
)
return inflated_tracestate
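# --- Illustrative example (not part of the SDK source) ---
# The encode/decode helpers round-trip: the padding stripped by
# `compute_tracestate_value` is restored (over-generously) here.
example_encoded_ts = compute_tracestate_value(
    {"trace_id": "12312012123120121231201212312012"}
)
assert reinflate_tracestate(example_encoded_ts) == {
    "trace_id": "12312012123120121231201212312012"
}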
def _format_sql(cursor, sql):
# type: (Any, str) -> Optional[str]
real_sql = None
# If we're using psycopg2, it could be that we're
# looking at a query that uses Composed objects. Use psycopg2's mogrify
# function to format the query. We lose per-parameter trimming but gain
# accuracy in formatting.
try:
if hasattr(cursor, "mogrify"):
real_sql = cursor.mogrify(sql)
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
except Exception:
real_sql = None
return real_sql or to_string(sql)
def has_tracestate_enabled(span=None):
# type: (Optional[Span]) -> bool
client = ((span and span.hub) or sentry_sdk.Hub.current).client
options = client and client.options
return bool(options and options["_experiments"].get("propagate_tracestate"))
def has_custom_measurements_enabled():
# type: () -> bool
client = sentry_sdk.Hub.current.client
options = client and client.options
return bool(options and options["_experiments"].get("custom_measurements"))
class Baggage(object):
__slots__ = ("sentry_items", "third_party_items", "mutable")
SENTRY_PREFIX = "sentry-"
SENTRY_PREFIX_REGEX = re.compile("^sentry-")
# DynamicSamplingContext
DSC_KEYS = [
"trace_id",
"public_key",
"sample_rate",
"release",
"environment",
"transaction",
"user_id",
"user_segment",
]
def __init__(
self,
sentry_items, # type: Dict[str, str]
third_party_items="", # type: str
mutable=True, # type: bool
):
self.sentry_items = sentry_items
self.third_party_items = third_party_items
self.mutable = mutable
@classmethod
def from_incoming_header(cls, header):
# type: (Optional[str]) -> Baggage
"""
        Create a Baggage instance from an incoming header, freezing it if the
        header already contains Sentry baggage entries.
"""
sentry_items = {}
third_party_items = ""
mutable = True
if header:
for item in header.split(","):
if "=" not in item:
continue
with capture_internal_exceptions():
item = item.strip()
key, val = item.split("=")
if Baggage.SENTRY_PREFIX_REGEX.match(key):
baggage_key = unquote(key.split("-")[1])
sentry_items[baggage_key] = unquote(val)
mutable = False
else:
third_party_items += ("," if third_party_items else "") + item
return Baggage(sentry_items, third_party_items, mutable)
@classmethod
def populate_from_transaction(cls, transaction):
# type: (Transaction) -> Baggage
"""
Populate fresh baggage entry with sentry_items and make it immutable
if this is the head SDK which originates traces.
"""
hub = transaction.hub or sentry_sdk.Hub.current
client = hub.client
sentry_items = {} # type: Dict[str, str]
if not client:
return Baggage(sentry_items)
options = client.options or {}
user = (hub.scope and hub.scope._user) or {}
sentry_items["trace_id"] = transaction.trace_id
if options.get("environment"):
sentry_items["environment"] = options["environment"]
if options.get("release"):
sentry_items["release"] = options["release"]
if options.get("dsn"):
sentry_items["public_key"] = Dsn(options["dsn"]).public_key
if (
transaction.name
and transaction.source not in LOW_QUALITY_TRANSACTION_SOURCES
):
sentry_items["transaction"] = transaction.name
if user.get("segment"):
sentry_items["user_segment"] = user["segment"]
if transaction.sample_rate is not None:
sentry_items["sample_rate"] = str(transaction.sample_rate)
        # If there's an existing baggage, it was mutable, which is why this
        # new one is being created. If the user happened to put some sentry
        # items in there, though, give them precedence.
if transaction._baggage and transaction._baggage.sentry_items:
sentry_items.update(transaction._baggage.sentry_items)
return Baggage(sentry_items, mutable=False)
def freeze(self):
# type: () -> None
self.mutable = False
def dynamic_sampling_context(self):
# type: () -> Dict[str, str]
header = {}
for key in Baggage.DSC_KEYS:
item = self.sentry_items.get(key)
if item:
header[key] = item
return header
def serialize(self, include_third_party=False):
# type: (bool) -> str
items = []
for key, val in iteritems(self.sentry_items):
with capture_internal_exceptions():
item = Baggage.SENTRY_PREFIX + quote(key) + "=" + quote(str(val))
items.append(item)
if include_third_party:
items.append(self.third_party_items)
return ",".join(items)
# Circular imports
from sentry_sdk.tracing import LOW_QUALITY_TRANSACTION_SOURCES
if MYPY:
from sentry_sdk.tracing import Span, Transaction
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/api.py

import inspect
from sentry_sdk.hub import Hub
from sentry_sdk.scope import Scope
from sentry_sdk._types import MYPY
from sentry_sdk.tracing import NoOpSpan
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
from typing import overload
from typing import Callable
from typing import TypeVar
from typing import ContextManager
from typing import Union
from sentry_sdk._types import Event, Hint, Breadcrumb, BreadcrumbHint, ExcInfo
from sentry_sdk.tracing import Span, Transaction
T = TypeVar("T")
F = TypeVar("F", bound=Callable[..., Any])
else:
def overload(x):
# type: (T) -> T
return x
# When changing this, update __all__ in __init__.py too
__all__ = [
"capture_event",
"capture_message",
"capture_exception",
"add_breadcrumb",
"configure_scope",
"push_scope",
"flush",
"last_event_id",
"start_span",
"start_transaction",
"set_tag",
"set_context",
"set_extra",
"set_user",
"set_level",
]
def hubmethod(f):
# type: (F) -> F
f.__doc__ = "%s\n\n%s" % (
"Alias for :py:meth:`sentry_sdk.Hub.%s`" % f.__name__,
inspect.getdoc(getattr(Hub, f.__name__)),
)
return f
def scopemethod(f):
# type: (F) -> F
f.__doc__ = "%s\n\n%s" % (
"Alias for :py:meth:`sentry_sdk.Scope.%s`" % f.__name__,
inspect.getdoc(getattr(Scope, f.__name__)),
)
return f
@hubmethod
def capture_event(
event, # type: Event
hint=None, # type: Optional[Hint]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
return Hub.current.capture_event(event, hint, scope=scope, **scope_args)
@hubmethod
def capture_message(
message, # type: str
level=None, # type: Optional[str]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
return Hub.current.capture_message(message, level, scope=scope, **scope_args)
@hubmethod
def capture_exception(
error=None, # type: Optional[Union[BaseException, ExcInfo]]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
return Hub.current.capture_exception(error, scope=scope, **scope_args)
@hubmethod
def add_breadcrumb(
crumb=None, # type: Optional[Breadcrumb]
hint=None, # type: Optional[BreadcrumbHint]
**kwargs # type: Any
):
# type: (...) -> None
return Hub.current.add_breadcrumb(crumb, hint, **kwargs)
@overload
def configure_scope():
# type: () -> ContextManager[Scope]
pass
@overload
def configure_scope( # noqa: F811
callback, # type: Callable[[Scope], None]
):
# type: (...) -> None
pass
@hubmethod
def configure_scope( # noqa: F811
callback=None, # type: Optional[Callable[[Scope], None]]
):
# type: (...) -> Optional[ContextManager[Scope]]
return Hub.current.configure_scope(callback)
@overload
def push_scope():
# type: () -> ContextManager[Scope]
pass
@overload
def push_scope( # noqa: F811
callback, # type: Callable[[Scope], None]
):
# type: (...) -> None
pass
@hubmethod
def push_scope( # noqa: F811
callback=None, # type: Optional[Callable[[Scope], None]]
):
# type: (...) -> Optional[ContextManager[Scope]]
return Hub.current.push_scope(callback)
@scopemethod
def set_tag(key, value):
# type: (str, Any) -> None
return Hub.current.scope.set_tag(key, value)
@scopemethod
def set_context(key, value):
# type: (str, Dict[str, Any]) -> None
return Hub.current.scope.set_context(key, value)
@scopemethod
def set_extra(key, value):
# type: (str, Any) -> None
return Hub.current.scope.set_extra(key, value)
@scopemethod
def set_user(value):
# type: (Optional[Dict[str, Any]]) -> None
return Hub.current.scope.set_user(value)
@scopemethod
def set_level(value):
# type: (str) -> None
return Hub.current.scope.set_level(value)
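# --- Illustrative usage sketch (not part of the SDK source) ---
# These aliases let application code enrich and capture events without
# touching Hub or Scope directly:
#
#     import sentry_sdk
#     sentry_sdk.set_tag("payment_provider", "stripe")
#     sentry_sdk.set_user({"id": "42"})
#     sentry_sdk.add_breadcrumb(category="auth", message="user logged in")
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         sentry_sdk.capture_exception()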
@hubmethod
def flush(
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
return Hub.current.flush(timeout=timeout, callback=callback)
@hubmethod
def last_event_id():
# type: () -> Optional[str]
return Hub.current.last_event_id()
@hubmethod
def start_span(
span=None, # type: Optional[Span]
**kwargs # type: Any
):
# type: (...) -> Span
return Hub.current.start_span(span=span, **kwargs)
@hubmethod
def start_transaction(
transaction=None, # type: Optional[Transaction]
**kwargs # type: Any
):
# type: (...) -> Union[Transaction, NoOpSpan]
return Hub.current.start_transaction(transaction, **kwargs)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/transport.py

from __future__ import print_function
import io
import urllib3 # type: ignore
import certifi
import gzip
import time
from datetime import datetime, timedelta
from collections import defaultdict
from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions, json_dumps
from sentry_sdk.worker import BackgroundWorker
from sentry_sdk.envelope import Envelope, Item, PayloadRef
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from typing import DefaultDict
from urllib3.poolmanager import PoolManager # type: ignore
from urllib3.poolmanager import ProxyManager
from sentry_sdk._types import Event, EndpointType
DataCategory = Optional[str]
try:
from urllib.request import getproxies
except ImportError:
from urllib import getproxies # type: ignore
class Transport(object):
"""Baseclass for all transports.
A transport is used to send an event to sentry.
"""
parsed_dsn = None # type: Optional[Dsn]
def __init__(
self, options=None # type: Optional[Dict[str, Any]]
):
# type: (...) -> None
self.options = options
if options and options["dsn"] is not None and options["dsn"]:
self.parsed_dsn = Dsn(options["dsn"])
else:
self.parsed_dsn = None
def capture_event(
self, event # type: Event
):
# type: (...) -> None
"""
This gets invoked with the event dictionary when an event should
be sent to sentry.
"""
raise NotImplementedError()
def capture_envelope(
self, envelope # type: Envelope
):
# type: (...) -> None
"""
Send an envelope to Sentry.
Envelopes are a data container format that can hold any type of data
submitted to Sentry. We use it for transactions and sessions, but
regular "error" events should go through `capture_event` for backwards
compat.
"""
raise NotImplementedError()
def flush(
self,
timeout, # type: float
callback=None, # type: Optional[Any]
):
# type: (...) -> None
"""Wait `timeout` seconds for the current events to be sent out."""
pass
def kill(self):
# type: () -> None
"""Forcefully kills the transport."""
pass
def record_lost_event(
self,
reason, # type: str
data_category=None, # type: Optional[str]
item=None, # type: Optional[Item]
):
# type: (...) -> None
"""This increments a counter for event loss by reason and
data category.
"""
return None
def __del__(self):
# type: () -> None
try:
self.kill()
except Exception:
pass
def _parse_rate_limits(header, now=None):
# type: (Any, Optional[datetime]) -> Iterable[Tuple[DataCategory, datetime]]
if now is None:
now = datetime.utcnow()
for limit in header.split(","):
try:
retry_after, categories, _ = limit.strip().split(":", 2)
retry_after = now + timedelta(seconds=int(retry_after))
for category in categories and categories.split(";") or (None,):
yield category, retry_after
except (LookupError, ValueError):
continue
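# --- Illustrative example (not part of the SDK source) ---
# Each entry has the form `retry_after:categories:scope`; categories are
# split on ";" and an empty category list yields the catch-all bucket (None).
example_now = datetime(2023, 1, 1)
dict(_parse_rate_limits("60:transaction:key, 2700:default;error:org", now=example_now))
# -> {"transaction": datetime(2023, 1, 1, 0, 1),
#     "default": datetime(2023, 1, 1, 0, 45),
#     "error": datetime(2023, 1, 1, 0, 45)}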
class HttpTransport(Transport):
"""The default HTTP transport."""
def __init__(
self, options # type: Dict[str, Any]
):
# type: (...) -> None
from sentry_sdk.consts import VERSION
Transport.__init__(self, options)
assert self.parsed_dsn is not None
self.options = options # type: Dict[str, Any]
self._worker = BackgroundWorker(queue_size=options["transport_queue_size"])
self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
self._disabled_until = {} # type: Dict[DataCategory, datetime]
self._retry = urllib3.util.Retry()
self._discarded_events = defaultdict(
int
) # type: DefaultDict[Tuple[str, str], int]
self._last_client_report_sent = time.time()
self._pool = self._make_pool(
self.parsed_dsn,
http_proxy=options["http_proxy"],
https_proxy=options["https_proxy"],
ca_certs=options["ca_certs"],
proxy_headers=options["proxy_headers"],
)
from sentry_sdk import Hub
self.hub_cls = Hub
def record_lost_event(
self,
reason, # type: str
data_category=None, # type: Optional[str]
item=None, # type: Optional[Item]
):
# type: (...) -> None
if not self.options["send_client_reports"]:
return
quantity = 1
if item is not None:
data_category = item.data_category
if data_category == "attachment":
                # an attachment of 0 bytes still counts as one attachment,
                # so never record a quantity of 0.
quantity = len(item.get_bytes()) or 1
elif data_category is None:
raise TypeError("data category not provided")
self._discarded_events[data_category, reason] += quantity
def _update_rate_limits(self, response):
# type: (urllib3.HTTPResponse) -> None
        # Newer Sentry versions provide richer rate-limit insights. We honor
        # this header regardless of the status code and use it to update our
        # internal rate limits.
header = response.headers.get("x-sentry-rate-limits")
if header:
logger.warning("Rate-limited via x-sentry-rate-limits")
self._disabled_until.update(_parse_rate_limits(header))
        # Older Sentry versions only communicate global rate-limit hits via
        # the retry-after header on 429. This header can also be emitted by
        # newer Sentry versions if a proxy in front wants to slow things down
        # globally.
elif response.status == 429:
logger.warning("Rate-limited via 429")
self._disabled_until[None] = datetime.utcnow() + timedelta(
seconds=self._retry.get_retry_after(response) or 60
)
def _send_request(
self,
body, # type: bytes
headers, # type: Dict[str, str]
endpoint_type="store", # type: EndpointType
envelope=None, # type: Optional[Envelope]
):
# type: (...) -> None
def record_loss(reason):
# type: (str) -> None
if envelope is None:
self.record_lost_event(reason, data_category="error")
else:
for item in envelope.items:
self.record_lost_event(reason, item=item)
headers.update(
{
"User-Agent": str(self._auth.client),
"X-Sentry-Auth": str(self._auth.to_header()),
}
)
try:
response = self._pool.request(
"POST",
str(self._auth.get_api_url(endpoint_type)),
body=body,
headers=headers,
)
except Exception:
self.on_dropped_event("network")
record_loss("network_error")
raise
try:
self._update_rate_limits(response)
if response.status == 429:
                # If we hit a 429, something was rate limited, but we already
                # acted on this in `self._update_rate_limits`. Note that we
                # do not want to record event loss here, as an outcome will
                # already have been recorded in Relay.
self.on_dropped_event("status_429")
pass
elif response.status >= 300 or response.status < 200:
logger.error(
"Unexpected status code: %s (body: %s)",
response.status,
response.data,
)
self.on_dropped_event("status_{}".format(response.status))
record_loss("network_error")
finally:
response.close()
def on_dropped_event(self, reason):
# type: (str) -> None
return None
def _fetch_pending_client_report(self, force=False, interval=60):
# type: (bool, int) -> Optional[Item]
if not self.options["send_client_reports"]:
return None
if not (force or self._last_client_report_sent < time.time() - interval):
return None
discarded_events = self._discarded_events
self._discarded_events = defaultdict(int)
self._last_client_report_sent = time.time()
if not discarded_events:
return None
return Item(
PayloadRef(
json={
"timestamp": time.time(),
"discarded_events": [
{"reason": reason, "category": category, "quantity": quantity}
for (
(category, reason),
quantity,
) in discarded_events.items()
],
}
),
type="client_report",
)
def _flush_client_reports(self, force=False):
# type: (bool) -> None
client_report = self._fetch_pending_client_report(force=force, interval=60)
if client_report is not None:
self.capture_envelope(Envelope(items=[client_report]))
def _check_disabled(self, category):
# type: (str) -> bool
def _disabled(bucket):
# type: (Any) -> bool
ts = self._disabled_until.get(bucket)
return ts is not None and ts > datetime.utcnow()
return _disabled(category) or _disabled(None)
def _send_event(
self, event # type: Event
):
# type: (...) -> None
if self._check_disabled("error"):
self.on_dropped_event("self_rate_limits")
self.record_lost_event("ratelimit_backoff", data_category="error")
return None
body = io.BytesIO()
with gzip.GzipFile(fileobj=body, mode="w") as f:
f.write(json_dumps(event))
assert self.parsed_dsn is not None
logger.debug(
"Sending event, type:%s level:%s event_id:%s project:%s host:%s"
% (
event.get("type") or "null",
event.get("level") or "null",
event.get("event_id") or "null",
self.parsed_dsn.project_id,
self.parsed_dsn.host,
)
)
self._send_request(
body.getvalue(),
headers={"Content-Type": "application/json", "Content-Encoding": "gzip"},
)
return None
def _send_envelope(
self, envelope # type: Envelope
):
# type: (...) -> None
# remove all items from the envelope which are over quota
new_items = []
for item in envelope.items:
if self._check_disabled(item.data_category):
if item.data_category in ("transaction", "error", "default"):
self.on_dropped_event("self_rate_limits")
self.record_lost_event("ratelimit_backoff", item=item)
else:
new_items.append(item)
        # Since we're modifying the envelope here, make a copy so that others
        # that hold references do not see their envelope modified.
envelope = Envelope(headers=envelope.headers, items=new_items)
if not envelope.items:
return None
        # Since we're already in the business of sending out an envelope here,
        # check whether a client report is pending and attach it to this
        # envelope before it is sent. In practice this typically attaches the
        # client report to the most recent session update.
client_report_item = self._fetch_pending_client_report(interval=30)
if client_report_item is not None:
envelope.items.append(client_report_item)
body = io.BytesIO()
with gzip.GzipFile(fileobj=body, mode="w") as f:
envelope.serialize_into(f)
assert self.parsed_dsn is not None
logger.debug(
"Sending envelope [%s] project:%s host:%s",
envelope.description,
self.parsed_dsn.project_id,
self.parsed_dsn.host,
)
self._send_request(
body.getvalue(),
headers={
"Content-Type": "application/x-sentry-envelope",
"Content-Encoding": "gzip",
},
endpoint_type="envelope",
envelope=envelope,
)
return None
def _get_pool_options(self, ca_certs):
# type: (Optional[Any]) -> Dict[str, Any]
return {
"num_pools": 2,
"cert_reqs": "CERT_REQUIRED",
"ca_certs": ca_certs or certifi.where(),
}
def _in_no_proxy(self, parsed_dsn):
# type: (Dsn) -> bool
no_proxy = getproxies().get("no")
if not no_proxy:
return False
for host in no_proxy.split(","):
host = host.strip()
if parsed_dsn.host.endswith(host) or parsed_dsn.netloc.endswith(host):
return True
return False
def _make_pool(
self,
parsed_dsn, # type: Dsn
http_proxy, # type: Optional[str]
https_proxy, # type: Optional[str]
ca_certs, # type: Optional[Any]
proxy_headers, # type: Optional[Dict[str, str]]
):
# type: (...) -> Union[PoolManager, ProxyManager]
proxy = None
no_proxy = self._in_no_proxy(parsed_dsn)
# try HTTPS first
if parsed_dsn.scheme == "https" and (https_proxy != ""):
proxy = https_proxy or (not no_proxy and getproxies().get("https"))
        # maybe fall back to an HTTP proxy
if not proxy and (http_proxy != ""):
proxy = http_proxy or (not no_proxy and getproxies().get("http"))
opts = self._get_pool_options(ca_certs)
if proxy:
if proxy_headers:
opts["proxy_headers"] = proxy_headers
return urllib3.ProxyManager(proxy, **opts)
else:
return urllib3.PoolManager(**opts)
def capture_event(
self, event # type: Event
):
# type: (...) -> None
hub = self.hub_cls.current
def send_event_wrapper():
# type: () -> None
with hub:
with capture_internal_exceptions():
self._send_event(event)
self._flush_client_reports()
if not self._worker.submit(send_event_wrapper):
self.on_dropped_event("full_queue")
self.record_lost_event("queue_overflow", data_category="error")
def capture_envelope(
self, envelope # type: Envelope
):
# type: (...) -> None
hub = self.hub_cls.current
def send_envelope_wrapper():
# type: () -> None
with hub:
with capture_internal_exceptions():
self._send_envelope(envelope)
self._flush_client_reports()
if not self._worker.submit(send_envelope_wrapper):
self.on_dropped_event("full_queue")
for item in envelope.items:
self.record_lost_event("queue_overflow", item=item)
def flush(
self,
timeout, # type: float
callback=None, # type: Optional[Any]
):
# type: (...) -> None
logger.debug("Flushing HTTP transport")
if timeout > 0:
self._worker.submit(lambda: self._flush_client_reports(force=True))
self._worker.flush(timeout, callback)
def kill(self):
# type: () -> None
logger.debug("Killing HTTP transport")
self._worker.kill()
class _FunctionTransport(Transport):
def __init__(
self, func # type: Callable[[Event], None]
):
# type: (...) -> None
Transport.__init__(self)
self._func = func
def capture_event(
self, event # type: Event
):
# type: (...) -> None
self._func(event)
return None
def make_transport(options):
# type: (Dict[str, Any]) -> Optional[Transport]
ref_transport = options["transport"]
# If no transport is given, we use the http transport class
if ref_transport is None:
transport_cls = HttpTransport # type: Type[Transport]
elif isinstance(ref_transport, Transport):
return ref_transport
elif isinstance(ref_transport, type) and issubclass(ref_transport, Transport):
transport_cls = ref_transport
elif callable(ref_transport):
return _FunctionTransport(ref_transport) # type: ignore
    # if a transport class is given, only instantiate it if the dsn is not
    # empty or None
if options["dsn"]:
return transport_cls(options)
return None
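# --- Illustrative example (not part of the SDK source) ---
# A bare callable is wrapped in _FunctionTransport, which simply invokes it
# for every captured event (handy in tests).
example_events = []
example_transport = make_transport({"transport": example_events.append})
example_transport.capture_event({"message": "hi"})
assert example_events == [{"message": "hi"}]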
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/client.py

import os
import uuid
import random
from datetime import datetime
import socket
from sentry_sdk._compat import string_types, text_type, iteritems
from sentry_sdk.utils import (
capture_internal_exceptions,
current_stacktrace,
disable_capture_event,
format_timestamp,
get_sdk_name,
get_type_name,
get_default_release,
handle_in_app,
logger,
)
from sentry_sdk.serializer import serialize
from sentry_sdk.transport import make_transport
from sentry_sdk.consts import (
DEFAULT_OPTIONS,
INSTRUMENTER,
VERSION,
ClientConstructor,
)
from sentry_sdk.integrations import setup_integrations
from sentry_sdk.utils import ContextVar
from sentry_sdk.sessions import SessionFlusher
from sentry_sdk.envelope import Envelope
from sentry_sdk.profiler import setup_profiler
from sentry_sdk.tracing_utils import has_tracestate_enabled, reinflate_tracestate
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from sentry_sdk.scope import Scope
from sentry_sdk._types import Event, Hint
from sentry_sdk.session import Session
_client_init_debug = ContextVar("client_init_debug")
SDK_INFO = {
"name": "sentry.python", # SDK name will be overridden after integrations have been loaded with sentry_sdk.integrations.setup_integrations()
"version": VERSION,
"packages": [{"name": "pypi:sentry-sdk", "version": VERSION}],
}
def _get_options(*args, **kwargs):
# type: (*Optional[str], **Any) -> Dict[str, Any]
if args and (isinstance(args[0], (text_type, bytes, str)) or args[0] is None):
dsn = args[0] # type: Optional[str]
args = args[1:]
else:
dsn = None
if len(args) > 1:
raise TypeError("Only single positional argument is expected")
rv = dict(DEFAULT_OPTIONS)
options = dict(*args, **kwargs)
if dsn is not None and options.get("dsn") is None:
options["dsn"] = dsn
for key, value in iteritems(options):
if key not in rv:
raise TypeError("Unknown option %r" % (key,))
rv[key] = value
if rv["dsn"] is None:
rv["dsn"] = os.environ.get("SENTRY_DSN")
if rv["release"] is None:
rv["release"] = get_default_release()
if rv["environment"] is None:
rv["environment"] = os.environ.get("SENTRY_ENVIRONMENT") or "production"
if rv["server_name"] is None and hasattr(socket, "gethostname"):
rv["server_name"] = socket.gethostname()
if rv["instrumenter"] is None:
rv["instrumenter"] = INSTRUMENTER.SENTRY
return rv
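# --- Illustrative example (not part of the SDK source; assumes no SENTRY_*
# environment variables are set) ---
example_opts = _get_options("https://key@o0.ingest.sentry.io/0", sample_rate=0.5)
# example_opts["dsn"]         == "https://key@o0.ingest.sentry.io/0"
# example_opts["sample_rate"] == 0.5
# example_opts["environment"] == "production" (fallback default)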
class _Client(object):
"""The client is internally responsible for capturing the events and
forwarding them to sentry through the configured transport. It takes
    the client options as keyword arguments and optionally the DSN as the
    first argument.
"""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.options = get_options(*args, **kwargs) # type: Dict[str, Any]
self._init_impl()
def __getstate__(self):
# type: () -> Any
return {"options": self.options}
def __setstate__(self, state):
# type: (Any) -> None
self.options = state["options"]
self._init_impl()
def _init_impl(self):
# type: () -> None
old_debug = _client_init_debug.get(False)
def _capture_envelope(envelope):
# type: (Envelope) -> None
if self.transport is not None:
self.transport.capture_envelope(envelope)
try:
_client_init_debug.set(self.options["debug"])
self.transport = make_transport(self.options)
self.session_flusher = SessionFlusher(capture_func=_capture_envelope)
request_bodies = ("always", "never", "small", "medium")
if self.options["request_bodies"] not in request_bodies:
raise ValueError(
"Invalid value for request_bodies. Must be one of {}".format(
request_bodies
)
)
self.integrations = setup_integrations(
self.options["integrations"],
with_defaults=self.options["default_integrations"],
with_auto_enabling_integrations=self.options[
"auto_enabling_integrations"
],
)
sdk_name = get_sdk_name(list(self.integrations.keys()))
SDK_INFO["name"] = sdk_name
logger.debug("Setting SDK name to '%s'", sdk_name)
finally:
_client_init_debug.set(old_debug)
profiles_sample_rate = self.options["_experiments"].get("profiles_sample_rate")
if profiles_sample_rate is not None and profiles_sample_rate > 0:
try:
setup_profiler(self.options)
except ValueError as e:
logger.debug(str(e))
@property
def dsn(self):
# type: () -> Optional[str]
"""Returns the configured DSN as string."""
return self.options["dsn"]
def _prepare_event(
self,
event, # type: Event
hint, # type: Hint
scope, # type: Optional[Scope]
):
# type: (...) -> Optional[Event]
if event.get("timestamp") is None:
event["timestamp"] = datetime.utcnow()
if scope is not None:
is_transaction = event.get("type") == "transaction"
event_ = scope.apply_to_event(event, hint)
# one of the event/error processors returned None
if event_ is None:
if self.transport:
self.transport.record_lost_event(
"event_processor",
data_category=("transaction" if is_transaction else "error"),
)
return None
event = event_
if (
self.options["attach_stacktrace"]
and "exception" not in event
and "stacktrace" not in event
and "threads" not in event
):
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
self.options["with_locals"]
),
"crashed": False,
"current": True,
}
]
}
for key in "release", "environment", "server_name", "dist":
if event.get(key) is None and self.options[key] is not None:
event[key] = text_type(self.options[key]).strip()
if event.get("sdk") is None:
sdk_info = dict(SDK_INFO)
sdk_info["integrations"] = sorted(self.integrations.keys())
event["sdk"] = sdk_info
if event.get("platform") is None:
event["platform"] = "python"
event = handle_in_app(
event, self.options["in_app_exclude"], self.options["in_app_include"]
)
        # Postprocess the event here so that annotated types generally do not
        # surface in before_send
if event is not None:
event = serialize(
event,
smart_transaction_trimming=self.options["_experiments"].get(
"smart_transaction_trimming"
),
)
before_send = self.options["before_send"]
if before_send is not None and event.get("type") != "transaction":
new_event = None
with capture_internal_exceptions():
new_event = before_send(event, hint or {})
if new_event is None:
logger.info("before send dropped event (%s)", event)
if self.transport:
self.transport.record_lost_event(
"before_send", data_category="error"
)
event = new_event # type: ignore
before_send_transaction = self.options["before_send_transaction"]
if before_send_transaction is not None and event.get("type") == "transaction":
new_event = None
with capture_internal_exceptions():
new_event = before_send_transaction(event, hint or {})
if new_event is None:
logger.info("before send transaction dropped event (%s)", event)
if self.transport:
self.transport.record_lost_event(
"before_send", data_category="transaction"
)
event = new_event # type: ignore
return event
def _is_ignored_error(self, event, hint):
# type: (Event, Hint) -> bool
exc_info = hint.get("exc_info")
if exc_info is None:
return False
error = exc_info[0]
error_type_name = get_type_name(exc_info[0])
error_full_name = "%s.%s" % (exc_info[0].__module__, error_type_name)
for ignored_error in self.options["ignore_errors"]:
# String types are matched against the type name in the
# exception only
if isinstance(ignored_error, string_types):
if ignored_error == error_full_name or ignored_error == error_type_name:
return True
else:
if issubclass(error, ignored_error):
return True
return False
def _should_capture(
self,
event, # type: Event
hint, # type: Hint
scope=None, # type: Optional[Scope]
):
# type: (...) -> bool
        # Transactions are sampled independently of error events.
is_transaction = event.get("type") == "transaction"
if is_transaction:
return True
ignoring_prevents_recursion = scope is not None and not scope._should_capture
if ignoring_prevents_recursion:
return False
ignored_by_config_option = self._is_ignored_error(event, hint)
if ignored_by_config_option:
return False
return True
def _should_sample_error(
self,
event, # type: Event
):
# type: (...) -> bool
not_in_sample_rate = (
self.options["sample_rate"] < 1.0
and random.random() >= self.options["sample_rate"]
)
if not_in_sample_rate:
# because we will not sample this event, record a "lost event".
if self.transport:
self.transport.record_lost_event("sample_rate", data_category="error")
return False
return True
def _update_session_from_event(
self,
session, # type: Session
event, # type: Event
):
# type: (...) -> None
crashed = False
errored = False
user_agent = None
exceptions = (event.get("exception") or {}).get("values")
if exceptions:
errored = True
for error in exceptions:
mechanism = error.get("mechanism")
if mechanism and mechanism.get("handled") is False:
crashed = True
break
user = event.get("user")
if session.user_agent is None:
headers = (event.get("request") or {}).get("headers")
for (k, v) in iteritems(headers or {}):
if k.lower() == "user-agent":
user_agent = v
break
session.update(
status="crashed" if crashed else None,
user=user,
user_agent=user_agent,
errors=session.errors + (errored or crashed),
)
def capture_event(
self,
event, # type: Event
hint=None, # type: Optional[Hint]
scope=None, # type: Optional[Scope]
):
# type: (...) -> Optional[str]
"""Captures an event.
:param event: A ready-made event that can be directly sent to Sentry.
:param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
        :returns: An event ID. May be `None` if there is no DSN set or if the SDK decided to discard the event for other reasons. In such situations setting `debug=True` on `init()` may help.
"""
if disable_capture_event.get(False):
return None
if self.transport is None:
return None
if hint is None:
hint = {}
event_id = event.get("event_id")
hint = dict(hint or ()) # type: Hint
if event_id is None:
event["event_id"] = event_id = uuid.uuid4().hex
if not self._should_capture(event, hint, scope):
return None
profile = event.pop("profile", None)
event_opt = self._prepare_event(event, hint, scope)
if event_opt is None:
return None
# whenever we capture an event we also check if the session needs
# to be updated based on that information.
session = scope._session if scope else None
if session:
self._update_session_from_event(session, event)
is_transaction = event_opt.get("type") == "transaction"
if not is_transaction and not self._should_sample_error(event):
return None
attachments = hint.get("attachments")
# this is outside of the `if` immediately below because even if we don't
# use the value, we want to make sure we remove it before the event is
# sent
raw_tracestate = (
event_opt.get("contexts", {}).get("trace", {}).pop("tracestate", "")
)
dynamic_sampling_context = (
event_opt.get("contexts", {})
.get("trace", {})
.pop("dynamic_sampling_context", {})
)
# Transactions or events with attachments should go to the /envelope/
# endpoint.
if is_transaction or attachments:
headers = {
"event_id": event_opt["event_id"],
"sent_at": format_timestamp(datetime.utcnow()),
}
if has_tracestate_enabled():
tracestate_data = raw_tracestate and reinflate_tracestate(
raw_tracestate.replace("sentry=", "")
)
if tracestate_data:
headers["trace"] = tracestate_data
elif dynamic_sampling_context:
headers["trace"] = dynamic_sampling_context
envelope = Envelope(headers=headers)
if is_transaction:
if profile is not None:
envelope.add_profile(profile.to_json(event_opt, self.options))
envelope.add_transaction(event_opt)
else:
envelope.add_event(event_opt)
for attachment in attachments or ():
envelope.add_item(attachment.to_envelope_item())
self.transport.capture_envelope(envelope)
else:
# All other events go to the /store/ endpoint.
self.transport.capture_event(event_opt)
return event_id
def capture_session(
self, session # type: Session
):
# type: (...) -> None
if not session.release:
logger.info("Discarded session update because of missing release")
else:
self.session_flusher.add_session(session)
def close(
self,
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
"""
Close the client and shut down the transport. Arguments have the same
semantics as :py:meth:`Client.flush`.
"""
if self.transport is not None:
self.flush(timeout=timeout, callback=callback)
self.session_flusher.kill()
self.transport.kill()
self.transport = None
def flush(
self,
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
"""
Wait for the current events to be sent.
:param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
:param callback: Is invoked with the number of pending events and the configured timeout.
"""
if self.transport is not None:
if timeout is None:
timeout = self.options["shutdown_timeout"]
self.session_flusher.flush()
self.transport.flush(timeout=timeout, callback=callback)
def __enter__(self):
# type: () -> _Client
return self
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
self.close()
from sentry_sdk._types import MYPY
if MYPY:
# Make mypy, PyCharm and other static analyzers think `get_options` is a
# type to have nicer autocompletion for params.
#
# Use `ClientConstructor` to define the argument types of `init` and
# `Dict[str, Any]` to tell static analyzers about the return type.
class get_options(ClientConstructor, Dict[str, Any]): # noqa: N801
pass
class Client(ClientConstructor, _Client):
pass
else:
# Alias `get_options` for actual usage. Go through the lambda indirection
# to throw PyCharm off of the weakly typed signature (it would otherwise
# discover both the weakly typed signature of `_init` and our faked `init`
# type).
get_options = (lambda: _get_options)()
Client = (lambda: _Client)()
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/_functools.py
"""
A backport of Python 3 functools to Python 2/3. The only important change
we rely upon is that `update_wrapper` handles AttributeError gracefully.
"""
from functools import partial
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
WRAPPER_ASSIGNMENTS = (
"__module__",
"__name__",
"__qualname__",
"__doc__",
"__annotations__",
)
WRAPPER_UPDATES = ("__dict__",)
def update_wrapper(
wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES
):
# type: (Any, Any, Any, Any) -> Any
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES):
# type: (Callable[..., Any], Any, Any) -> Callable[[Callable[..., Any]], Callable[..., Any]]
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)
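# A small sketch of why this vendored `wraps` matters: it tolerates wrapped
# callables that lack attributes such as `__name__` (for example
# functools.partial objects), where a strict Python 2 style update_wrapper
# would raise AttributeError. The callable below is illustrative:
#
#     from functools import partial
#
#     greet = partial(print, "hello")  # partials have no __name__
#
#     @wraps(greet)
#     def wrapper(*args, **kwargs):
#         return greet(*args, **kwargs)
#
#     wrapper("world")  # prints "hello world"; no AttributeError is raised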
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/session.py
import uuid
from datetime import datetime
from sentry_sdk._types import MYPY
from sentry_sdk.utils import format_timestamp
if MYPY:
from typing import Optional
from typing import Union
from typing import Any
from typing import Dict
from sentry_sdk._types import SessionStatus
def _minute_trunc(ts):
# type: (datetime) -> datetime
return ts.replace(second=0, microsecond=0)
def _make_uuid(
val, # type: Union[str, uuid.UUID]
):
# type: (...) -> uuid.UUID
if isinstance(val, uuid.UUID):
return val
return uuid.UUID(val)
class Session(object):
def __init__(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
session_mode="application", # type: str
):
# type: (...) -> None
if sid is None:
sid = uuid.uuid4()
if started is None:
started = datetime.utcnow()
if status is None:
status = "ok"
self.status = status
self.did = None # type: Optional[str]
self.started = started
self.release = None # type: Optional[str]
self.environment = None # type: Optional[str]
self.duration = None # type: Optional[float]
self.user_agent = None # type: Optional[str]
self.ip_address = None # type: Optional[str]
self.session_mode = session_mode # type: str
self.errors = 0
self.update(
sid=sid,
did=did,
timestamp=timestamp,
duration=duration,
release=release,
environment=environment,
user_agent=user_agent,
ip_address=ip_address,
errors=errors,
user=user,
)
@property
def truncated_started(self):
# type: (...) -> datetime
return _minute_trunc(self.started)
def update(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
        # If a user is supplied we pull some data from it
if user:
if ip_address is None:
ip_address = user.get("ip_address")
if did is None:
did = user.get("id") or user.get("email") or user.get("username")
if sid is not None:
self.sid = _make_uuid(sid)
if did is not None:
self.did = str(did)
if timestamp is None:
timestamp = datetime.utcnow()
self.timestamp = timestamp
if started is not None:
self.started = started
if duration is not None:
self.duration = duration
if release is not None:
self.release = release
if environment is not None:
self.environment = environment
if ip_address is not None:
self.ip_address = ip_address
if user_agent is not None:
self.user_agent = user_agent
if errors is not None:
self.errors = errors
if status is not None:
self.status = status
def close(
self, status=None # type: Optional[SessionStatus]
):
# type: (...) -> Any
if status is None and self.status == "ok":
status = "exited"
if status is not None:
self.update(status=status)
def get_json_attrs(
self, with_user_info=True # type: Optional[bool]
):
# type: (...) -> Any
attrs = {}
if self.release is not None:
attrs["release"] = self.release
if self.environment is not None:
attrs["environment"] = self.environment
if with_user_info:
if self.ip_address is not None:
attrs["ip_address"] = self.ip_address
if self.user_agent is not None:
attrs["user_agent"] = self.user_agent
return attrs
def to_json(self):
# type: (...) -> Any
rv = {
"sid": str(self.sid),
"init": True,
"started": format_timestamp(self.started),
"timestamp": format_timestamp(self.timestamp),
"status": self.status,
} # type: Dict[str, Any]
if self.errors:
rv["errors"] = self.errors
if self.did is not None:
rv["did"] = self.did
if self.duration is not None:
rv["duration"] = self.duration
attrs = self.get_json_attrs()
if attrs:
rv["attrs"] = attrs
return rv
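# A usage sketch (all values illustrative): construct a session, feed it
# data, close it, and serialize it for the envelope.
#
#     session = Session(release="my-app@1.0.0", environment="production")
#     session.update(user={"id": "42"})   # also derives the distinct id
#     session.close()                     # "ok" -> "exited"
#     payload = session.to_json()         # {"sid": ..., "init": True, ...}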
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/sessions.py
import os
import time
from threading import Thread, Lock
from contextlib import contextmanager
import sentry_sdk
from sentry_sdk.envelope import Envelope
from sentry_sdk.session import Session
from sentry_sdk._types import MYPY
from sentry_sdk.utils import format_timestamp
if MYPY:
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Union
def is_auto_session_tracking_enabled(hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Union[Any, bool, None]
"""Utility function to find out if session tracking is enabled."""
if hub is None:
hub = sentry_sdk.Hub.current
should_track = hub.scope._force_auto_session_tracking
if should_track is None:
client_options = hub.client.options if hub.client else {}
should_track = client_options.get("auto_session_tracking", False)
return should_track
@contextmanager
def auto_session_tracking(hub=None, session_mode="application"):
# type: (Optional[sentry_sdk.Hub], str) -> Generator[None, None, None]
"""Starts and stops a session automatically around a block."""
if hub is None:
hub = sentry_sdk.Hub.current
should_track = is_auto_session_tracking_enabled(hub)
if should_track:
hub.start_session(session_mode=session_mode)
try:
yield
finally:
if should_track:
hub.end_session()
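# Hedged usage sketch: a session is started and ended around the block only
# when auto session tracking is enabled (init() arguments are illustrative):
#
#     import sentry_sdk
#     from sentry_sdk.sessions import auto_session_tracking
#
#     sentry_sdk.init(dsn="...", auto_session_tracking=True)
#     with auto_session_tracking(session_mode="request"):
#         handle_request()  # hypothetical request handler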
TERMINAL_SESSION_STATES = ("exited", "abnormal", "crashed")
MAX_ENVELOPE_ITEMS = 100
def make_aggregate_envelope(aggregate_states, attrs):
# type: (Any, Any) -> Any
return {"attrs": dict(attrs), "aggregates": list(aggregate_states.values())}
class SessionFlusher(object):
def __init__(
self,
capture_func, # type: Callable[[Envelope], None]
flush_interval=60, # type: int
):
# type: (...) -> None
self.capture_func = capture_func
self.flush_interval = flush_interval
self.pending_sessions = [] # type: List[Any]
self.pending_aggregates = {} # type: Dict[Any, Any]
self._thread = None # type: Optional[Thread]
self._thread_lock = Lock()
self._aggregate_lock = Lock()
self._thread_for_pid = None # type: Optional[int]
self._running = True
def flush(self):
# type: (...) -> None
pending_sessions = self.pending_sessions
self.pending_sessions = []
with self._aggregate_lock:
pending_aggregates = self.pending_aggregates
self.pending_aggregates = {}
envelope = Envelope()
for session in pending_sessions:
if len(envelope.items) == MAX_ENVELOPE_ITEMS:
self.capture_func(envelope)
envelope = Envelope()
envelope.add_session(session)
for (attrs, states) in pending_aggregates.items():
if len(envelope.items) == MAX_ENVELOPE_ITEMS:
self.capture_func(envelope)
envelope = Envelope()
envelope.add_sessions(make_aggregate_envelope(states, attrs))
if len(envelope.items) > 0:
self.capture_func(envelope)
def _ensure_running(self):
# type: (...) -> None
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
with self._thread_lock:
if self._thread_for_pid == os.getpid() and self._thread is not None:
return None
def _thread():
# type: (...) -> None
while self._running:
time.sleep(self.flush_interval)
if self._running:
self.flush()
thread = Thread(target=_thread)
thread.daemon = True
thread.start()
self._thread = thread
self._thread_for_pid = os.getpid()
return None
def add_aggregate_session(
self, session # type: Session
):
# type: (...) -> None
# NOTE on `session.did`:
# the protocol can deal with buckets that have a distinct-id, however
# in practice we expect the python SDK to have an extremely high cardinality
# here, effectively making aggregation useless, therefore we do not
# aggregate per-did.
# For this part we can get away with using the global interpreter lock
with self._aggregate_lock:
attrs = session.get_json_attrs(with_user_info=False)
primary_key = tuple(sorted(attrs.items()))
secondary_key = session.truncated_started # (, session.did)
states = self.pending_aggregates.setdefault(primary_key, {})
state = states.setdefault(secondary_key, {})
if "started" not in state:
state["started"] = format_timestamp(session.truncated_started)
# if session.did is not None:
# state["did"] = session.did
if session.status == "crashed":
state["crashed"] = state.get("crashed", 0) + 1
elif session.status == "abnormal":
state["abnormal"] = state.get("abnormal", 0) + 1
elif session.errors > 0:
state["errored"] = state.get("errored", 0) + 1
else:
state["exited"] = state.get("exited", 0) + 1
def add_session(
self, session # type: Session
):
# type: (...) -> None
if session.session_mode == "request":
self.add_aggregate_session(session)
else:
self.pending_sessions.append(session.to_json())
self._ensure_running()
def kill(self):
# type: (...) -> None
self._running = False
def __del__(self):
# type: (...) -> None
self.kill()
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/hub.py
import copy
import sys
from datetime import datetime
from contextlib import contextmanager
from sentry_sdk._compat import with_metaclass
from sentry_sdk.consts import INSTRUMENTER
from sentry_sdk.scope import Scope
from sentry_sdk.client import Client
from sentry_sdk.tracing import NoOpSpan, Span, Transaction
from sentry_sdk.session import Session
from sentry_sdk.utils import (
exc_info_from_error,
event_from_exception,
logger,
ContextVar,
)
from sentry_sdk._types import MYPY
if MYPY:
from typing import Union
from typing import Any
from typing import Optional
from typing import Tuple
from typing import Dict
from typing import List
from typing import Callable
from typing import Generator
from typing import Type
from typing import TypeVar
from typing import overload
from typing import ContextManager
from sentry_sdk.integrations import Integration
from sentry_sdk._types import (
Event,
Hint,
Breadcrumb,
BreadcrumbHint,
ExcInfo,
)
from sentry_sdk.consts import ClientConstructor
T = TypeVar("T")
else:
def overload(x):
# type: (T) -> T
return x
_local = ContextVar("sentry_current_hub")
def _update_scope(base, scope_change, scope_kwargs):
# type: (Scope, Optional[Any], Dict[str, Any]) -> Scope
if scope_change and scope_kwargs:
raise TypeError("cannot provide scope and kwargs")
if scope_change is not None:
final_scope = copy.copy(base)
if callable(scope_change):
scope_change(final_scope)
else:
final_scope.update_from_scope(scope_change)
elif scope_kwargs:
final_scope = copy.copy(base)
final_scope.update_from_kwargs(**scope_kwargs)
else:
final_scope = base
return final_scope
def _should_send_default_pii():
# type: () -> bool
client = Hub.current.client
if not client:
return False
return client.options["send_default_pii"]
class _InitGuard(object):
def __init__(self, client):
# type: (Client) -> None
self._client = client
def __enter__(self):
# type: () -> _InitGuard
return self
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
c = self._client
if c is not None:
c.close()
def _check_python_deprecations():
# type: () -> None
version = sys.version_info[:2]
if version == (3, 4) or version == (3, 5):
logger.warning(
"sentry-sdk 2.0.0 will drop support for Python %s.",
"{}.{}".format(*version),
)
logger.warning(
"Please upgrade to the latest version to continue receiving upgrades and bugfixes."
)
def _init(*args, **kwargs):
# type: (*Optional[str], **Any) -> ContextManager[Any]
"""Initializes the SDK and optionally integrations.
This takes the same arguments as the client constructor.
"""
client = Client(*args, **kwargs) # type: ignore
Hub.current.bind_client(client)
_check_python_deprecations()
rv = _InitGuard(client)
return rv
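# Sketch: init() returns a guard usable as a context manager, so the client
# is closed (and flushed) when the block exits (DSN illustrative):
#
#     import sentry_sdk
#
#     with sentry_sdk.init(dsn="..."):
#         sentry_sdk.capture_message("runs with the client bound")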
from sentry_sdk._types import MYPY
if MYPY:
# Make mypy, PyCharm and other static analyzers think `init` is a type to
# have nicer autocompletion for params.
#
# Use `ClientConstructor` to define the argument types of `init` and
# `ContextManager[Any]` to tell static analyzers about the return type.
class init(ClientConstructor, _InitGuard): # noqa: N801
pass
else:
# Alias `init` for actual usage. Go through the lambda indirection to throw
# PyCharm off of the weakly typed signature (it would otherwise discover
# both the weakly typed signature of `_init` and our faked `init` type).
init = (lambda: _init)()
class HubMeta(type):
@property
def current(cls):
# type: () -> Hub
"""Returns the current instance of the hub."""
rv = _local.get(None)
if rv is None:
rv = Hub(GLOBAL_HUB)
_local.set(rv)
return rv
@property
def main(cls):
# type: () -> Hub
"""Returns the main instance of the hub."""
return GLOBAL_HUB
class _ScopeManager(object):
def __init__(self, hub):
# type: (Hub) -> None
self._hub = hub
self._original_len = len(hub._stack)
self._layer = hub._stack[-1]
def __enter__(self):
# type: () -> Scope
scope = self._layer[1]
assert scope is not None
return scope
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
current_len = len(self._hub._stack)
if current_len < self._original_len:
logger.error(
"Scope popped too soon. Popped %s scopes too many.",
self._original_len - current_len,
)
return
elif current_len > self._original_len:
logger.warning(
"Leaked %s scopes: %s",
current_len - self._original_len,
self._hub._stack[self._original_len :],
)
layer = self._hub._stack[self._original_len - 1]
del self._hub._stack[self._original_len - 1 :]
if layer[1] != self._layer[1]:
logger.error(
"Wrong scope found. Meant to pop %s, but popped %s.",
layer[1],
self._layer[1],
)
elif layer[0] != self._layer[0]:
warning = (
"init() called inside of pushed scope. This might be entirely "
"legitimate but usually occurs when initializing the SDK inside "
"a request handler or task/job function. Try to initialize the "
"SDK as early as possible instead."
)
logger.warning(warning)
class Hub(with_metaclass(HubMeta)): # type: ignore
"""The hub wraps the concurrency management of the SDK. Each thread has
its own hub but the hub might transfer with the flow of execution if
context vars are available.
If the hub is used with a with statement it's temporarily activated.
"""
_stack = None # type: List[Tuple[Optional[Client], Scope]]
# Mypy doesn't pick up on the metaclass.
if MYPY:
current = None # type: Hub
main = None # type: Hub
def __init__(
self,
client_or_hub=None, # type: Optional[Union[Hub, Client]]
scope=None, # type: Optional[Any]
):
# type: (...) -> None
if isinstance(client_or_hub, Hub):
hub = client_or_hub
client, other_scope = hub._stack[-1]
if scope is None:
scope = copy.copy(other_scope)
else:
client = client_or_hub
if scope is None:
scope = Scope()
self._stack = [(client, scope)]
self._last_event_id = None # type: Optional[str]
self._old_hubs = [] # type: List[Hub]
def __enter__(self):
# type: () -> Hub
self._old_hubs.append(Hub.current)
_local.set(self)
return self
def __exit__(
self,
exc_type, # type: Optional[type]
exc_value, # type: Optional[BaseException]
tb, # type: Optional[Any]
):
# type: (...) -> None
old = self._old_hubs.pop()
_local.set(old)
def run(
self, callback # type: Callable[[], T]
):
# type: (...) -> T
"""Runs a callback in the context of the hub. Alternatively the
with statement can be used on the hub directly.
"""
with self:
return callback()
def get_integration(
self, name_or_class # type: Union[str, Type[Integration]]
):
# type: (...) -> Any
"""Returns the integration for this hub by name or class. If there
is no client bound or the client does not have that integration
then `None` is returned.
If the return value is not `None` the hub is guaranteed to have a
client attached.
"""
if isinstance(name_or_class, str):
integration_name = name_or_class
elif name_or_class.identifier is not None:
integration_name = name_or_class.identifier
else:
raise ValueError("Integration has no name")
client = self.client
if client is not None:
rv = client.integrations.get(integration_name)
if rv is not None:
return rv
@property
def client(self):
# type: () -> Optional[Client]
"""Returns the current client on the hub."""
return self._stack[-1][0]
@property
def scope(self):
# type: () -> Scope
"""Returns the current scope on the hub."""
return self._stack[-1][1]
def last_event_id(self):
# type: () -> Optional[str]
"""Returns the last event ID."""
return self._last_event_id
def bind_client(
self, new # type: Optional[Client]
):
# type: (...) -> None
"""Binds a new client to the hub."""
top = self._stack[-1]
self._stack[-1] = (new, top[1])
def capture_event(
self,
event, # type: Event
hint=None, # type: Optional[Hint]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
"""Captures an event. Alias of :py:meth:`sentry_sdk.Client.capture_event`."""
client, top_scope = self._stack[-1]
scope = _update_scope(top_scope, scope, scope_args)
if client is not None:
is_transaction = event.get("type") == "transaction"
rv = client.capture_event(event, hint, scope)
if rv is not None and not is_transaction:
self._last_event_id = rv
return rv
return None
def capture_message(
self,
message, # type: str
level=None, # type: Optional[str]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
"""Captures a message. The message is just a string. If no level
is provided the default level is `info`.
:returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.Client.capture_event`).
"""
if self.client is None:
return None
if level is None:
level = "info"
return self.capture_event(
{"message": message, "level": level}, scope=scope, **scope_args
)
def capture_exception(
self,
error=None, # type: Optional[Union[BaseException, ExcInfo]]
scope=None, # type: Optional[Any]
**scope_args # type: Any
):
# type: (...) -> Optional[str]
"""Captures an exception.
:param error: An exception to catch. If `None`, `sys.exc_info()` will be used.
:returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.Client.capture_event`).
"""
client = self.client
if client is None:
return None
if error is not None:
exc_info = exc_info_from_error(error)
else:
exc_info = sys.exc_info()
event, hint = event_from_exception(exc_info, client_options=client.options)
try:
return self.capture_event(event, hint=hint, scope=scope, **scope_args)
except Exception:
self._capture_internal_exception(sys.exc_info())
return None
def _capture_internal_exception(
self, exc_info # type: Any
):
# type: (...) -> Any
"""
Capture an exception that is likely caused by a bug in the SDK
itself.
These exceptions do not end up in Sentry and are just logged instead.
"""
logger.error("Internal error in sentry_sdk", exc_info=exc_info)
def add_breadcrumb(
self,
crumb=None, # type: Optional[Breadcrumb]
hint=None, # type: Optional[BreadcrumbHint]
**kwargs # type: Any
):
# type: (...) -> None
"""
Adds a breadcrumb.
:param crumb: Dictionary with the data as the sentry v7/v8 protocol expects.
:param hint: An optional value that can be used by `before_breadcrumb`
to customize the breadcrumbs that are emitted.
"""
client, scope = self._stack[-1]
if client is None:
logger.info("Dropped breadcrumb because no client bound")
return
crumb = dict(crumb or ()) # type: Breadcrumb
crumb.update(kwargs)
if not crumb:
return
hint = dict(hint or ()) # type: Hint
if crumb.get("timestamp") is None:
crumb["timestamp"] = datetime.utcnow()
if crumb.get("type") is None:
crumb["type"] = "default"
if client.options["before_breadcrumb"] is not None:
new_crumb = client.options["before_breadcrumb"](crumb, hint)
else:
new_crumb = crumb
if new_crumb is not None:
scope._breadcrumbs.append(new_crumb)
else:
logger.info("before breadcrumb dropped breadcrumb (%s)", crumb)
max_breadcrumbs = client.options["max_breadcrumbs"] # type: int
while len(scope._breadcrumbs) > max_breadcrumbs:
scope._breadcrumbs.popleft()
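    # Sketch of a manually recorded breadcrumb (field values illustrative;
    # the keys follow the Sentry v7/v8 breadcrumb protocol):
    #
    #     Hub.current.add_breadcrumb(
    #         category="auth",
    #         message="user logged in",
    #         level="info",
    #         data={"user_id": 42},
    #     )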
def start_span(
self,
span=None, # type: Optional[Span]
instrumenter=INSTRUMENTER.SENTRY, # type: str
**kwargs # type: Any
):
# type: (...) -> Span
"""
Create and start timing a new span whose parent is the currently active
span or transaction, if any. The return value is a span instance,
typically used as a context manager to start and stop timing in a `with`
block.
Only spans contained in a transaction are sent to Sentry. Most
integrations start a transaction at the appropriate time, for example
for every incoming HTTP request. Use `start_transaction` to start a new
transaction when one is not already in progress.
"""
configuration_instrumenter = self.client and self.client.options["instrumenter"]
if instrumenter != configuration_instrumenter:
return NoOpSpan()
# TODO: consider removing this in a future release.
# This is for backwards compatibility with releases before
# start_transaction existed, to allow for a smoother transition.
if isinstance(span, Transaction) or "transaction" in kwargs:
deprecation_msg = (
"Deprecated: use start_transaction to start transactions and "
"Transaction.start_child to start spans."
)
if isinstance(span, Transaction):
logger.warning(deprecation_msg)
return self.start_transaction(span)
if "transaction" in kwargs:
logger.warning(deprecation_msg)
name = kwargs.pop("transaction")
return self.start_transaction(name=name, **kwargs)
if span is not None:
return span
kwargs.setdefault("hub", self)
span = self.scope.span
if span is not None:
return span.start_child(**kwargs)
return Span(**kwargs)
def start_transaction(
self,
transaction=None, # type: Optional[Transaction]
instrumenter=INSTRUMENTER.SENTRY, # type: str
**kwargs # type: Any
):
# type: (...) -> Union[Transaction, NoOpSpan]
"""
Start and return a transaction.
Start an existing transaction if given, otherwise create and start a new
transaction with kwargs.
This is the entry point to manual tracing instrumentation.
A tree structure can be built by adding child spans to the transaction,
and child spans to other spans. To start a new child span within the
transaction or any span, call the respective `.start_child()` method.
Every child span must be finished before the transaction is finished,
otherwise the unfinished spans are discarded.
When used as context managers, spans and transactions are automatically
finished at the end of the `with` block. If not using context managers,
call the `.finish()` method.
When the transaction is finished, it will be sent to Sentry with all its
finished child spans.
"""
configuration_instrumenter = self.client and self.client.options["instrumenter"]
if instrumenter != configuration_instrumenter:
return NoOpSpan()
custom_sampling_context = kwargs.pop("custom_sampling_context", {})
# if we haven't been given a transaction, make one
if transaction is None:
kwargs.setdefault("hub", self)
transaction = Transaction(**kwargs)
# use traces_sample_rate, traces_sampler, and/or inheritance to make a
# sampling decision
sampling_context = {
"transaction_context": transaction.to_json(),
"parent_sampled": transaction.parent_sampled,
}
sampling_context.update(custom_sampling_context)
transaction._set_initial_sampling_decision(sampling_context=sampling_context)
# we don't bother to keep spans if we already know we're not going to
# send the transaction
if transaction.sampled:
max_spans = (
self.client and self.client.options["_experiments"].get("max_spans")
) or 1000
transaction.init_span_recorder(maxlen=max_spans)
return transaction
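    # Minimal manual-tracing sketch (op/name strings are illustrative):
    #
    #     with Hub.current.start_transaction(op="task", name="process-batch") as tx:
    #         with tx.start_child(op="db", description="load rows"):
    #             load_rows()  # hypothetical unit of work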
@overload
def push_scope(
self, callback=None # type: Optional[None]
):
# type: (...) -> ContextManager[Scope]
pass
@overload
def push_scope( # noqa: F811
self, callback # type: Callable[[Scope], None]
):
# type: (...) -> None
pass
def push_scope( # noqa
self, callback=None # type: Optional[Callable[[Scope], None]]
):
# type: (...) -> Optional[ContextManager[Scope]]
"""
Pushes a new layer on the scope stack.
:param callback: If provided, this method pushes a scope, calls
`callback`, and pops the scope again.
:returns: If no `callback` is provided, a context manager that should
be used to pop the scope again.
"""
if callback is not None:
with self.push_scope() as scope:
callback(scope)
return None
client, scope = self._stack[-1]
new_layer = (client, copy.copy(scope))
self._stack.append(new_layer)
return _ScopeManager(self)
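    # Sketch: mutate a temporary scope that is popped automatically at the
    # end of the block (the tag is illustrative):
    #
    #     with Hub.current.push_scope() as scope:
    #         scope.set_tag("section", "checkout")
    #         Hub.current.capture_message("inside temporary scope")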
def pop_scope_unsafe(self):
# type: () -> Tuple[Optional[Client], Scope]
"""
Pops a scope layer from the stack.
Try to use the context manager :py:meth:`push_scope` instead.
"""
rv = self._stack.pop()
assert self._stack, "stack must have at least one layer"
return rv
@overload
def configure_scope(
self, callback=None # type: Optional[None]
):
# type: (...) -> ContextManager[Scope]
pass
@overload
def configure_scope( # noqa: F811
self, callback # type: Callable[[Scope], None]
):
# type: (...) -> None
pass
def configure_scope( # noqa
self, callback=None # type: Optional[Callable[[Scope], None]]
):
# type: (...) -> Optional[ContextManager[Scope]]
"""
Reconfigures the scope.
:param callback: If provided, call the callback with the current scope.
:returns: If no callback is provided, returns a context manager that returns the scope.
"""
client, scope = self._stack[-1]
if callback is not None:
if client is not None:
callback(scope)
return None
@contextmanager
def inner():
# type: () -> Generator[Scope, None, None]
if client is not None:
yield scope
else:
yield Scope()
return inner()
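    # Sketch: both calling conventions mutate the *current* scope rather
    # than pushing a new one (values illustrative):
    #
    #     with Hub.current.configure_scope() as scope:
    #         scope.set_user({"id": "42"})
    #
    #     Hub.current.configure_scope(lambda scope: scope.set_tag("k", "v"))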
def start_session(
self, session_mode="application" # type: str
):
# type: (...) -> None
"""Starts a new session."""
self.end_session()
client, scope = self._stack[-1]
scope._session = Session(
release=client.options["release"] if client else None,
environment=client.options["environment"] if client else None,
user=scope._user,
session_mode=session_mode,
)
def end_session(self):
# type: (...) -> None
"""Ends the current session if there is one."""
client, scope = self._stack[-1]
session = scope._session
self.scope._session = None
if session is not None:
session.close()
if client is not None:
client.capture_session(session)
def stop_auto_session_tracking(self):
# type: (...) -> None
"""Stops automatic session tracking.
        This temporarily disables session tracking for the current scope when called.
To resume session tracking call `resume_auto_session_tracking`.
"""
self.end_session()
client, scope = self._stack[-1]
scope._force_auto_session_tracking = False
def resume_auto_session_tracking(self):
# type: (...) -> None
"""Resumes automatic session tracking for the current scope if
        disabled earlier. This requires that automatic session tracking is
        enabled in the first place.
"""
client, scope = self._stack[-1]
scope._force_auto_session_tracking = None
def flush(
self,
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
"""
Alias for :py:meth:`sentry_sdk.Client.flush`
"""
client, scope = self._stack[-1]
if client is not None:
return client.flush(timeout=timeout, callback=callback)
def iter_trace_propagation_headers(self, span=None):
# type: (Optional[Span]) -> Generator[Tuple[str, str], None, None]
"""
Return HTTP headers which allow propagation of trace data. Data taken
from the span representing the request, if available, or the current
span on the scope if not.
"""
span = span or self.scope.span
if not span:
return
client = self._stack[-1][0]
propagate_traces = client and client.options["propagate_traces"]
if not propagate_traces:
return
for header in span.iter_headers():
yield header
def trace_propagation_meta(self, span=None):
# type: (Optional[Span]) -> str
"""
Return meta tags which should be injected into the HTML template
to allow propagation of trace data.
"""
meta = ""
for name, content in self.iter_trace_propagation_headers(span):
meta += '<meta name="%s" content="%s">' % (name, content)
return meta
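    # Illustrative use in a server-rendered template: inject the returned
    # tags into the page <head> so a browser SDK can continue the trace.
    # The exact header names come from Span.iter_headers().
    #
    #     meta_html = Hub.current.trace_propagation_meta()
    #     # e.g. '<meta name="sentry-trace" content="...">'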
GLOBAL_HUB = Hub()
_local.set(GLOBAL_HUB)
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/worker.py
import os
import threading
from time import sleep, time
from sentry_sdk._compat import check_thread_support
from sentry_sdk._queue import Queue, FullError
from sentry_sdk.utils import logger
from sentry_sdk.consts import DEFAULT_QUEUE_SIZE
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Optional
from typing import Callable
_TERMINATOR = object()
class BackgroundWorker(object):
def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
# type: (int) -> None
check_thread_support()
self._queue = Queue(queue_size) # type: Queue
self._lock = threading.Lock()
self._thread = None # type: Optional[threading.Thread]
self._thread_for_pid = None # type: Optional[int]
@property
def is_alive(self):
# type: () -> bool
if self._thread_for_pid != os.getpid():
return False
if not self._thread:
return False
return self._thread.is_alive()
def _ensure_thread(self):
# type: () -> None
if not self.is_alive:
self.start()
def _timed_queue_join(self, timeout):
# type: (float) -> bool
deadline = time() + timeout
queue = self._queue
queue.all_tasks_done.acquire()
try:
while queue.unfinished_tasks:
delay = deadline - time()
if delay <= 0:
return False
queue.all_tasks_done.wait(timeout=delay)
return True
finally:
queue.all_tasks_done.release()
def start(self):
# type: () -> None
with self._lock:
if not self.is_alive:
self._thread = threading.Thread(
target=self._target, name="raven-sentry.BackgroundWorker"
)
self._thread.daemon = True
self._thread.start()
self._thread_for_pid = os.getpid()
def kill(self):
# type: () -> None
"""
        Kill the worker thread. Returns immediately; it does not wait for
        pending events to be sent, use `flush` for that.
"""
logger.debug("background worker got kill request")
with self._lock:
if self._thread:
try:
self._queue.put_nowait(_TERMINATOR)
except FullError:
logger.debug("background worker queue full, kill failed")
self._thread = None
self._thread_for_pid = None
def flush(self, timeout, callback=None):
# type: (float, Optional[Any]) -> None
logger.debug("background worker got flush request")
with self._lock:
if self.is_alive and timeout > 0.0:
self._wait_flush(timeout, callback)
logger.debug("background worker flushed")
def _wait_flush(self, timeout, callback):
# type: (float, Optional[Any]) -> None
initial_timeout = min(0.1, timeout)
if not self._timed_queue_join(initial_timeout):
pending = self._queue.qsize() + 1
logger.debug("%d event(s) pending on flush", pending)
if callback is not None:
callback(pending, timeout)
if not self._timed_queue_join(timeout - initial_timeout):
pending = self._queue.qsize() + 1
logger.error("flush timed out, dropped %s events", pending)
def submit(self, callback):
# type: (Callable[[], None]) -> bool
self._ensure_thread()
try:
self._queue.put_nowait(callback)
return True
except FullError:
return False
def _target(self):
# type: () -> None
while True:
callback = self._queue.get()
try:
if callback is _TERMINATOR:
break
try:
callback()
except Exception:
logger.error("Failed processing job", exc_info=True)
finally:
self._queue.task_done()
sleep(0)
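# Usage sketch (the transport drives this worker internally; values here are
# illustrative):
#
#     worker = BackgroundWorker(queue_size=100)
#     accepted = worker.submit(lambda: print("runs on the worker thread"))
#     # submit() returns False when the queue is full and the job is dropped
#     worker.flush(timeout=2.0)  # wait up to ~2s for pending jobs
#     worker.kill()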
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/asgi.py
"""
An ASGI middleware.
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`.
"""
import asyncio
import inspect
import urllib
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.integrations.modules import _get_installed_modules
from sentry_sdk.profiler import start_profiling
from sentry_sdk.sessions import auto_session_tracking
from sentry_sdk.tracing import (
SOURCE_FOR_STYLE,
TRANSACTION_SOURCE_ROUTE,
)
from sentry_sdk.utils import (
ContextVar,
event_from_exception,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
logger,
transaction_from_function,
)
from sentry_sdk.tracing import Transaction
if MYPY:
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable
from typing_extensions import Literal
from sentry_sdk._types import Event, Hint
_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
TRANSACTION_STYLE_VALUES = ("endpoint", "url")
def _capture_exception(hub, exc, mechanism_type="asgi"):
# type: (Hub, Any, str) -> None
# Check client here as it might have been unset while streaming response
if hub.client is not None:
event, hint = event_from_exception(
exc,
client_options=hub.client.options,
mechanism={"type": mechanism_type, "handled": False},
)
hub.capture_event(event, hint=hint)
def _looks_like_asgi3(app):
# type: (Any) -> bool
"""
Try to figure out if an application object supports ASGI3.
This is how uvicorn figures out the application version as well.
"""
if inspect.isclass(app):
return hasattr(app, "__await__")
elif inspect.isfunction(app):
return asyncio.iscoroutinefunction(app)
else:
call = getattr(app, "__call__", None) # noqa
return asyncio.iscoroutinefunction(call)
class SentryAsgiMiddleware:
__slots__ = ("app", "__call__", "transaction_style", "mechanism_type")
def __init__(
self,
app,
unsafe_context_data=False,
transaction_style="endpoint",
mechanism_type="asgi",
):
# type: (Any, bool, str, str) -> None
"""
Instrument an ASGI application with Sentry. Provides HTTP/websocket
data to sent events and basic handling for exceptions bubbling up
through the middleware.
:param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
"""
if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
raise RuntimeError(
"The ASGI middleware for Sentry requires Python 3.7+ "
"or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
asgi_middleware_while_using_starlette_or_fastapi = (
mechanism_type == "asgi" and "starlette" in _get_installed_modules()
)
if asgi_middleware_while_using_starlette_or_fastapi:
logger.warning(
"The Sentry Python SDK can now automatically support ASGI frameworks like Starlette and FastAPI. "
"Please remove 'SentryAsgiMiddleware' from your project. "
"See https://docs.sentry.io/platforms/python/guides/asgi/ for more information."
)
self.transaction_style = transaction_style
self.mechanism_type = mechanism_type
self.app = app
if _looks_like_asgi3(app):
self.__call__ = self._run_asgi3 # type: Callable[..., Any]
else:
self.__call__ = self._run_asgi2
def _run_asgi2(self, scope):
# type: (Any) -> Any
async def inner(receive, send):
# type: (Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope)(receive, send))
return inner
async def _run_asgi3(self, scope, receive, send):
# type: (Any, Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)
if is_recursive_asgi_middleware:
try:
return await callback()
except Exception as exc:
_capture_exception(Hub.current, exc, mechanism_type=self.mechanism_type)
raise exc from None
_asgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with auto_session_tracking(hub, session_mode="request"):
with hub:
with hub.configure_scope() as sentry_scope:
sentry_scope.clear_breadcrumbs()
sentry_scope._name = "asgi"
processor = partial(self.event_processor, asgi_scope=scope)
sentry_scope.add_event_processor(processor)
ty = scope["type"]
if ty in ("http", "websocket"):
transaction = Transaction.continue_from_headers(
self._get_headers(scope),
op="{}.server".format(ty),
)
else:
transaction = Transaction(op=OP.HTTP_SERVER)
transaction.name = _DEFAULT_TRANSACTION_NAME
transaction.source = TRANSACTION_SOURCE_ROUTE
transaction.set_tag("asgi.type", ty)
with hub.start_transaction(
transaction, custom_sampling_context={"asgi_scope": scope}
), start_profiling(transaction, hub):
# XXX: Would be cool to have correct span status, but we
# would have to wrap send(). That is a bit hard to do with
# the current abstraction over ASGI 2/3.
try:
return await callback()
except Exception as exc:
_capture_exception(
hub, exc, mechanism_type=self.mechanism_type
)
raise exc from None
finally:
_asgi_middleware_applied.set(False)
def event_processor(self, event, hint, asgi_scope):
# type: (Event, Hint, Any) -> Optional[Event]
request_info = event.get("request", {})
ty = asgi_scope["type"]
if ty in ("http", "websocket"):
request_info["method"] = asgi_scope.get("method")
request_info["headers"] = headers = _filter_headers(
self._get_headers(asgi_scope)
)
request_info["query_string"] = self._get_query(asgi_scope)
request_info["url"] = self._get_url(
asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
)
client = asgi_scope.get("client")
if client and _should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": self._get_ip(asgi_scope)}
self._set_transaction_name_and_source(event, self.transaction_style, asgi_scope)
event["request"] = request_info
return event
# Helper functions for extracting request data.
#
# Note: Those functions are not public API. If you want to mutate request
# data to your liking it's recommended to use the `before_send` callback
# for that.
def _set_transaction_name_and_source(self, event, transaction_style, asgi_scope):
# type: (Event, str, Any) -> None
transaction_name_already_set = (
event.get("transaction", _DEFAULT_TRANSACTION_NAME)
!= _DEFAULT_TRANSACTION_NAME
)
if transaction_name_already_set:
return
name = ""
if transaction_style == "endpoint":
endpoint = asgi_scope.get("endpoint")
# Webframeworks like Starlette mutate the ASGI env once routing is
# done, which is sometime after the request has started. If we have
# an endpoint, overwrite our generic transaction name.
if endpoint:
name = transaction_from_function(endpoint) or ""
elif transaction_style == "url":
# FastAPI includes the route object in the scope to let Sentry extract the
# path from it for the transaction name
route = asgi_scope.get("route")
if route:
path = getattr(route, "path", None)
if path is not None:
name = path
if not name:
event["transaction"] = _DEFAULT_TRANSACTION_NAME
event["transaction_info"] = {"source": TRANSACTION_SOURCE_ROUTE}
return
event["transaction"] = name
event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
def _get_url(self, scope, default_scheme, host):
# type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str
"""
Extract URL from the ASGI scope, without also including the querystring.
"""
scheme = scope.get("scheme", default_scheme)
server = scope.get("server", None)
path = scope.get("root_path", "") + scope.get("path", "")
if host:
return "%s://%s%s" % (scheme, host, path)
if server is not None:
host, port = server
default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
if port != default_port:
return "%s://%s:%s%s" % (scheme, host, port, path)
return "%s://%s%s" % (scheme, host, path)
return path
def _get_query(self, scope):
# type: (Any) -> Any
"""
Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
"""
qs = scope.get("query_string")
if not qs:
return None
return urllib.parse.unquote(qs.decode("latin-1"))
def _get_ip(self, scope):
# type: (Any) -> str
"""
Extract IP Address from the ASGI scope based on request headers with fallback to scope client.
"""
headers = self._get_headers(scope)
try:
return headers["x-forwarded-for"].split(",")[0].strip()
except (KeyError, IndexError):
pass
try:
return headers["x-real-ip"]
except KeyError:
pass
return scope.get("client")[0]
def _get_headers(self, scope):
# type: (Any) -> Dict[str, str]
"""
Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
"""
headers = {} # type: Dict[str, str]
for raw_key, raw_value in scope["headers"]:
key = raw_key.decode("latin-1")
value = raw_value.decode("latin-1")
if key in headers:
headers[key] = headers[key] + ", " + value
else:
headers[key] = value
return headers
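# Hedged wiring sketch for a bare ASGI app (framework-specific integrations,
# e.g. for Starlette/FastAPI, are preferred where available; the DSN and
# `my_asgi_app` are illustrative):
#
#     import sentry_sdk
#     from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
#
#     sentry_sdk.init(dsn="...")
#     app = SentryAsgiMiddleware(my_asgi_app, transaction_style="url")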
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/pyramid.py
from __future__ import absolute_import
import os
import sys
import weakref
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.scope import Scope
from sentry_sdk.tracing import SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._compat import reraise, iteritems
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
try:
from pyramid.httpexceptions import HTTPException
from pyramid.request import Request
except ImportError:
raise DidNotEnable("Pyramid not installed")
from sentry_sdk._types import MYPY
if MYPY:
from pyramid.response import Response
from typing import Any
from sentry_sdk.integrations.wsgi import _ScopedResponse
from typing import Callable
from typing import Dict
from typing import Optional
from webob.cookies import RequestCookies # type: ignore
from webob.compat import cgi_FieldStorage # type: ignore
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
if getattr(Request, "authenticated_userid", None):
def authenticated_userid(request):
# type: (Request) -> Optional[Any]
return request.authenticated_userid
else:
# bw-compat for pyramid < 1.5
from pyramid.security import authenticated_userid # type: ignore
TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
class PyramidIntegration(Integration):
identifier = "pyramid"
transaction_style = ""
def __init__(self, transaction_style="route_name"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
from pyramid import router
old_call_view = router._call_view
def sentry_patched_call_view(registry, request, *args, **kwargs):
# type: (Any, Request, *Any, **Any) -> Response
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is not None:
with hub.configure_scope() as scope:
_set_transaction_name_and_source(
scope, integration.transaction_style, request
)
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
return old_call_view(registry, request, *args, **kwargs)
router._call_view = sentry_patched_call_view
if hasattr(Request, "invoke_exception_view"):
old_invoke_exception_view = Request.invoke_exception_view
def sentry_patched_invoke_exception_view(self, *args, **kwargs):
# type: (Request, *Any, **Any) -> Any
rv = old_invoke_exception_view(self, *args, **kwargs)
if (
self.exc_info
and all(self.exc_info)
and rv.status_int == 500
and Hub.current.get_integration(PyramidIntegration) is not None
):
_capture_exception(self.exc_info)
return rv
Request.invoke_exception_view = sentry_patched_invoke_exception_view
old_wsgi_call = router.Router.__call__
def sentry_patched_wsgi_call(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is None:
return old_wsgi_call(self, environ, start_response)
def sentry_patched_inner_wsgi_call(environ, start_response):
# type: (Dict[str, Any], Callable[..., Any]) -> Any
try:
return old_wsgi_call(self, environ, start_response)
except Exception:
einfo = sys.exc_info()
_capture_exception(einfo)
reraise(*einfo)
return SentryWsgiMiddleware(sentry_patched_inner_wsgi_call)(
environ, start_response
)
router.Router.__call__ = sentry_patched_wsgi_call
def _capture_exception(exc_info):
# type: (ExcInfo) -> None
if exc_info[0] is None or issubclass(exc_info[0], HTTPException):
return
hub = Hub.current
if hub.get_integration(PyramidIntegration) is None:
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "pyramid", "handled": False},
)
hub.capture_event(event, hint=hint)
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (Scope, str, Request) -> None
try:
name_for_style = {
"route_name": request.matched_route.name,
"route_pattern": request.matched_route.pattern,
}
scope.set_transaction_name(
name_for_style[transaction_style],
source=SOURCE_FOR_STYLE[transaction_style],
)
except Exception:
pass
class PyramidRequestExtractor(RequestExtractor):
def url(self):
# type: () -> str
return self.request.path_url
def env(self):
# type: () -> Dict[str, str]
return self.request.environ
def cookies(self):
# type: () -> RequestCookies
return self.request.cookies
def raw_data(self):
# type: () -> str
return self.request.text
def form(self):
# type: () -> Dict[str, str]
return {
key: value
for key, value in iteritems(self.request.POST)
if not getattr(value, "filename", None)
}
def files(self):
# type: () -> Dict[str, cgi_FieldStorage]
return {
key: value
for key, value in iteritems(self.request.POST)
if getattr(value, "filename", None)
}
def size_of_file(self, postdata):
# type: (cgi_FieldStorage) -> int
file = postdata.file
try:
return os.fstat(file.fileno()).st_size
except Exception:
return 0
def _make_event_processor(weak_request, integration):
# type: (Callable[[], Request], PyramidIntegration) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
request = weak_request()
if request is None:
return event
with capture_internal_exceptions():
PyramidRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
user_info = event.setdefault("user", {})
user_info.setdefault("id", authenticated_userid(request))
return event
return event_processor
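# Enabling sketch (DSN illustrative; "route_pattern" is one of the two
# accepted transaction styles):
#
#     import sentry_sdk
#     from sentry_sdk.integrations.pyramid import PyramidIntegration
#
#     sentry_sdk.init(
#         dsn="...",
#         integrations=[PyramidIntegration(transaction_style="route_pattern")],
#     )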
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/gnu_backtrace.py
import re
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.utils import capture_internal_exceptions
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Dict
MODULE_RE = r"[a-zA-Z0-9/._:\\-]+"
TYPE_RE = r"[a-zA-Z0-9._:<>,-]+"
HEXVAL_RE = r"[A-Fa-f0-9]+"
FRAME_RE = r"""
^(?P<index>\d+)\.\s
(?P<package>{MODULE_RE})\(
(?P<retval>{TYPE_RE}\ )?
((?P<function>{TYPE_RE})
(?P<args>\(.*\))?
)?
((?P<constoffset>\ const)?\+0x(?P<offset>{HEXVAL_RE}))?
\)\s
\[0x(?P<retaddr>{HEXVAL_RE})\]$
""".format(
MODULE_RE=MODULE_RE, HEXVAL_RE=HEXVAL_RE, TYPE_RE=TYPE_RE
)
FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE)
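# An illustrative backtrace line the pattern above is meant to match
# (ClickHouse-style GNU backtrace output; the addresses are made up):
#
#     10. /usr/bin/clickhouse-server(StackTrace::StackTrace()+0x16) [0x99d31a6]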
class GnuBacktraceIntegration(Integration):
identifier = "gnu_backtrace"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def process_gnu_backtrace(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
with capture_internal_exceptions():
return _process_gnu_backtrace(event, hint)
def _process_gnu_backtrace(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
if Hub.current.get_integration(GnuBacktraceIntegration) is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception in values:
frames = exception.get("stacktrace", {}).get("frames", [])
if not frames:
continue
msg = exception.get("value", None)
if not msg:
continue
additional_frames = []
new_msg = []
for line in msg.splitlines():
match = FRAME_RE.match(line)
if match:
additional_frames.append(
(
int(match.group("index")),
{
"package": match.group("package") or None,
"function": match.group("function") or None,
"platform": "native",
},
)
)
else:
# Put garbage lines back into message, not sure what to do with them.
new_msg.append(line)
if additional_frames:
additional_frames.sort(key=lambda x: -x[0])
for _, frame in additional_frames:
frames.append(frame)
new_msg.append("<stacktrace parsed and removed by GnuBacktraceIntegration>")
exception["value"] = "\n".join(new_msg)
return event
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/argv.py
from __future__ import absolute_import
import sys
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk._types import MYPY
if MYPY:
from typing import Optional
from sentry_sdk._types import Event, Hint
class ArgvIntegration(Integration):
identifier = "argv"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def processor(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if Hub.current.get_integration(ArgvIntegration) is not None:
extra = event.setdefault("extra", {})
# If some event processor decided to set extra to e.g. an
# `int`, don't crash. Not here.
if isinstance(extra, dict):
extra["sys.argv"] = sys.argv
return event
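# Enabling sketch (DSN illustrative): once active, every event's `extra`
# gains a "sys.argv" entry with the process command line.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.argv import ArgvIntegration
#
#     sentry_sdk.init(dsn="...", integrations=[ArgvIntegration()])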
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/wsgi.py
import sys
from sentry_sdk._functools import partial
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import (
ContextVar,
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._compat import PY2, reraise, iteritems
from sentry_sdk.tracing import Transaction, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.sessions import auto_session_tracking
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.profiler import start_profiling
from sentry_sdk._types import MYPY
if MYPY:
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import Any
from typing import Tuple
from typing import Optional
from typing import TypeVar
from typing import Protocol
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
WsgiResponseIter = TypeVar("WsgiResponseIter")
WsgiResponseHeaders = TypeVar("WsgiResponseHeaders")
WsgiExcInfo = TypeVar("WsgiExcInfo")
class StartResponse(Protocol):
def __call__(self, status, response_headers, exc_info=None):
# type: (str, WsgiResponseHeaders, Optional[WsgiExcInfo]) -> WsgiResponseIter
pass
_wsgi_middleware_applied = ContextVar("sentry_wsgi_middleware_applied")
if PY2:
def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
# type: (str, str, str) -> str
return s.decode(charset, errors)
else:
def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
# type: (str, str, str) -> str
return s.encode("latin1").decode(charset, errors)
def get_host(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""Return the host for the given WSGI environment. Yanked from Werkzeug."""
if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
rv = environ["HTTP_X_FORWARDED_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv
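# Worked example (illustrative, not part of the original module): with
# use_x_forwarded_for=True and an environ such as
#
#     {"wsgi.url_scheme": "https",
#      "HTTP_X_FORWARDED_HOST": "example.com:443",
#      "HTTP_HOST": "internal:8080"}
#
# get_host returns "example.com" -- the default port for the scheme is
# stripped, and the forwarded host wins over HTTP_HOST and SERVER_NAME.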
def get_request_url(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""Return the absolute URL without query string for the given WSGI
environment."""
return "%s://%s/%s" % (
environ.get("wsgi.url_scheme"),
get_host(environ, use_x_forwarded_for),
wsgi_decoding_dance(environ.get("PATH_INFO") or "").lstrip("/"),
)
class SentryWsgiMiddleware(object):
__slots__ = ("app", "use_x_forwarded_for")
def __init__(self, app, use_x_forwarded_for=False):
# type: (Callable[[Dict[str, str], Callable[..., Any]], Any], bool) -> None
self.app = app
self.use_x_forwarded_for = use_x_forwarded_for
def __call__(self, environ, start_response):
# type: (Dict[str, str], Callable[..., Any]) -> _ScopedResponse
if _wsgi_middleware_applied.get(False):
return self.app(environ, start_response)
_wsgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with auto_session_tracking(hub, session_mode="request"):
with hub:
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope._name = "wsgi"
scope.add_event_processor(
_make_wsgi_event_processor(
environ, self.use_x_forwarded_for
)
)
transaction = Transaction.continue_from_environ(
environ,
op=OP.HTTP_SERVER,
name="generic WSGI request",
source=TRANSACTION_SOURCE_ROUTE,
)
with hub.start_transaction(
transaction, custom_sampling_context={"wsgi_environ": environ}
), start_profiling(transaction, hub):
try:
rv = self.app(
environ,
partial(
_sentry_start_response, start_response, transaction
),
)
except BaseException:
reraise(*_capture_exception(hub))
finally:
_wsgi_middleware_applied.set(False)
return _ScopedResponse(hub, rv)
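# Usage sketch (illustrative, not part of the original module): any WSGI
# callable can be wrapped directly; the app and DSN below are hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
#
#     sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")
#
#     def application(environ, start_response):
#         start_response("200 OK", [("Content-Type", "text/plain")])
#         return [b"hello"]
#
#     application = SentryWsgiMiddleware(application, use_x_forwarded_for=True)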
def _sentry_start_response(
old_start_response, # type: StartResponse
transaction, # type: Transaction
status, # type: str
response_headers, # type: WsgiResponseHeaders
exc_info=None, # type: Optional[WsgiExcInfo]
):
# type: (...) -> WsgiResponseIter
with capture_internal_exceptions():
status_int = int(status.split(" ", 1)[0])
transaction.set_http_status(status_int)
if exc_info is None:
# The Django Rest Framework WSGI test client, and likely other
# (incorrect) implementations, cannot deal with the exc_info argument
# if one is present. Avoid providing a third argument if not necessary.
return old_start_response(status, response_headers)
else:
return old_start_response(status, response_headers, exc_info)
def _get_environ(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
    Returns the explicitly included environment variables we want to
    capture (server name, port, and remote addr if PII is enabled).
"""
keys = ["SERVER_NAME", "SERVER_PORT"]
if _should_send_default_pii():
# make debugging of proxy setup easier. Proxy headers are
# in headers.
keys += ["REMOTE_ADDR"]
for key in keys:
if key in environ:
yield key, environ[key]
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
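# Worked example (illustrative, not part of the original module): an environ
# containing {"HTTP_X_FORWARDED_FOR": "1.2.3.4", "CONTENT_TYPE": "text/html"}
# yields ("X-Forwarded-For", "1.2.3.4") and ("Content-Type", "text/html");
# HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are excluded so the CGI-style
# duplicates don't shadow the real Content-Type/Content-Length headers.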
def get_client_ip(environ):
# type: (Dict[str, str]) -> Optional[Any]
"""
Infer the user IP address from various headers. This cannot be used in
    security-sensitive situations since the value may be forged by a client,
but it's good enough for the event payload.
"""
try:
return environ["HTTP_X_FORWARDED_FOR"].split(",")[0].strip()
except (KeyError, IndexError):
pass
try:
return environ["HTTP_X_REAL_IP"]
except KeyError:
pass
return environ.get("REMOTE_ADDR")
def _capture_exception(hub):
# type: (Hub) -> ExcInfo
exc_info = sys.exc_info()
# Check client here as it might have been unset while streaming response
if hub.client is not None:
e = exc_info[1]
# SystemExit(0) is the only uncaught exception that is expected behavior
should_skip_capture = isinstance(e, SystemExit) and e.code in (0, None)
if not should_skip_capture:
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "wsgi", "handled": False},
)
hub.capture_event(event, hint=hint)
return exc_info
class _ScopedResponse(object):
__slots__ = ("_response", "_hub")
def __init__(self, hub, response):
# type: (Hub, Iterator[bytes]) -> None
self._hub = hub
self._response = response
def __iter__(self):
# type: () -> Iterator[bytes]
iterator = iter(self._response)
while True:
with self._hub:
try:
chunk = next(iterator)
except StopIteration:
break
except BaseException:
reraise(*_capture_exception(self._hub))
yield chunk
def close(self):
# type: () -> None
with self._hub:
try:
self._response.close() # type: ignore
except AttributeError:
pass
except BaseException:
reraise(*_capture_exception(self._hub))
def _make_wsgi_event_processor(environ, use_x_forwarded_for):
# type: (Dict[str, str], bool) -> EventProcessor
# It's a bit unfortunate that we have to extract and parse the request data
# from the environ so eagerly, but there are a few good reasons for this.
#
# We might be in a situation where the scope/hub never gets torn down
# properly. In that case we will have an unnecessary strong reference to
# all objects in the environ (some of which may take a lot of memory) when
# we're really just interested in a few of them.
#
# Keeping the environment around for longer than the request lifecycle is
# also not necessarily something uWSGI can deal with:
# https://github.com/unbit/uwsgi/issues/1950
client_ip = get_client_ip(environ)
request_url = get_request_url(environ, use_x_forwarded_for)
query_string = environ.get("QUERY_STRING")
method = environ.get("REQUEST_METHOD")
env = dict(_get_environ(environ))
headers = _filter_headers(dict(_get_headers(environ)))
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
with capture_internal_exceptions():
# if the code below fails halfway through we at least have some data
request_info = event.setdefault("request", {})
if _should_send_default_pii():
user_info = event.setdefault("user", {})
if client_ip:
user_info.setdefault("ip_address", client_ip)
request_info["url"] = request_url
request_info["query_string"] = query_string
request_info["method"] = method
request_info["env"] = env
request_info["headers"] = headers
return event
return event_processor
| 11,397 | Python | 33.96319 | 89 | 0.583224 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/aiohttp.py | import sys
import weakref
from sentry_sdk._compat import reraise
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.sessions import auto_session_tracking
from sentry_sdk.integrations._wsgi_common import (
_filter_headers,
request_body_within_bounds,
)
from sentry_sdk.tracing import SOURCE_FOR_STYLE, Transaction, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
AnnotatedValue,
)
try:
import asyncio
from aiohttp import __version__ as AIOHTTP_VERSION
from aiohttp.web import Application, HTTPException, UrlDispatcher
except ImportError:
raise DidNotEnable("AIOHTTP not installed")
from sentry_sdk._types import MYPY
if MYPY:
from aiohttp.web_request import Request
from aiohttp.abc import AbstractMatchInfo
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Callable
from typing import Union
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
TRANSACTION_STYLE_VALUES = ("handler_name", "method_and_path_pattern")
class AioHttpIntegration(Integration):
identifier = "aiohttp"
def __init__(self, transaction_style="handler_name"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, AIOHTTP_VERSION.split(".")[:2]))
except (TypeError, ValueError):
raise DidNotEnable("AIOHTTP version unparsable: {}".format(AIOHTTP_VERSION))
if version < (3, 4):
raise DidNotEnable("AIOHTTP 3.4 or newer required.")
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
raise DidNotEnable(
"The aiohttp integration for Sentry requires Python 3.7+ "
" or aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
ignore_logger("aiohttp.server")
old_handle = Application._handle
async def sentry_app_handle(self, request, *args, **kwargs):
# type: (Any, Request, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(AioHttpIntegration) is None:
return await old_handle(self, request, *args, **kwargs)
weak_request = weakref.ref(request)
with Hub(hub) as hub:
with auto_session_tracking(hub, session_mode="request"):
                    # Scope data will not leak between requests because aiohttp
                    # creates a task to wrap each request.
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_request_processor(weak_request))
transaction = Transaction.continue_from_headers(
request.headers,
op=OP.HTTP_SERVER,
# If this transaction name makes it to the UI, AIOHTTP's
# URL resolver did not find a route or died trying.
name="generic AIOHTTP request",
source=TRANSACTION_SOURCE_ROUTE,
)
with hub.start_transaction(
transaction,
custom_sampling_context={"aiohttp_request": request},
):
try:
response = await old_handle(self, request)
except HTTPException as e:
transaction.set_http_status(e.status_code)
raise
except (asyncio.CancelledError, ConnectionResetError):
transaction.set_status("cancelled")
raise
except Exception:
# This will probably map to a 500 but seems like we
# have no way to tell. Do not set span status.
reraise(*_capture_exception(hub))
transaction.set_http_status(response.status)
return response
Application._handle = sentry_app_handle
old_urldispatcher_resolve = UrlDispatcher.resolve
async def sentry_urldispatcher_resolve(self, request):
# type: (UrlDispatcher, Request) -> AbstractMatchInfo
rv = await old_urldispatcher_resolve(self, request)
hub = Hub.current
integration = hub.get_integration(AioHttpIntegration)
name = None
try:
if integration.transaction_style == "handler_name":
name = transaction_from_function(rv.handler)
elif integration.transaction_style == "method_and_path_pattern":
route_info = rv.get_info()
pattern = route_info.get("path") or route_info.get("formatter")
name = "{} {}".format(request.method, pattern)
except Exception:
pass
if name is not None:
with Hub.current.configure_scope() as scope:
scope.set_transaction_name(
name,
source=SOURCE_FOR_STYLE[integration.transaction_style],
)
return rv
UrlDispatcher.resolve = sentry_urldispatcher_resolve
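# Usage sketch (illustrative, not part of the original module); the DSN is
# hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.aiohttp import AioHttpIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[
#             AioHttpIntegration(transaction_style="method_and_path_pattern")
#         ],
#     )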
def _make_request_processor(weak_request):
# type: (Callable[[], Request]) -> EventProcessor
def aiohttp_processor(
event, # type: Dict[str, Any]
hint, # type: Dict[str, Tuple[type, BaseException, Any]]
):
# type: (...) -> Dict[str, Any]
request = weak_request()
if request is None:
return event
with capture_internal_exceptions():
request_info = event.setdefault("request", {})
request_info["url"] = "%s://%s%s" % (
request.scheme,
request.host,
request.path,
)
request_info["query_string"] = request.query_string
request_info["method"] = request.method
request_info["env"] = {"REMOTE_ADDR": request.remote}
hub = Hub.current
request_info["headers"] = _filter_headers(dict(request.headers))
# Just attach raw data here if it is within bounds, if available.
# Unfortunately there's no way to get structured data from aiohttp
# without awaiting on some coroutine.
request_info["data"] = get_aiohttp_request_data(hub, request)
return event
return aiohttp_processor
def _capture_exception(hub):
# type: (Hub) -> ExcInfo
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options, # type: ignore
mechanism={"type": "aiohttp", "handled": False},
)
hub.capture_event(event, hint=hint)
return exc_info
BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]"
def get_aiohttp_request_data(hub, request):
# type: (Hub, Request) -> Union[Optional[str], AnnotatedValue]
bytes_body = request._read_bytes
if bytes_body is not None:
# we have body to show
if not request_body_within_bounds(hub.client, len(bytes_body)):
return AnnotatedValue.removed_because_over_size_limit()
encoding = request.charset or "utf-8"
return bytes_body.decode(encoding, "replace")
if request.can_read_body:
# body exists but we can't show it
return BODY_NOT_READ_MESSAGE
# request has no body
return None
| 8,393 | Python | 34.567796 | 88 | 0.583939 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/sqlalchemy.py | from __future__ import absolute_import
import re
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.tracing_utils import record_sql_queries
try:
from sqlalchemy.engine import Engine # type: ignore
from sqlalchemy.event import listen # type: ignore
from sqlalchemy import __version__ as SQLALCHEMY_VERSION # type: ignore
except ImportError:
raise DidNotEnable("SQLAlchemy not installed.")
if MYPY:
from typing import Any
from typing import ContextManager
from typing import Optional
from sentry_sdk.tracing import Span
class SqlalchemyIntegration(Integration):
identifier = "sqlalchemy"
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(
map(int, re.split("b|rc", SQLALCHEMY_VERSION)[0].split("."))
)
except (TypeError, ValueError):
raise DidNotEnable(
"Unparsable SQLAlchemy version: {}".format(SQLALCHEMY_VERSION)
)
if version < (1, 2):
raise DidNotEnable("SQLAlchemy 1.2 or newer required.")
listen(Engine, "before_cursor_execute", _before_cursor_execute)
listen(Engine, "after_cursor_execute", _after_cursor_execute)
listen(Engine, "handle_error", _handle_error)
def _before_cursor_execute(
conn, cursor, statement, parameters, context, executemany, *args
):
# type: (Any, Any, Any, Any, Any, bool, *Any) -> None
hub = Hub.current
if hub.get_integration(SqlalchemyIntegration) is None:
return
ctx_mgr = record_sql_queries(
hub,
cursor,
statement,
parameters,
paramstyle=context and context.dialect and context.dialect.paramstyle or None,
executemany=executemany,
)
context._sentry_sql_span_manager = ctx_mgr
span = ctx_mgr.__enter__()
if span is not None:
context._sentry_sql_span = span
def _after_cursor_execute(conn, cursor, statement, parameters, context, *args):
# type: (Any, Any, Any, Any, Any, *Any) -> None
ctx_mgr = getattr(
context, "_sentry_sql_span_manager", None
) # type: Optional[ContextManager[Any]]
if ctx_mgr is not None:
context._sentry_sql_span_manager = None
ctx_mgr.__exit__(None, None, None)
def _handle_error(context, *args):
# type: (Any, *Any) -> None
execution_context = context.execution_context
if execution_context is None:
return
span = getattr(execution_context, "_sentry_sql_span", None) # type: Optional[Span]
if span is not None:
span.set_status("internal_error")
# _after_cursor_execute does not get called for crashing SQL stmts. Judging
# from SQLAlchemy codebase it does seem like any error coming into this
# handler is going to be fatal.
ctx_mgr = getattr(
execution_context, "_sentry_sql_span_manager", None
) # type: Optional[ContextManager[Any]]
if ctx_mgr is not None:
execution_context._sentry_sql_span_manager = None
ctx_mgr.__exit__(None, None, None)
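# Usage sketch (illustrative, not part of the original module); the DSN is
# hypothetical. Once enabled, statements executed through SQLAlchemy engines
# are recorded as db spans via the cursor-execute event hooks above.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         traces_sample_rate=1.0,
#         integrations=[SqlalchemyIntegration()],
#     )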
| 3,165 | Python | 29.152381 | 87 | 0.649605 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/pymongo.py | from __future__ import absolute_import
import copy
from sentry_sdk import Hub
from sentry_sdk.hub import _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.tracing import Span
from sentry_sdk.utils import capture_internal_exceptions
from sentry_sdk._types import MYPY
try:
from pymongo import monitoring
except ImportError:
raise DidNotEnable("Pymongo not installed")
if MYPY:
from typing import Any, Dict, Union
from pymongo.monitoring import (
CommandFailedEvent,
CommandStartedEvent,
CommandSucceededEvent,
)
SAFE_COMMAND_ATTRIBUTES = [
"insert",
"ordered",
"find",
"limit",
"singleBatch",
"aggregate",
"createIndexes",
"indexes",
"delete",
"findAndModify",
"renameCollection",
"to",
"drop",
]
def _strip_pii(command):
# type: (Dict[str, Any]) -> Dict[str, Any]
for key in command:
is_safe_field = key in SAFE_COMMAND_ATTRIBUTES
if is_safe_field:
# Skip if safe key
continue
update_db_command = key == "update" and "findAndModify" not in command
if update_db_command:
# Also skip "update" db command because it is save.
# There is also an "update" key in the "findAndModify" command, which is NOT safe!
continue
# Special stripping for documents
is_document = key == "documents"
if is_document:
for doc in command[key]:
for doc_key in doc:
doc[doc_key] = "%s"
continue
# Special stripping for dict style fields
is_dict_field = key in ["filter", "query", "update"]
if is_dict_field:
for item_key in command[key]:
command[key][item_key] = "%s"
continue
# For pipeline fields strip the `$match` dict
is_pipeline_field = key == "pipeline"
if is_pipeline_field:
for pipeline in command[key]:
for match_key in pipeline["$match"] if "$match" in pipeline else []:
pipeline["$match"][match_key] = "%s"
continue
# Default stripping
command[key] = "%s"
return command
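# Worked example (illustrative, not part of the original module): a command
# such as {"find": "users", "filter": {"email": "a@b.c"}} comes back as
# {"find": "users", "filter": {"email": "%s"}} -- keys listed in
# SAFE_COMMAND_ATTRIBUTES are kept verbatim, while potentially sensitive
# values are replaced with the "%s" placeholder.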
class CommandTracer(monitoring.CommandListener):
def __init__(self):
# type: () -> None
self._ongoing_operations = {} # type: Dict[int, Span]
def _operation_key(self, event):
# type: (Union[CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent]) -> int
return event.request_id
def started(self, event):
# type: (CommandStartedEvent) -> None
hub = Hub.current
if hub.get_integration(PyMongoIntegration) is None:
return
with capture_internal_exceptions():
command = dict(copy.deepcopy(event.command))
command.pop("$db", None)
command.pop("$clusterTime", None)
command.pop("$signature", None)
op = "db.query"
tags = {
"db.name": event.database_name,
"db.system": "mongodb",
"db.operation": event.command_name,
}
try:
tags["net.peer.name"] = event.connection_id[0]
tags["net.peer.port"] = str(event.connection_id[1])
except TypeError:
pass
data = {"operation_ids": {}} # type: Dict[str, Dict[str, Any]]
data["operation_ids"]["operation"] = event.operation_id
data["operation_ids"]["request"] = event.request_id
try:
lsid = command.pop("lsid")["id"]
data["operation_ids"]["session"] = str(lsid)
except KeyError:
pass
if not _should_send_default_pii():
command = _strip_pii(command)
query = "{} {}".format(event.command_name, command)
span = hub.start_span(op=op, description=query)
for tag, value in tags.items():
span.set_tag(tag, value)
for key, value in data.items():
span.set_data(key, value)
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", type=op, data=tags)
self._ongoing_operations[self._operation_key(event)] = span.__enter__()
def failed(self, event):
# type: (CommandFailedEvent) -> None
hub = Hub.current
if hub.get_integration(PyMongoIntegration) is None:
return
try:
span = self._ongoing_operations.pop(self._operation_key(event))
span.set_status("internal_error")
span.__exit__(None, None, None)
except KeyError:
return
def succeeded(self, event):
# type: (CommandSucceededEvent) -> None
hub = Hub.current
if hub.get_integration(PyMongoIntegration) is None:
return
try:
span = self._ongoing_operations.pop(self._operation_key(event))
span.set_status("ok")
span.__exit__(None, None, None)
except KeyError:
pass
class PyMongoIntegration(Integration):
identifier = "pymongo"
@staticmethod
def setup_once():
# type: () -> None
monitoring.register(CommandTracer())
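# Usage sketch (illustrative, not part of the original module); the DSN is
# hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.pymongo import PyMongoIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         traces_sample_rate=1.0,
#         integrations=[PyMongoIntegration()],
#     )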
| 5,404 | Python | 28.375 | 94 | 0.563101 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/gcp.py | from datetime import datetime, timedelta
from os import environ
import sys
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT, Transaction
from sentry_sdk._compat import reraise
from sentry_sdk.utils import (
AnnotatedValue,
capture_internal_exceptions,
event_from_exception,
logger,
TimeoutThread,
)
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk._types import MYPY
# Constants
TIMEOUT_WARNING_BUFFER = 1.5 # Buffer time required to send timeout warning to Sentry
MILLIS_TO_SECONDS = 1000.0
if MYPY:
from typing import Any
from typing import TypeVar
from typing import Callable
from typing import Optional
from sentry_sdk._types import EventProcessor, Event, Hint
F = TypeVar("F", bound=Callable[..., Any])
def _wrap_func(func):
# type: (F) -> F
def sentry_func(functionhandler, gcp_event, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(GcpIntegration)
if integration is None:
return func(functionhandler, gcp_event, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = environ.get("FUNCTION_TIMEOUT_SEC")
if not configured_time:
logger.debug(
"The configured timeout could not be fetched from Cloud Functions configuration."
)
return func(functionhandler, gcp_event, *args, **kwargs)
configured_time = int(configured_time)
initial_time = datetime.utcnow()
with hub.push_scope() as scope:
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(
gcp_event, configured_time, initial_time
)
)
scope.set_tag("gcp_region", environ.get("FUNCTION_REGION"))
timeout_thread = None
if (
integration.timeout_warning
and configured_time > TIMEOUT_WARNING_BUFFER
):
waiting_time = configured_time - TIMEOUT_WARNING_BUFFER
timeout_thread = TimeoutThread(waiting_time, configured_time)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = {}
if hasattr(gcp_event, "headers"):
headers = gcp_event.headers
transaction = Transaction.continue_from_headers(
headers,
op=OP.FUNCTION_GCP,
name=environ.get("FUNCTION_NAME", ""),
source=TRANSACTION_SOURCE_COMPONENT,
)
sampling_context = {
"gcp_env": {
"function_name": environ.get("FUNCTION_NAME"),
"function_entry_point": environ.get("ENTRY_POINT"),
"function_identity": environ.get("FUNCTION_IDENTITY"),
"function_region": environ.get("FUNCTION_REGION"),
"function_project": environ.get("GCP_PROJECT"),
},
"gcp_event": gcp_event,
}
with hub.start_transaction(
transaction, custom_sampling_context=sampling_context
):
try:
return func(functionhandler, gcp_event, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "gcp", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
# Flush out the event queue
hub.flush()
return sentry_func # type: ignore
class GcpIntegration(Integration):
identifier = "gcp"
def __init__(self, timeout_warning=False):
# type: (bool) -> None
self.timeout_warning = timeout_warning
@staticmethod
def setup_once():
# type: () -> None
import __main__ as gcp_functions
if not hasattr(gcp_functions, "worker_v1"):
logger.warning(
"GcpIntegration currently supports only Python 3.7 runtime environment."
)
return
worker1 = gcp_functions.worker_v1
worker1.FunctionHandler.invoke_user_function = _wrap_func(
worker1.FunctionHandler.invoke_user_function
)
def _make_request_event_processor(gcp_event, configured_timeout, initial_time):
# type: (Any, Any, Any) -> EventProcessor
def event_processor(event, hint):
# type: (Event, Hint) -> Optional[Event]
final_time = datetime.utcnow()
time_diff = final_time - initial_time
        # use the full duration, not just the sub-second microseconds component
        execution_duration_in_millis = time_diff.total_seconds() * MILLIS_TO_SECONDS
extra = event.setdefault("extra", {})
extra["google cloud functions"] = {
"function_name": environ.get("FUNCTION_NAME"),
"function_entry_point": environ.get("ENTRY_POINT"),
"function_identity": environ.get("FUNCTION_IDENTITY"),
"function_region": environ.get("FUNCTION_REGION"),
"function_project": environ.get("GCP_PROJECT"),
"execution_duration_in_millis": execution_duration_in_millis,
"configured_timeout_in_seconds": configured_timeout,
}
extra["google cloud logs"] = {
"url": _get_google_cloud_logs_url(final_time),
}
request = event.get("request", {})
request["url"] = "gcp:///{}".format(environ.get("FUNCTION_NAME"))
if hasattr(gcp_event, "method"):
request["method"] = gcp_event.method
if hasattr(gcp_event, "query_string"):
request["query_string"] = gcp_event.query_string.decode("utf-8")
if hasattr(gcp_event, "headers"):
request["headers"] = _filter_headers(gcp_event.headers)
if _should_send_default_pii():
if hasattr(gcp_event, "data"):
request["data"] = gcp_event.data
else:
if hasattr(gcp_event, "data"):
# Unfortunately couldn't find a way to get structured body from GCP
# event. Meaning every body is unstructured to us.
request["data"] = AnnotatedValue.removed_because_raw_data()
event["request"] = request
return event
return event_processor
def _get_google_cloud_logs_url(final_time):
# type: (datetime) -> str
"""
Generates a Google Cloud Logs console URL based on the environment variables
Arguments:
final_time {datetime} -- Final time
Returns:
str -- Google Cloud Logs Console URL to logs.
"""
hour_ago = final_time - timedelta(hours=1)
formatstring = "%Y-%m-%dT%H:%M:%SZ"
url = (
"https://console.cloud.google.com/logs/viewer?project={project}&resource=cloud_function"
"%2Ffunction_name%2F{function_name}%2Fregion%2F{region}&minLogLevel=0&expandAll=false"
"×tamp={timestamp_end}&customFacets=&limitCustomFacetWidth=true"
"&dateRangeStart={timestamp_start}&dateRangeEnd={timestamp_end}"
"&interval=PT1H&scrollTimestamp={timestamp_end}"
).format(
project=environ.get("GCP_PROJECT"),
function_name=environ.get("FUNCTION_NAME"),
region=environ.get("FUNCTION_REGION"),
timestamp_end=final_time.strftime(formatstring),
timestamp_start=hour_ago.strftime(formatstring),
)
return url
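# Usage sketch (illustrative, not part of the original module): called inside
# a Cloud Function's main module; the DSN is hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.gcp import GcpIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[GcpIntegration(timeout_warning=True)],
#     )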
| 8,148 | Python | 34.430435 | 97 | 0.58137 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/executing.py | from __future__ import absolute_import
from sentry_sdk import Hub
from sentry_sdk._types import MYPY
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.utils import walk_exception_chain, iter_stacks
if MYPY:
from typing import Optional
from sentry_sdk._types import Event, Hint
try:
import executing
except ImportError:
raise DidNotEnable("executing is not installed")
class ExecutingIntegration(Integration):
identifier = "executing"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def add_executing_info(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if Hub.current.get_integration(ExecutingIntegration) is None:
return event
if hint is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception, (_exc_type, _exc_value, exc_tb) in zip(
reversed(values), walk_exception_chain(exc_info)
):
sentry_frames = [
frame
for frame in exception.get("stacktrace", {}).get("frames", [])
if frame.get("function")
]
tbs = list(iter_stacks(exc_tb))
if len(sentry_frames) != len(tbs):
continue
for sentry_frame, tb in zip(sentry_frames, tbs):
frame = tb.tb_frame
source = executing.Source.for_frame(frame)
sentry_frame["function"] = source.code_qualname(frame.f_code)
return event
| 2,023 | Python | 28.333333 | 82 | 0.562531 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/modules.py | from __future__ import absolute_import
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Iterator
from sentry_sdk._types import Event
_installed_modules = None
def _generate_installed_modules():
# type: () -> Iterator[Tuple[str, str]]
try:
import pkg_resources
except ImportError:
return
for info in pkg_resources.working_set:
yield info.key, info.version
def _get_installed_modules():
# type: () -> Dict[str, str]
global _installed_modules
if _installed_modules is None:
_installed_modules = dict(_generate_installed_modules())
return _installed_modules
class ModulesIntegration(Integration):
identifier = "modules"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def processor(event, hint):
# type: (Event, Any) -> Dict[str, Any]
if event.get("type") == "transaction":
return event
if Hub.current.get_integration(ModulesIntegration) is None:
return event
event["modules"] = _get_installed_modules()
return event
| 1,393 | Python | 23.45614 | 71 | 0.642498 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/trytond.py | import sentry_sdk.hub
import sentry_sdk.utils
import sentry_sdk.integrations
import sentry_sdk.integrations.wsgi
from sentry_sdk._types import MYPY
from trytond.exceptions import TrytonException # type: ignore
from trytond.wsgi import app # type: ignore
if MYPY:
from typing import Any
# TODO: trytond-worker, trytond-cron and trytond-admin integrations
class TrytondWSGIIntegration(sentry_sdk.integrations.Integration):
identifier = "trytond_wsgi"
def __init__(self): # type: () -> None
pass
@staticmethod
def setup_once(): # type: () -> None
app.wsgi_app = sentry_sdk.integrations.wsgi.SentryWsgiMiddleware(app.wsgi_app)
def error_handler(e): # type: (Exception) -> None
hub = sentry_sdk.hub.Hub.current
if hub.get_integration(TrytondWSGIIntegration) is None:
return
elif isinstance(e, TrytonException):
return
else:
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = sentry_sdk.utils.event_from_exception(
e,
client_options=client.options,
mechanism={"type": "trytond", "handled": False},
)
hub.capture_event(event, hint=hint)
        # The expected error handler signature changed when the
        # error_handler decorator was introduced in Tryton 5.4.
if hasattr(app, "error_handler"):
@app.error_handler
def _(app, request, e): # type: ignore
error_handler(e)
else:
app.error_handlers.append(error_handler)
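# Usage sketch (illustrative, not part of the original module); the DSN is
# hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.trytond import TrytondWSGIIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[TrytondWSGIIntegration()],
#     )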
| 1,728 | Python | 29.874999 | 86 | 0.600116 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/httpx.py | from sentry_sdk import Hub
from sentry_sdk.consts import OP
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.utils import logger
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
try:
from httpx import AsyncClient, Client, Request, Response # type: ignore
except ImportError:
raise DidNotEnable("httpx is not installed")
__all__ = ["HttpxIntegration"]
class HttpxIntegration(Integration):
identifier = "httpx"
@staticmethod
def setup_once():
# type: () -> None
"""
httpx has its own transport layer and can be customized when needed,
so patch Client.send and AsyncClient.send to support both synchronous and async interfaces.
"""
_install_httpx_client()
_install_httpx_async_client()
def _install_httpx_client():
# type: () -> None
real_send = Client.send
def send(self, request, **kwargs):
# type: (Client, Request, **Any) -> Response
hub = Hub.current
if hub.get_integration(HttpxIntegration) is None:
return real_send(self, request, **kwargs)
with hub.start_span(
op=OP.HTTP_CLIENT, description="%s %s" % (request.method, request.url)
) as span:
span.set_data("method", request.method)
span.set_data("url", str(request.url))
for key, value in hub.iter_trace_propagation_headers():
logger.debug(
"[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format(
key=key, value=value, url=request.url
)
)
request.headers[key] = value
rv = real_send(self, request, **kwargs)
span.set_data("status_code", rv.status_code)
span.set_http_status(rv.status_code)
span.set_data("reason", rv.reason_phrase)
return rv
Client.send = send
def _install_httpx_async_client():
# type: () -> None
real_send = AsyncClient.send
async def send(self, request, **kwargs):
# type: (AsyncClient, Request, **Any) -> Response
hub = Hub.current
if hub.get_integration(HttpxIntegration) is None:
return await real_send(self, request, **kwargs)
with hub.start_span(
op=OP.HTTP_CLIENT, description="%s %s" % (request.method, request.url)
) as span:
span.set_data("method", request.method)
span.set_data("url", str(request.url))
for key, value in hub.iter_trace_propagation_headers():
logger.debug(
"[Tracing] Adding `{key}` header {value} to outgoing request to {url}.".format(
key=key, value=value, url=request.url
)
)
request.headers[key] = value
rv = await real_send(self, request, **kwargs)
span.set_data("status_code", rv.status_code)
span.set_http_status(rv.status_code)
span.set_data("reason", rv.reason_phrase)
return rv
AsyncClient.send = send
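# Usage sketch (illustrative, not part of the original module): once enabled,
# plain httpx calls are traced automatically; the DSN and URL are hypothetical.
#
#     import httpx
#     import sentry_sdk
#     from sentry_sdk.integrations.httpx import HttpxIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         traces_sample_rate=1.0,
#         integrations=[HttpxIntegration()],
#     )
#     with httpx.Client() as client:
#         client.get("https://example.com")  # recorded as an http.client span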
| 3,162 | Python | 31.947916 | 99 | 0.580645 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/redis.py | from __future__ import absolute_import
from sentry_sdk import Hub
from sentry_sdk.consts import OP
from sentry_sdk.utils import capture_internal_exceptions, logger
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any, Sequence
_SINGLE_KEY_COMMANDS = frozenset(
["decr", "decrby", "get", "incr", "incrby", "pttl", "set", "setex", "setnx", "ttl"]
)
_MULTI_KEY_COMMANDS = frozenset(["del", "touch", "unlink"])
#: Trim argument lists to this many values
_MAX_NUM_ARGS = 10
def patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):
# type: (Any, bool, Any) -> None
old_execute = pipeline_cls.execute
def sentry_patched_execute(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(RedisIntegration) is None:
return old_execute(self, *args, **kwargs)
with hub.start_span(
op=OP.DB_REDIS, description="redis.pipeline.execute"
) as span:
with capture_internal_exceptions():
span.set_tag("redis.is_cluster", is_cluster)
transaction = self.transaction if not is_cluster else False
span.set_tag("redis.transaction", transaction)
commands = []
for i, arg in enumerate(self.command_stack):
if i > _MAX_NUM_ARGS:
break
command_args = []
for j, command_arg in enumerate(get_command_args_fn(arg)):
if j > 0:
command_arg = repr(command_arg)
command_args.append(command_arg)
commands.append(" ".join(command_args))
span.set_data(
"redis.commands",
{"count": len(self.command_stack), "first_ten": commands},
)
return old_execute(self, *args, **kwargs)
pipeline_cls.execute = sentry_patched_execute
def _get_redis_command_args(command):
# type: (Any) -> Sequence[Any]
return command[0]
def _parse_rediscluster_command(command):
# type: (Any) -> Sequence[Any]
return command.args
def _patch_rediscluster():
# type: () -> None
try:
import rediscluster # type: ignore
except ImportError:
return
patch_redis_client(rediscluster.RedisCluster, is_cluster=True)
# up to v1.3.6, __version__ attribute is a tuple
# from v2.0.0, __version__ is a string and VERSION a tuple
version = getattr(rediscluster, "VERSION", rediscluster.__version__)
# StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0
# https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst
if (0, 2, 0) < version < (2, 0, 0):
pipeline_cls = rediscluster.pipeline.StrictClusterPipeline
patch_redis_client(rediscluster.StrictRedisCluster, is_cluster=True)
else:
pipeline_cls = rediscluster.pipeline.ClusterPipeline
patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)
class RedisIntegration(Integration):
identifier = "redis"
@staticmethod
def setup_once():
# type: () -> None
try:
import redis
except ImportError:
raise DidNotEnable("Redis client not installed")
patch_redis_client(redis.StrictRedis, is_cluster=False)
patch_redis_pipeline(redis.client.Pipeline, False, _get_redis_command_args)
try:
strict_pipeline = redis.client.StrictPipeline # type: ignore
except AttributeError:
pass
else:
patch_redis_pipeline(strict_pipeline, False, _get_redis_command_args)
try:
import rb.clients # type: ignore
except ImportError:
pass
else:
patch_redis_client(rb.clients.FanoutClient, is_cluster=False)
patch_redis_client(rb.clients.MappingClient, is_cluster=False)
patch_redis_client(rb.clients.RoutingClient, is_cluster=False)
try:
_patch_rediscluster()
except Exception:
logger.exception("Error occurred while patching `rediscluster` library")
def patch_redis_client(cls, is_cluster):
# type: (Any, bool) -> None
"""
This function can be used to instrument custom redis client classes or
subclasses.
"""
old_execute_command = cls.execute_command
def sentry_patched_execute_command(self, name, *args, **kwargs):
# type: (Any, str, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(RedisIntegration) is None:
return old_execute_command(self, name, *args, **kwargs)
description = name
with capture_internal_exceptions():
description_parts = [name]
for i, arg in enumerate(args):
if i > _MAX_NUM_ARGS:
break
description_parts.append(repr(arg))
description = " ".join(description_parts)
with hub.start_span(op=OP.DB_REDIS, description=description) as span:
span.set_tag("redis.is_cluster", is_cluster)
if name:
span.set_tag("redis.command", name)
if name and args:
name_low = name.lower()
if (name_low in _SINGLE_KEY_COMMANDS) or (
name_low in _MULTI_KEY_COMMANDS and len(args) == 1
):
span.set_tag("redis.key", args[0])
return old_execute_command(self, name, *args, **kwargs)
cls.execute_command = sentry_patched_execute_command
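# Usage sketch (illustrative, not part of the original module): instrumenting
# a hypothetical custom client subclass, as the docstring above suggests.
# Spans are only emitted while RedisIntegration is enabled on the client.
#
#     import redis
#     from sentry_sdk.integrations.redis import patch_redis_client
#
#     class MyRedis(redis.StrictRedis):  # hypothetical subclass
#         pass
#
#     patch_redis_client(MyRedis, is_cluster=False)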
| 5,740 | Python | 32.184971 | 87 | 0.59547 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/__init__.py | """This package"""
from __future__ import absolute_import
from threading import Lock
from sentry_sdk._compat import iteritems
from sentry_sdk.utils import logger
from sentry_sdk._types import MYPY
if MYPY:
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Set
from typing import Tuple
from typing import Type
_installer_lock = Lock()
_installed_integrations = set() # type: Set[str]
def _generate_default_integrations_iterator(integrations, auto_enabling_integrations):
# type: (Tuple[str, ...], Tuple[str, ...]) -> Callable[[bool], Iterator[Type[Integration]]]
def iter_default_integrations(with_auto_enabling_integrations):
# type: (bool) -> Iterator[Type[Integration]]
"""Returns an iterator of the default integration classes:"""
from importlib import import_module
if with_auto_enabling_integrations:
all_import_strings = integrations + auto_enabling_integrations
else:
all_import_strings = integrations
for import_string in all_import_strings:
try:
module, cls = import_string.rsplit(".", 1)
yield getattr(import_module(module), cls)
except (DidNotEnable, SyntaxError) as e:
logger.debug(
"Did not import default integration %s: %s", import_string, e
)
if isinstance(iter_default_integrations.__doc__, str):
for import_string in integrations:
iter_default_integrations.__doc__ += "\n- `{}`".format(import_string)
return iter_default_integrations
_AUTO_ENABLING_INTEGRATIONS = (
"sentry_sdk.integrations.django.DjangoIntegration",
"sentry_sdk.integrations.flask.FlaskIntegration",
"sentry_sdk.integrations.starlette.StarletteIntegration",
"sentry_sdk.integrations.fastapi.FastApiIntegration",
"sentry_sdk.integrations.bottle.BottleIntegration",
"sentry_sdk.integrations.falcon.FalconIntegration",
"sentry_sdk.integrations.sanic.SanicIntegration",
"sentry_sdk.integrations.celery.CeleryIntegration",
"sentry_sdk.integrations.rq.RqIntegration",
"sentry_sdk.integrations.aiohttp.AioHttpIntegration",
"sentry_sdk.integrations.tornado.TornadoIntegration",
"sentry_sdk.integrations.sqlalchemy.SqlalchemyIntegration",
"sentry_sdk.integrations.redis.RedisIntegration",
"sentry_sdk.integrations.pyramid.PyramidIntegration",
"sentry_sdk.integrations.boto3.Boto3Integration",
)
iter_default_integrations = _generate_default_integrations_iterator(
integrations=(
# stdlib/base runtime integrations
"sentry_sdk.integrations.logging.LoggingIntegration",
"sentry_sdk.integrations.stdlib.StdlibIntegration",
"sentry_sdk.integrations.excepthook.ExcepthookIntegration",
"sentry_sdk.integrations.dedupe.DedupeIntegration",
"sentry_sdk.integrations.atexit.AtexitIntegration",
"sentry_sdk.integrations.modules.ModulesIntegration",
"sentry_sdk.integrations.argv.ArgvIntegration",
"sentry_sdk.integrations.threading.ThreadingIntegration",
),
auto_enabling_integrations=_AUTO_ENABLING_INTEGRATIONS,
)
del _generate_default_integrations_iterator
def setup_integrations(
integrations, with_defaults=True, with_auto_enabling_integrations=False
):
# type: (List[Integration], bool, bool) -> Dict[str, Integration]
"""Given a list of integration instances this installs them all. When
`with_defaults` is set to `True` then all default integrations are added
unless they were already provided before.
"""
integrations = dict(
(integration.identifier, integration) for integration in integrations or ()
)
logger.debug("Setting up integrations (with default = %s)", with_defaults)
# Integrations that are not explicitly set up by the user.
used_as_default_integration = set()
if with_defaults:
for integration_cls in iter_default_integrations(
with_auto_enabling_integrations
):
if integration_cls.identifier not in integrations:
instance = integration_cls()
integrations[instance.identifier] = instance
used_as_default_integration.add(instance.identifier)
for identifier, integration in iteritems(integrations):
with _installer_lock:
if identifier not in _installed_integrations:
logger.debug(
"Setting up previously not enabled integration %s", identifier
)
try:
type(integration).setup_once()
except NotImplementedError:
if getattr(integration, "install", None) is not None:
logger.warning(
"Integration %s: The install method is "
"deprecated. Use `setup_once`.",
identifier,
)
integration.install()
else:
raise
except DidNotEnable as e:
if identifier not in used_as_default_integration:
raise
logger.debug(
"Did not enable default integration %s: %s", identifier, e
)
_installed_integrations.add(identifier)
for identifier in integrations:
logger.debug("Enabling integration %s", identifier)
return integrations
class DidNotEnable(Exception): # noqa: N818
"""
The integration could not be enabled due to a trivial user error like
`flask` not being installed for the `FlaskIntegration`.
This exception is silently swallowed for default integrations, but reraised
for explicitly enabled integrations.
"""
class Integration(object):
"""Baseclass for all integrations.
To accept options for an integration, implement your own constructor that
saves those options on `self`.
"""
install = None
"""Legacy method, do not implement."""
identifier = None # type: str
"""String unique ID of integration type"""
@staticmethod
def setup_once():
# type: () -> None
"""
Initialize the integration.
This function is only called once, ever. Configuration is not available
at this point, so the only thing to do here is to hook into exception
handlers, and perhaps do monkeypatches.
Inside those hooks `Integration.current` can be used to access the
instance again.
"""
raise NotImplementedError()
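# Sketch of a minimal custom integration (illustrative, not part of the
# original module); the identifier and class name below are hypothetical.
#
#     class MyIntegration(Integration):
#         identifier = "my_integration"
#
#         @staticmethod
#         def setup_once():
#             # type: () -> None
#             # install monkeypatches / exception hooks exactly once here
#             pass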
| 6,755 | Python | 34.93617 | 95 | 0.650777 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/celery.py | from __future__ import absolute_import
import sys
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk.tracing import TRANSACTION_SOURCE_TASK
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk.tracing import Transaction
from sentry_sdk._compat import reraise
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk._types import MYPY
from sentry_sdk._functools import wraps
if MYPY:
from typing import Any
from typing import TypeVar
from typing import Callable
from typing import Optional
from sentry_sdk._types import EventProcessor, Event, Hint, ExcInfo
F = TypeVar("F", bound=Callable[..., Any])
try:
from celery import VERSION as CELERY_VERSION
from celery.exceptions import ( # type: ignore
SoftTimeLimitExceeded,
Retry,
Ignore,
Reject,
)
from celery.app.trace import task_has_custom
except ImportError:
raise DidNotEnable("Celery not installed")
CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
class CeleryIntegration(Integration):
identifier = "celery"
def __init__(self, propagate_traces=True):
# type: (bool) -> None
self.propagate_traces = propagate_traces
@staticmethod
def setup_once():
# type: () -> None
if CELERY_VERSION < (3,):
raise DidNotEnable("Celery 3 or newer required.")
import celery.app.trace as trace # type: ignore
old_build_tracer = trace.build_tracer
def sentry_build_tracer(name, task, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
if not getattr(task, "_sentry_is_patched", False):
# determine whether Celery will use __call__ or run and patch
# accordingly
if task_has_custom(task, "__call__"):
type(task).__call__ = _wrap_task_call(task, type(task).__call__)
else:
task.run = _wrap_task_call(task, task.run)
# `build_tracer` is apparently called for every task
# invocation. Can't wrap every celery task for every invocation
# or we will get infinitely nested wrapper functions.
task._sentry_is_patched = True
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
trace.build_tracer = sentry_build_tracer
from celery.app.task import Task # type: ignore
Task.apply_async = _wrap_apply_async(Task.apply_async)
_patch_worker_exit()
# This logger logs every status of every task that ran on the worker.
# Meaning that every task's breadcrumbs are full of stuff like "Task
# <foo> raised unexpected <bar>".
ignore_logger("celery.worker.job")
ignore_logger("celery.app.trace")
# This is stdout/err redirected to a logger, can't deal with this
# (need event_level=logging.WARN to reproduce)
ignore_logger("celery.redirected")
def _wrap_apply_async(f):
# type: (F) -> F
@wraps(f)
def apply_async(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(CeleryIntegration)
if integration is not None and integration.propagate_traces:
with hub.start_span(
op=OP.QUEUE_SUBMIT_CELERY, description=args[0].name
) as span:
with capture_internal_exceptions():
headers = dict(hub.iter_trace_propagation_headers(span))
if headers:
# Note: kwargs can contain headers=None, so no setdefault!
# Unsure which backend though.
kwarg_headers = kwargs.get("headers") or {}
kwarg_headers.update(headers)
# https://github.com/celery/celery/issues/4875
#
# Need to setdefault the inner headers too since other
# tracing tools (dd-trace-py) also employ this exact
# workaround and we don't want to break them.
kwarg_headers.setdefault("headers", {}).update(headers)
kwargs["headers"] = kwarg_headers
return f(*args, **kwargs)
else:
return f(*args, **kwargs)
return apply_async # type: ignore
def _wrap_tracer(task, f):
# type: (Any, F) -> F
# Need to wrap tracer for pushing the scope before prerun is sent, and
# popping it after postrun is sent.
#
# This is the reason we don't use signals for hooking in the first place.
# Also because in Celery 3, signal dispatch returns early if one handler
# crashes.
@wraps(f)
def _inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(CeleryIntegration) is None:
return f(*args, **kwargs)
with hub.push_scope() as scope:
scope._name = "celery"
scope.clear_breadcrumbs()
scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
transaction = None
# Celery task objects are not a thing to be trusted. Even
# something such as attribute access can fail.
with capture_internal_exceptions():
transaction = Transaction.continue_from_headers(
args[3].get("headers") or {},
op=OP.QUEUE_TASK_CELERY,
name="unknown celery task",
source=TRANSACTION_SOURCE_TASK,
)
transaction.name = task.name
transaction.set_status("ok")
if transaction is None:
return f(*args, **kwargs)
with hub.start_transaction(
transaction,
custom_sampling_context={
"celery_job": {
"task": task.name,
# for some reason, args[1] is a list if non-empty but a
# tuple if empty
"args": list(args[1]),
"kwargs": args[2],
}
},
):
return f(*args, **kwargs)
return _inner # type: ignore
def _wrap_task_call(task, f):
# type: (Any, F) -> F
# Need to wrap task call because the exception is caught before we get to
# see it. Also celery's reported stacktrace is untrustworthy.
# functools.wraps is important here because celery-once looks at this
# method's name.
# https://github.com/getsentry/sentry-python/issues/421
@wraps(f)
def _inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
try:
return f(*args, **kwargs)
except Exception:
exc_info = sys.exc_info()
with capture_internal_exceptions():
_capture_exception(task, exc_info)
reraise(*exc_info)
return _inner # type: ignore
def _make_event_processor(task, uuid, args, kwargs, request=None):
# type: (Any, Any, Any, Any, Optional[Any]) -> EventProcessor
def event_processor(event, hint):
# type: (Event, Hint) -> Optional[Event]
with capture_internal_exceptions():
tags = event.setdefault("tags", {})
tags["celery_task_id"] = uuid
extra = event.setdefault("extra", {})
extra["celery-job"] = {
"task_name": task.name,
"args": args,
"kwargs": kwargs,
}
if "exc_info" in hint:
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
"celery",
"SoftTimeLimitExceeded",
getattr(task, "name", task),
]
return event
return event_processor
def _capture_exception(task, exc_info):
# type: (Any, ExcInfo) -> None
hub = Hub.current
if hub.get_integration(CeleryIntegration) is None:
return
if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
        # Control-flow exceptions don't map to any error status; mark the span as aborted.
_set_status(hub, "aborted")
return
_set_status(hub, "internal_error")
if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "celery", "handled": False},
)
hub.capture_event(event, hint=hint)
def _set_status(hub, status):
# type: (Hub, str) -> None
with capture_internal_exceptions():
with hub.configure_scope() as scope:
if scope.span is not None:
scope.span.set_status(status)
def _patch_worker_exit():
# type: () -> None
# Need to flush queue before worker shutdown because a crashing worker will
# call os._exit
from billiard.pool import Worker # type: ignore
old_workloop = Worker.workloop
def sentry_workloop(*args, **kwargs):
# type: (*Any, **Any) -> Any
try:
return old_workloop(*args, **kwargs)
finally:
with capture_internal_exceptions():
hub = Hub.current
if hub.get_integration(CeleryIntegration) is not None:
hub.flush()
Worker.workloop = sentry_workloop
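# Usage sketch (illustrative, not part of the original module): call init in
# both the worker and the enqueueing process; with propagate_traces=True (the
# default) trace headers travel along with each task. The DSN is hypothetical.
#
#     import sentry_sdk
#     from sentry_sdk.integrations.celery import CeleryIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[CeleryIntegration(propagate_traces=True)],
#     )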
| 9,823 | Python | 32.077441 | 84 | 0.568462 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/dedupe.py | from sentry_sdk.hub import Hub
from sentry_sdk.utils import ContextVar
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk._types import MYPY
if MYPY:
from typing import Optional
from sentry_sdk._types import Event, Hint
class DedupeIntegration(Integration):
identifier = "dedupe"
def __init__(self):
# type: () -> None
self._last_seen = ContextVar("last-seen")
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def processor(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if hint is None:
return event
integration = Hub.current.get_integration(DedupeIntegration)
if integration is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exc = exc_info[1]
if integration._last_seen.get(None) is exc:
return None
integration._last_seen.set(exc)
return event
| 1,166 | Python | 25.522727 | 72 | 0.59434 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/flask.py | from __future__ import absolute_import
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from sentry_sdk.scope import Scope
from sentry_sdk.tracing import SENTRY_TRACE_HEADER_NAME, SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
if MYPY:
from typing import Any, Callable, Dict, Union
from sentry_sdk._types import EventProcessor
from sentry_sdk.integrations.wsgi import _ScopedResponse
from werkzeug.datastructures import FileStorage, ImmutableMultiDict
try:
import flask_login # type: ignore
except ImportError:
flask_login = None
try:
from flask import Flask, Markup, Request # type: ignore
from flask import __version__ as FLASK_VERSION
from flask import request as flask_request
from flask.signals import (
before_render_template,
got_request_exception,
request_started,
)
except ImportError:
raise DidNotEnable("Flask is not installed")
try:
import blinker # noqa
except ImportError:
raise DidNotEnable("blinker is not installed")
TRANSACTION_STYLE_VALUES = ("endpoint", "url")
class FlaskIntegration(Integration):
identifier = "flask"
transaction_style = ""
def __init__(self, transaction_style="endpoint"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
# This version parsing is absolutely naive but the alternative is to
# import pkg_resources which slows down the SDK a lot.
try:
version = tuple(map(int, FLASK_VERSION.split(".")[:3]))
except (ValueError, TypeError):
            # It's probably a release candidate; we assume it's fine.
pass
else:
if version < (0, 10):
raise DidNotEnable("Flask 0.10 or newer is required.")
before_render_template.connect(_add_sentry_trace)
request_started.connect(_request_started)
got_request_exception.connect(_capture_exception)
old_app = Flask.__call__
def sentry_patched_wsgi_app(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
if Hub.current.get_integration(FlaskIntegration) is None:
return old_app(self, environ, start_response)
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
environ, start_response
)
Flask.__call__ = sentry_patched_wsgi_app
def _add_sentry_trace(sender, template, context, **extra):
# type: (Flask, Any, Dict[str, Any], **Any) -> None
if "sentry_trace" in context:
return
sentry_span = Hub.current.scope.span
context["sentry_trace"] = (
Markup(
'<meta name="%s" content="%s" />'
% (
SENTRY_TRACE_HEADER_NAME,
sentry_span.to_traceparent(),
)
)
if sentry_span
else ""
)
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (Scope, str, Request) -> None
try:
name_for_style = {
"url": request.url_rule.rule,
"endpoint": request.url_rule.endpoint,
}
scope.set_transaction_name(
name_for_style[transaction_style],
source=SOURCE_FOR_STYLE[transaction_style],
)
except Exception:
pass
def _request_started(app, **kwargs):
# type: (Flask, **Any) -> None
hub = Hub.current
integration = hub.get_integration(FlaskIntegration)
if integration is None:
return
with hub.configure_scope() as scope:
# Set the transaction name and source here,
# but rely on WSGI middleware to actually start the transaction
request = flask_request._get_current_object()
_set_transaction_name_and_source(scope, integration.transaction_style, request)
evt_processor = _make_request_event_processor(app, request, integration)
scope.add_event_processor(evt_processor)
class FlaskRequestExtractor(RequestExtractor):
def env(self):
# type: () -> Dict[str, str]
return self.request.environ
def cookies(self):
# type: () -> Dict[Any, Any]
return {
k: v[0] if isinstance(v, list) and len(v) == 1 else v
for k, v in self.request.cookies.items()
}
def raw_data(self):
# type: () -> bytes
return self.request.get_data()
def form(self):
# type: () -> ImmutableMultiDict[str, Any]
return self.request.form
def files(self):
# type: () -> ImmutableMultiDict[str, Any]
return self.request.files
def is_json(self):
# type: () -> bool
return self.request.is_json
def json(self):
# type: () -> Any
return self.request.get_json()
def size_of_file(self, file):
# type: (FileStorage) -> int
return file.content_length
def _make_request_event_processor(app, request, integration):
# type: (Flask, Callable[[], Request], FlaskIntegration) -> EventProcessor
def inner(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
if request is None:
return event
with capture_internal_exceptions():
FlaskRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
_add_user_to_event(event)
return event
return inner
def _capture_exception(sender, exception, **kwargs):
# type: (Flask, Union[ValueError, BaseException], **Any) -> None
hub = Hub.current
if hub.get_integration(FlaskIntegration) is None:
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exception,
client_options=client.options,
mechanism={"type": "flask", "handled": False},
)
hub.capture_event(event, hint=hint)
def _add_user_to_event(event):
# type: (Dict[str, Any]) -> None
if flask_login is None:
return
user = flask_login.current_user
if user is None:
return
with capture_internal_exceptions():
# Access this object as late as possible as accessing the user
# is relatively costly
user_info = event.setdefault("user", {})
try:
user_info.setdefault("id", user.get_id())
# TODO: more configurable user attrs here
except AttributeError:
# might happen if:
# - flask_login could not be imported
# - flask_login is not configured
# - no user is logged in
pass
# The following attribute accesses are ineffective for the general
# Flask-Login case, because the User interface of Flask-Login does not
# care about anything but the ID. However, Flask-User (based on
# Flask-Login) documents a few optional extra attributes.
#
# https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/docs/source/data_models.rst#fixed-data-model-property-names
try:
user_info.setdefault("email", user.email)
except Exception:
pass
        try:
            user_info.setdefault("username", user.username)
        except Exception:
            pass
| 8,239 | Python | 29.861423 | 154 | 0.618886 |
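# Minimal usage sketch for the integration above, assuming a standard Flask
# app; the DSN is a placeholder. transaction_style may be "endpoint" (the
# default) or "url", matching TRANSACTION_STYLE_VALUES.
import sentry_sdk
from flask import Flask
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[FlaskIntegration(transaction_style="url")],
)
# Flask.__call__ is patched above, so every request runs through the Sentry
# WSGI middleware and errors are reported via the got_request_exception signal.
app = Flask(__name__)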
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/pure_eval.py | from __future__ import absolute_import
import ast
from sentry_sdk import Hub, serializer
from sentry_sdk._types import MYPY
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.utils import walk_exception_chain, iter_stacks
if MYPY:
from typing import Optional, Dict, Any, Tuple, List
from types import FrameType
from sentry_sdk._types import Event, Hint
try:
import executing
except ImportError:
raise DidNotEnable("executing is not installed")
try:
import pure_eval
except ImportError:
raise DidNotEnable("pure_eval is not installed")
try:
# Used implicitly, just testing it's available
import asttokens # noqa
except ImportError:
raise DidNotEnable("asttokens is not installed")
class PureEvalIntegration(Integration):
identifier = "pure_eval"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def add_executing_info(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if Hub.current.get_integration(PureEvalIntegration) is None:
return event
if hint is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception, (_exc_type, _exc_value, exc_tb) in zip(
reversed(values), walk_exception_chain(exc_info)
):
sentry_frames = [
frame
for frame in exception.get("stacktrace", {}).get("frames", [])
if frame.get("function")
]
tbs = list(iter_stacks(exc_tb))
if len(sentry_frames) != len(tbs):
continue
for sentry_frame, tb in zip(sentry_frames, tbs):
sentry_frame["vars"] = (
pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
)
return event
def pure_eval_frame(frame):
# type: (FrameType) -> Dict[str, Any]
source = executing.Source.for_frame(frame)
if not source.tree:
return {}
statements = source.statements_at_line(frame.f_lineno)
if not statements:
return {}
scope = stmt = list(statements)[0]
while True:
# Get the parent first in case the original statement is already
# a function definition, e.g. if we're calling a decorator
# In that case we still want the surrounding scope, not that function
scope = scope.parent
if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
break
evaluator = pure_eval.Evaluator.from_frame(frame)
expressions = evaluator.interesting_expressions_grouped(scope)
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> Tuple[int, int]
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
def start(n):
# type: (ast.expr) -> Tuple[int, int]
return (n.lineno, n.col_offset)
nodes_before_stmt = [
node for node in nodes if start(node) < stmt.last_token.end # type: ignore
]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(start(node) for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
lineno, col_offset = min(start(node) for node in nodes)
return (-lineno, -col_offset)
# This adds the first_token and last_token attributes to nodes
atok = source.asttokens()
expressions.sort(key=closeness, reverse=True)
return {
atok.get_text(nodes[0]): value
for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
}
| 4,536 | Python | 31.640288 | 87 | 0.606702 |
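# Minimal usage sketch for the integration above, assuming `executing`,
# `pure_eval` and `asttokens` are installed; the DSN is a placeholder. With
# the integration enabled, frames in error events carry the values of
# interesting expressions near the failing statement, not just plain locals.
import sentry_sdk
from sentry_sdk.integrations.pure_eval import PureEvalIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[PureEvalIntegration()],
)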
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/quart.py | from __future__ import absolute_import
from sentry_sdk.hub import _should_send_default_pii, Hub
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.scope import Scope
from sentry_sdk.tracing import SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Dict
from typing import Union
from sentry_sdk._types import EventProcessor
try:
import quart_auth # type: ignore
except ImportError:
quart_auth = None
try:
from quart import ( # type: ignore
has_request_context,
has_websocket_context,
Request,
Quart,
request,
websocket,
)
from quart.signals import ( # type: ignore
got_background_exception,
got_request_exception,
got_websocket_exception,
request_started,
websocket_started,
)
except ImportError:
raise DidNotEnable("Quart is not installed")
TRANSACTION_STYLE_VALUES = ("endpoint", "url")
class QuartIntegration(Integration):
identifier = "quart"
transaction_style = ""
def __init__(self, transaction_style="endpoint"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
request_started.connect(_request_websocket_started)
websocket_started.connect(_request_websocket_started)
got_background_exception.connect(_capture_exception)
got_request_exception.connect(_capture_exception)
got_websocket_exception.connect(_capture_exception)
old_app = Quart.__call__
async def sentry_patched_asgi_app(self, scope, receive, send):
# type: (Any, Any, Any, Any) -> Any
if Hub.current.get_integration(QuartIntegration) is None:
return await old_app(self, scope, receive, send)
middleware = SentryAsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))
middleware.__call__ = middleware._run_asgi3
return await middleware(scope, receive, send)
Quart.__call__ = sentry_patched_asgi_app
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (Scope, str, Request) -> None
try:
name_for_style = {
"url": request.url_rule.rule,
"endpoint": request.url_rule.endpoint,
}
scope.set_transaction_name(
name_for_style[transaction_style],
source=SOURCE_FOR_STYLE[transaction_style],
)
except Exception:
pass
def _request_websocket_started(app, **kwargs):
# type: (Quart, **Any) -> None
hub = Hub.current
integration = hub.get_integration(QuartIntegration)
if integration is None:
return
with hub.configure_scope() as scope:
if has_request_context():
request_websocket = request._get_current_object()
if has_websocket_context():
request_websocket = websocket._get_current_object()
# Set the transaction name here, but rely on ASGI middleware
# to actually start the transaction
_set_transaction_name_and_source(
scope, integration.transaction_style, request_websocket
)
evt_processor = _make_request_event_processor(
app, request_websocket, integration
)
scope.add_event_processor(evt_processor)
def _make_request_event_processor(app, request, integration):
# type: (Quart, Request, QuartIntegration) -> EventProcessor
def inner(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
if request is None:
return event
with capture_internal_exceptions():
# TODO: Figure out what to do with request body. Methods on request
# are async, but event processors are not.
request_info = event.setdefault("request", {})
request_info["url"] = request.url
request_info["query_string"] = request.query_string
request_info["method"] = request.method
request_info["headers"] = _filter_headers(dict(request.headers))
if _should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": request.access_route[0]}
_add_user_to_event(event)
return event
return inner
def _capture_exception(sender, exception, **kwargs):
# type: (Quart, Union[ValueError, BaseException], **Any) -> None
hub = Hub.current
if hub.get_integration(QuartIntegration) is None:
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exception,
client_options=client.options,
mechanism={"type": "quart", "handled": False},
)
hub.capture_event(event, hint=hint)
def _add_user_to_event(event):
# type: (Dict[str, Any]) -> None
if quart_auth is None:
return
user = quart_auth.current_user
if user is None:
return
with capture_internal_exceptions():
user_info = event.setdefault("user", {})
user_info["id"] = quart_auth.current_user._auth_id
| 5,867 | Python | 30.047619 | 87 | 0.631328 |
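# Minimal usage sketch for the integration above, assuming a standard Quart
# app; the DSN is a placeholder.
import sentry_sdk
from quart import Quart
from sentry_sdk.integrations.quart import QuartIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[QuartIntegration(transaction_style="endpoint")],
)
# Quart.__call__ is patched above, so requests and websockets run through the
# Sentry ASGI middleware.
app = Quart(__name__)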
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/threading.py | from __future__ import absolute_import
import sys
from threading import Thread, current_thread
from sentry_sdk import Hub
from sentry_sdk._compat import reraise
from sentry_sdk._types import MYPY
from sentry_sdk.integrations import Integration
from sentry_sdk.utils import event_from_exception, capture_internal_exceptions
if MYPY:
from typing import Any
from typing import TypeVar
from typing import Callable
from typing import Optional
from sentry_sdk._types import ExcInfo
F = TypeVar("F", bound=Callable[..., Any])
class ThreadingIntegration(Integration):
identifier = "threading"
def __init__(self, propagate_hub=False):
# type: (bool) -> None
self.propagate_hub = propagate_hub
@staticmethod
def setup_once():
# type: () -> None
old_start = Thread.start
def sentry_start(self, *a, **kw):
# type: (Thread, *Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(ThreadingIntegration)
if integration is not None:
if not integration.propagate_hub:
hub_ = None
else:
hub_ = Hub(hub)
# Patching instance methods in `start()` creates a reference cycle if
# done in a naive way. See
# https://github.com/getsentry/sentry-python/pull/434
#
                # Instead of holding a reference to the thread here, the
                # wrapper looks the thread up via current_thread() at run
                # time, which avoids creating such a reference cycle.
with capture_internal_exceptions():
new_run = _wrap_run(hub_, getattr(self.run, "__func__", self.run))
self.run = new_run # type: ignore
return old_start(self, *a, **kw)
Thread.start = sentry_start # type: ignore
def _wrap_run(parent_hub, old_run_func):
# type: (Optional[Hub], F) -> F
def run(*a, **kw):
# type: (*Any, **Any) -> Any
hub = parent_hub or Hub.current
with hub:
try:
self = current_thread()
return old_run_func(self, *a, **kw)
except Exception:
reraise(*_capture_exception())
return run # type: ignore
def _capture_exception():
# type: () -> ExcInfo
hub = Hub.current
exc_info = sys.exc_info()
if hub.get_integration(ThreadingIntegration) is not None:
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "threading", "handled": False},
)
hub.capture_event(event, hint=hint)
return exc_info
| 2,840 | Python | 30.21978 | 99 | 0.580986 |
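# Minimal usage sketch for the integration above; the DSN is a placeholder.
# propagate_hub=True lets child threads inherit the parent thread's hub
# (tags, breadcrumbs, user) instead of starting from a bare hub.
import threading
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[ThreadingIntegration(propagate_hub=True)],
)
def work():
    raise RuntimeError("captured by the patched Thread.run, then re-raised")
threading.Thread(target=work).start()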
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/boto3.py | from __future__ import absolute_import
from sentry_sdk import Hub
from sentry_sdk.consts import OP
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.tracing import Span
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
from typing import Type
try:
from botocore import __version__ as BOTOCORE_VERSION # type: ignore
from botocore.client import BaseClient # type: ignore
from botocore.response import StreamingBody # type: ignore
from botocore.awsrequest import AWSRequest # type: ignore
except ImportError:
raise DidNotEnable("botocore is not installed")
class Boto3Integration(Integration):
identifier = "boto3"
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, BOTOCORE_VERSION.split(".")[:3]))
except (ValueError, TypeError):
raise DidNotEnable(
"Unparsable botocore version: {}".format(BOTOCORE_VERSION)
)
if version < (1, 12):
raise DidNotEnable("Botocore 1.12 or newer is required.")
orig_init = BaseClient.__init__
def sentry_patched_init(self, *args, **kwargs):
# type: (Type[BaseClient], *Any, **Any) -> None
orig_init(self, *args, **kwargs)
meta = self.meta
service_id = meta.service_model.service_id.hyphenize()
meta.events.register(
"request-created",
partial(_sentry_request_created, service_id=service_id),
)
meta.events.register("after-call", _sentry_after_call)
meta.events.register("after-call-error", _sentry_after_call_error)
BaseClient.__init__ = sentry_patched_init
def _sentry_request_created(service_id, request, operation_name, **kwargs):
# type: (str, AWSRequest, str, **Any) -> None
hub = Hub.current
if hub.get_integration(Boto3Integration) is None:
return
description = "aws.%s.%s" % (service_id, operation_name)
span = hub.start_span(
hub=hub,
op=OP.HTTP_CLIENT,
description=description,
)
span.set_tag("aws.service_id", service_id)
span.set_tag("aws.operation_name", operation_name)
span.set_data("aws.request.url", request.url)
    # Enter the span here so that subsequent HTTP calls/retries
    # are attached to it.
span.__enter__()
    # request.context is an open-ended data structure
    # where we can add anything useful to the request life cycle.
request.context["_sentrysdk_span"] = span
def _sentry_after_call(context, parsed, **kwargs):
# type: (Dict[str, Any], Dict[str, Any], **Any) -> None
span = context.pop("_sentrysdk_span", None) # type: Optional[Span]
# Span could be absent if the integration is disabled.
if span is None:
return
span.__exit__(None, None, None)
body = parsed.get("Body")
if not isinstance(body, StreamingBody):
return
streaming_span = span.start_child(
op=OP.HTTP_CLIENT_STREAM,
description=span.description,
)
orig_read = body.read
orig_close = body.close
def sentry_streaming_body_read(*args, **kwargs):
# type: (*Any, **Any) -> bytes
try:
ret = orig_read(*args, **kwargs)
if not ret:
streaming_span.finish()
return ret
except Exception:
streaming_span.finish()
raise
body.read = sentry_streaming_body_read
def sentry_streaming_body_close(*args, **kwargs):
# type: (*Any, **Any) -> None
streaming_span.finish()
orig_close(*args, **kwargs)
body.close = sentry_streaming_body_close
def _sentry_after_call_error(context, exception, **kwargs):
# type: (Dict[str, Any], Type[BaseException], **Any) -> None
span = context.pop("_sentrysdk_span", None) # type: Optional[Span]
# Span could be absent if the integration is disabled.
if span is None:
return
span.__exit__(type(exception), exception, None)
| 4,180 | Python | 30.674242 | 78 | 0.625359 |
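# Minimal usage sketch for the integration above, assuming botocore >= 1.12,
# valid AWS credentials and an active transaction; the DSN is a placeholder.
# Each client call becomes an http.client span such as "aws.s3.ListBuckets",
# with a child streaming span when the response body is a StreamingBody.
import boto3
import sentry_sdk
from sentry_sdk.integrations.boto3 import Boto3Integration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    integrations=[Boto3Integration()],
)
with sentry_sdk.start_transaction(name="s3-demo"):
    s3 = boto3.client("s3")
    s3.list_buckets()  # recorded as an "aws.s3.ListBuckets" span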
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/aws_lambda.py | from datetime import datetime, timedelta
from os import environ
import sys
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT, Transaction
from sentry_sdk._compat import reraise
from sentry_sdk.utils import (
AnnotatedValue,
capture_internal_exceptions,
event_from_exception,
logger,
TimeoutThread,
)
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import TypeVar
from typing import Callable
from typing import Optional
from sentry_sdk._types import EventProcessor, Event, Hint
F = TypeVar("F", bound=Callable[..., Any])
# Constants
TIMEOUT_WARNING_BUFFER = 1500  # Buffer time (in milliseconds) required to send the timeout warning to Sentry
MILLIS_TO_SECONDS = 1000.0
def _wrap_init_error(init_error):
# type: (F) -> F
def sentry_init_error(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return init_error(*args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
exc_info = sys.exc_info()
if exc_info and all(exc_info):
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
return init_error(*args, **kwargs)
return sentry_init_error # type: ignore
def _wrap_handler(handler):
# type: (F) -> F
def sentry_handler(aws_event, aws_context, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
# Per https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html,
# `event` here is *likely* a dictionary, but also might be a number of
# other types (str, int, float, None).
#
# In some cases, it is a list (if the user is batch-invoking their
# function, for example), in which case we'll use the first entry as a
# representative from which to try pulling request data. (Presumably it
# will be the same for all events in the list, since they're all hitting
# the lambda in the same request.)
if isinstance(aws_event, list):
request_data = aws_event[0]
batch_size = len(aws_event)
else:
request_data = aws_event
batch_size = 1
if not isinstance(request_data, dict):
# If we're not dealing with a dictionary, we won't be able to get
# headers, path, http method, etc in any case, so it's fine that
# this is empty
request_data = {}
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is None:
return handler(aws_event, aws_context, *args, **kwargs)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
configured_time = aws_context.get_remaining_time_in_millis()
with hub.push_scope() as scope:
timeout_thread = None
with capture_internal_exceptions():
scope.clear_breadcrumbs()
scope.add_event_processor(
_make_request_event_processor(
request_data, aws_context, configured_time
)
)
scope.set_tag(
"aws_region", aws_context.invoked_function_arn.split(":")[3]
)
if batch_size > 1:
scope.set_tag("batch_request", True)
scope.set_tag("batch_size", batch_size)
                # Start the timeout-warning thread only if the configured time
                # is greater than the warning buffer and the timeout_warning
                # parameter is set to True.
if (
integration.timeout_warning
and configured_time > TIMEOUT_WARNING_BUFFER
):
waiting_time = (
configured_time - TIMEOUT_WARNING_BUFFER
) / MILLIS_TO_SECONDS
timeout_thread = TimeoutThread(
waiting_time,
configured_time / MILLIS_TO_SECONDS,
)
# Starting the thread to raise timeout warning exception
timeout_thread.start()
headers = request_data.get("headers")
            # An AWS service may set an explicit `{headers: None}`, so we can't rely on `.get()`'s default.
if headers is None:
headers = {}
transaction = Transaction.continue_from_headers(
headers,
op=OP.FUNCTION_AWS,
name=aws_context.function_name,
source=TRANSACTION_SOURCE_COMPONENT,
)
with hub.start_transaction(
transaction,
custom_sampling_context={
"aws_event": aws_event,
"aws_context": aws_context,
},
):
try:
return handler(aws_event, aws_context, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
sentry_event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "aws_lambda", "handled": False},
)
hub.capture_event(sentry_event, hint=hint)
reraise(*exc_info)
finally:
if timeout_thread:
timeout_thread.stop()
return sentry_handler # type: ignore
def _drain_queue():
# type: () -> None
with capture_internal_exceptions():
hub = Hub.current
integration = hub.get_integration(AwsLambdaIntegration)
if integration is not None:
# Flush out the event queue before AWS kills the
# process.
hub.flush()
class AwsLambdaIntegration(Integration):
identifier = "aws_lambda"
def __init__(self, timeout_warning=False):
# type: (bool) -> None
self.timeout_warning = timeout_warning
@staticmethod
def setup_once():
# type: () -> None
lambda_bootstrap = get_lambda_bootstrap()
if not lambda_bootstrap:
logger.warning(
"Not running in AWS Lambda environment, "
"AwsLambdaIntegration disabled (could not find bootstrap module)"
)
return
if not hasattr(lambda_bootstrap, "handle_event_request"):
logger.warning(
"Not running in AWS Lambda environment, "
"AwsLambdaIntegration disabled (could not find handle_event_request)"
)
return
pre_37 = hasattr(lambda_bootstrap, "handle_http_request") # Python 3.6 or 2.7
if pre_37:
old_handle_event_request = lambda_bootstrap.handle_event_request
def sentry_handle_event_request(request_handler, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
request_handler = _wrap_handler(request_handler)
return old_handle_event_request(request_handler, *args, **kwargs)
lambda_bootstrap.handle_event_request = sentry_handle_event_request
old_handle_http_request = lambda_bootstrap.handle_http_request
def sentry_handle_http_request(request_handler, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
request_handler = _wrap_handler(request_handler)
return old_handle_http_request(request_handler, *args, **kwargs)
lambda_bootstrap.handle_http_request = sentry_handle_http_request
# Patch to_json to drain the queue. This should work even when the
# SDK is initialized inside of the handler
old_to_json = lambda_bootstrap.to_json
def sentry_to_json(*args, **kwargs):
# type: (*Any, **Any) -> Any
_drain_queue()
return old_to_json(*args, **kwargs)
lambda_bootstrap.to_json = sentry_to_json
else:
lambda_bootstrap.LambdaRuntimeClient.post_init_error = _wrap_init_error(
lambda_bootstrap.LambdaRuntimeClient.post_init_error
)
old_handle_event_request = lambda_bootstrap.handle_event_request
def sentry_handle_event_request( # type: ignore
lambda_runtime_client, request_handler, *args, **kwargs
):
request_handler = _wrap_handler(request_handler)
return old_handle_event_request(
lambda_runtime_client, request_handler, *args, **kwargs
)
lambda_bootstrap.handle_event_request = sentry_handle_event_request
# Patch the runtime client to drain the queue. This should work
# even when the SDK is initialized inside of the handler
def _wrap_post_function(f):
# type: (F) -> F
def inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
_drain_queue()
return f(*args, **kwargs)
return inner # type: ignore
lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = (
_wrap_post_function(
lambda_bootstrap.LambdaRuntimeClient.post_invocation_result
)
)
lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = (
_wrap_post_function(
lambda_bootstrap.LambdaRuntimeClient.post_invocation_error
)
)
def get_lambda_bootstrap():
# type: () -> Optional[Any]
# Python 2.7: Everything is in `__main__`.
#
# Python 3.7: If the bootstrap module is *already imported*, it is the
# one we actually want to use (no idea what's in __main__)
#
# Python 3.8: bootstrap is also importable, but will be the same file
# as __main__ imported under a different name:
#
# sys.modules['__main__'].__file__ == sys.modules['bootstrap'].__file__
# sys.modules['__main__'] is not sys.modules['bootstrap']
#
# Python 3.9: bootstrap is in __main__.awslambdaricmain
#
# On container builds using the `aws-lambda-python-runtime-interface-client`
    # (awslambdaric) module, bootstrap is located in sys.modules['__main__'].bootstrap
#
# Such a setup would then make all monkeypatches useless.
if "bootstrap" in sys.modules:
return sys.modules["bootstrap"]
elif "__main__" in sys.modules:
module = sys.modules["__main__"]
# python3.9 runtime
if hasattr(module, "awslambdaricmain") and hasattr(
module.awslambdaricmain, "bootstrap"
):
return module.awslambdaricmain.bootstrap
elif hasattr(module, "bootstrap"):
# awslambdaric python module in container builds
return module.bootstrap
# python3.8 runtime
return module
else:
return None
def _make_request_event_processor(aws_event, aws_context, configured_timeout):
# type: (Any, Any, Any) -> EventProcessor
start_time = datetime.utcnow()
def event_processor(sentry_event, hint, start_time=start_time):
# type: (Event, Hint, datetime) -> Optional[Event]
        remaining_time_in_millis = aws_context.get_remaining_time_in_millis()
        exec_duration = configured_timeout - remaining_time_in_millis
extra = sentry_event.setdefault("extra", {})
extra["lambda"] = {
"function_name": aws_context.function_name,
"function_version": aws_context.function_version,
"invoked_function_arn": aws_context.invoked_function_arn,
"aws_request_id": aws_context.aws_request_id,
"execution_duration_in_millis": exec_duration,
"remaining_time_in_millis": remaining_time_in_milis,
}
extra["cloudwatch logs"] = {
"url": _get_cloudwatch_logs_url(aws_context, start_time),
"log_group": aws_context.log_group_name,
"log_stream": aws_context.log_stream_name,
}
request = sentry_event.get("request", {})
if "httpMethod" in aws_event:
request["method"] = aws_event["httpMethod"]
request["url"] = _get_url(aws_event, aws_context)
if "queryStringParameters" in aws_event:
request["query_string"] = aws_event["queryStringParameters"]
if "headers" in aws_event:
request["headers"] = _filter_headers(aws_event["headers"])
if _should_send_default_pii():
user_info = sentry_event.setdefault("user", {})
identity = aws_event.get("identity")
if identity is None:
identity = {}
id = identity.get("userArn")
if id is not None:
user_info.setdefault("id", id)
ip = identity.get("sourceIp")
if ip is not None:
user_info.setdefault("ip_address", ip)
if "body" in aws_event:
request["data"] = aws_event.get("body", "")
else:
if aws_event.get("body", None):
                # Unfortunately we couldn't find a way to get a structured body
                # from the AWS event, meaning every body is unstructured to us.
request["data"] = AnnotatedValue.removed_because_raw_data()
sentry_event["request"] = request
return sentry_event
return event_processor
def _get_url(aws_event, aws_context):
# type: (Any, Any) -> str
path = aws_event.get("path", None)
headers = aws_event.get("headers")
if headers is None:
headers = {}
host = headers.get("Host", None)
proto = headers.get("X-Forwarded-Proto", None)
if proto and host and path:
return "{}://{}{}".format(proto, host, path)
return "awslambda:///{}".format(aws_context.function_name)
def _get_cloudwatch_logs_url(aws_context, start_time):
# type: (Any, datetime) -> str
"""
Generates a CloudWatchLogs console URL based on the context object
Arguments:
aws_context {Any} -- context from lambda handler
Returns:
str -- AWS Console URL to logs.
"""
formatstring = "%Y-%m-%dT%H:%M:%SZ"
region = environ.get("AWS_REGION", "")
url = (
"https://console.{domain}/cloudwatch/home?region={region}"
"#logEventViewer:group={log_group};stream={log_stream}"
";start={start_time};end={end_time}"
).format(
domain="amazonaws.cn" if region.startswith("cn-") else "aws.amazon.com",
region=region,
log_group=aws_context.log_group_name,
log_stream=aws_context.log_stream_name,
start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
end_time=(datetime.utcnow() + timedelta(seconds=2)).strftime(formatstring),
)
return url
| 15,751 | Python | 35.378753 | 105 | 0.57044 |
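# Minimal usage sketch for the integration above; the DSN is a placeholder.
# timeout_warning=True raises a timeout-warning event via TimeoutThread when
# the remaining time drops below TIMEOUT_WARNING_BUFFER milliseconds.
import sentry_sdk
from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[AwsLambdaIntegration(timeout_warning=True)],
)
def handler(event, context):
    # Unhandled exceptions here are captured with the "aws_lambda" mechanism
    # and the queue is drained before the runtime posts its result.
    return {"statusCode": 200}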
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/serverless.py | import sys
from sentry_sdk.hub import Hub
from sentry_sdk.utils import event_from_exception
from sentry_sdk._compat import reraise
from sentry_sdk._functools import wraps
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
from typing import TypeVar
from typing import Union
from typing import Optional
from typing import overload
F = TypeVar("F", bound=Callable[..., Any])
else:
def overload(x):
# type: (F) -> F
return x
@overload
def serverless_function(f, flush=True):
# type: (F, bool) -> F
pass
@overload
def serverless_function(f=None, flush=True): # noqa: F811
# type: (None, bool) -> Callable[[F], F]
pass
def serverless_function(f=None, flush=True): # noqa
# type: (Optional[F], bool) -> Union[F, Callable[[F], F]]
def wrapper(f):
# type: (F) -> F
@wraps(f)
def inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
with Hub(Hub.current) as hub:
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
try:
return f(*args, **kwargs)
except Exception:
_capture_and_reraise()
finally:
if flush:
_flush_client()
return inner # type: ignore
if f is None:
return wrapper
else:
return wrapper(f)
def _capture_and_reraise():
# type: () -> None
exc_info = sys.exc_info()
hub = Hub.current
if hub.client is not None:
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "serverless", "handled": False},
)
hub.capture_event(event, hint=hint)
reraise(*exc_info)
def _flush_client():
# type: () -> None
return Hub.current.flush()
| 1,957 | Python | 21.767442 | 63 | 0.556975 |
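# Minimal usage sketch for the decorator above; the DSN is a placeholder.
# flush=True (the default) drains the client before returning, which matters
# on platforms that may freeze or kill the process right after the call.
import sentry_sdk
from sentry_sdk.integrations.serverless import serverless_function
sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN
@serverless_function
def handler(event, context):
    raise RuntimeError("captured, flushed, then re-raised")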
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/starlite.py | from typing import TYPE_CHECKING
from pydantic import BaseModel # type: ignore
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.utils import event_from_exception, transaction_from_function
try:
from starlite import Request, Starlite, State # type: ignore
from starlite.handlers.base import BaseRouteHandler # type: ignore
from starlite.middleware import DefineMiddleware # type: ignore
from starlite.plugins.base import get_plugin_for_value # type: ignore
from starlite.routes.http import HTTPRoute # type: ignore
from starlite.utils import ConnectionDataExtractor, is_async_callable, Ref # type: ignore
if TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Union
from starlite.types import ( # type: ignore
ASGIApp,
HTTPReceiveMessage,
HTTPScope,
Message,
Middleware,
Receive,
Scope,
Send,
WebSocketReceiveMessage,
)
from starlite import MiddlewareProtocol
from sentry_sdk._types import Event
except ImportError:
raise DidNotEnable("Starlite is not installed")
_DEFAULT_TRANSACTION_NAME = "generic Starlite request"
class SentryStarliteASGIMiddleware(SentryAsgiMiddleware):
def __init__(self, app: "ASGIApp"):
super().__init__(
app=app,
unsafe_context_data=False,
transaction_style="endpoint",
mechanism_type="asgi",
)
class StarliteIntegration(Integration):
identifier = "starlite"
@staticmethod
def setup_once() -> None:
patch_app_init()
patch_middlewares()
patch_http_route_handle()
def patch_app_init() -> None:
"""
Replaces the Starlite class's `__init__` function in order to inject `after_exception` handlers and set the
    `SentryStarliteASGIMiddleware` as the outermost middleware in the stack.
See:
- https://starlite-api.github.io/starlite/usage/0-the-starlite-app/5-application-hooks/#after-exception
- https://starlite-api.github.io/starlite/usage/7-middleware/0-middleware-intro/
"""
old__init__ = Starlite.__init__
def injection_wrapper(self: "Starlite", *args: "Any", **kwargs: "Any") -> None:
after_exception = kwargs.pop("after_exception", [])
kwargs.update(
after_exception=[
exception_handler,
*(
after_exception
if isinstance(after_exception, list)
else [after_exception]
),
]
)
SentryStarliteASGIMiddleware.__call__ = SentryStarliteASGIMiddleware._run_asgi3
middleware = kwargs.pop("middleware", None) or []
kwargs["middleware"] = [SentryStarliteASGIMiddleware, *middleware]
old__init__(self, *args, **kwargs)
Starlite.__init__ = injection_wrapper
def patch_middlewares() -> None:
old__resolve_middleware_stack = BaseRouteHandler.resolve_middleware
def resolve_middleware_wrapper(self: "Any") -> "List[Middleware]":
return [
enable_span_for_middleware(middleware)
for middleware in old__resolve_middleware_stack(self)
]
BaseRouteHandler.resolve_middleware = resolve_middleware_wrapper
def enable_span_for_middleware(middleware: "Middleware") -> "Middleware":
if (
not hasattr(middleware, "__call__") # noqa: B004
or middleware is SentryStarliteASGIMiddleware
):
return middleware
if isinstance(middleware, DefineMiddleware):
old_call: "ASGIApp" = middleware.middleware.__call__
else:
old_call = middleware.__call__
async def _create_span_call(
self: "MiddlewareProtocol", scope: "Scope", receive: "Receive", send: "Send"
) -> None:
hub = Hub.current
integration = hub.get_integration(StarliteIntegration)
if integration is not None:
middleware_name = self.__class__.__name__
with hub.start_span(
op=OP.MIDDLEWARE_STARLITE, description=middleware_name
) as middleware_span:
middleware_span.set_tag("starlite.middleware_name", middleware_name)
# Creating spans for the "receive" callback
async def _sentry_receive(
*args: "Any", **kwargs: "Any"
) -> "Union[HTTPReceiveMessage, WebSocketReceiveMessage]":
hub = Hub.current
with hub.start_span(
op=OP.MIDDLEWARE_STARLITE_RECEIVE,
description=getattr(receive, "__qualname__", str(receive)),
) as span:
span.set_tag("starlite.middleware_name", middleware_name)
return await receive(*args, **kwargs)
receive_name = getattr(receive, "__name__", str(receive))
receive_patched = receive_name == "_sentry_receive"
new_receive = _sentry_receive if not receive_patched else receive
# Creating spans for the "send" callback
async def _sentry_send(message: "Message") -> None:
hub = Hub.current
with hub.start_span(
op=OP.MIDDLEWARE_STARLITE_SEND,
description=getattr(send, "__qualname__", str(send)),
) as span:
span.set_tag("starlite.middleware_name", middleware_name)
return await send(message)
send_name = getattr(send, "__name__", str(send))
send_patched = send_name == "_sentry_send"
new_send = _sentry_send if not send_patched else send
return await old_call(self, scope, new_receive, new_send)
else:
return await old_call(self, scope, receive, send)
not_yet_patched = old_call.__name__ not in ["_create_span_call"]
if not_yet_patched:
if isinstance(middleware, DefineMiddleware):
middleware.middleware.__call__ = _create_span_call
else:
middleware.__call__ = _create_span_call
return middleware
def patch_http_route_handle() -> None:
old_handle = HTTPRoute.handle
async def handle_wrapper(
self: "HTTPRoute", scope: "HTTPScope", receive: "Receive", send: "Send"
) -> None:
hub = Hub.current
integration: StarliteIntegration = hub.get_integration(StarliteIntegration)
if integration is None:
return await old_handle(self, scope, receive, send)
with hub.configure_scope() as sentry_scope:
request: "Request[Any, Any]" = scope["app"].request_class(
scope=scope, receive=receive, send=send
)
extracted_request_data = ConnectionDataExtractor(
parse_body=True, parse_query=True
)(request)
body = extracted_request_data.pop("body")
request_data = await body
def event_processor(event: "Event", _: "Dict[str, Any]") -> "Event":
route_handler = scope.get("route_handler")
request_info = event.get("request", {})
request_info["content_length"] = len(scope.get("_body", b""))
if _should_send_default_pii():
request_info["cookies"] = extracted_request_data["cookies"]
if request_data is not None:
request_info["data"] = request_data
func = None
if route_handler.name is not None:
tx_name = route_handler.name
elif isinstance(route_handler.fn, Ref):
func = route_handler.fn.value
else:
func = route_handler.fn
if func is not None:
tx_name = transaction_from_function(func)
tx_info = {"source": SOURCE_FOR_STYLE["endpoint"]}
if not tx_name:
tx_name = _DEFAULT_TRANSACTION_NAME
tx_info = {"source": TRANSACTION_SOURCE_ROUTE}
event.update(
request=request_info, transaction=tx_name, transaction_info=tx_info
)
return event
sentry_scope._name = StarliteIntegration.identifier
sentry_scope.add_event_processor(event_processor)
return await old_handle(self, scope, receive, send)
HTTPRoute.handle = handle_wrapper
def retrieve_user_from_scope(scope: "Scope") -> "Optional[Dict[str, Any]]":
scope_user = scope.get("user", {})
if not scope_user:
return None
if isinstance(scope_user, dict):
return scope_user
if isinstance(scope_user, BaseModel):
return scope_user.dict()
if hasattr(scope_user, "asdict"): # dataclasses
return scope_user.asdict()
plugin = get_plugin_for_value(scope_user)
if plugin and not is_async_callable(plugin.to_dict):
return plugin.to_dict(scope_user)
return None
def exception_handler(exc: Exception, scope: "Scope", _: "State") -> None:
hub = Hub.current
if hub.get_integration(StarliteIntegration) is None:
return
user_info: "Optional[Dict[str, Any]]" = None
if _should_send_default_pii():
user_info = retrieve_user_from_scope(scope)
if user_info and isinstance(user_info, dict):
with hub.configure_scope() as sentry_scope:
sentry_scope.set_user(user_info)
event, hint = event_from_exception(
exc,
client_options=hub.client.options if hub.client else None,
mechanism={"type": StarliteIntegration.identifier, "handled": False},
)
hub.capture_event(event, hint=hint)
| 10,090 | Python | 36.099265 | 111 | 0.595837 |
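# Minimal usage sketch for the integration above, assuming Starlite 1.x; the
# DSN is a placeholder. Starlite.__init__ is patched, so the Sentry ASGI
# middleware and the after_exception handler are injected automatically.
import sentry_sdk
from starlite import Starlite, get
from sentry_sdk.integrations.starlite import StarliteIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[StarliteIntegration()],
)
@get("/")
def index() -> dict:
    return {"hello": "world"}
app = Starlite(route_handlers=[index])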
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/tornado.py | import weakref
import contextlib
from inspect import iscoroutinefunction
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.tracing import (
TRANSACTION_SOURCE_COMPONENT,
TRANSACTION_SOURCE_ROUTE,
Transaction,
)
from sentry_sdk.utils import (
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
event_from_exception,
capture_internal_exceptions,
transaction_from_function,
)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations._wsgi_common import (
RequestExtractor,
_filter_headers,
_is_json_content_type,
)
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk._compat import iteritems
try:
from tornado import version_info as TORNADO_VERSION
from tornado.web import RequestHandler, HTTPError
from tornado.gen import coroutine
except ImportError:
raise DidNotEnable("Tornado not installed")
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Optional
from typing import Dict
from typing import Callable
from typing import Generator
from sentry_sdk._types import EventProcessor
class TornadoIntegration(Integration):
identifier = "tornado"
@staticmethod
def setup_once():
# type: () -> None
if TORNADO_VERSION < (5, 0):
raise DidNotEnable("Tornado 5+ required")
if not HAS_REAL_CONTEXTVARS:
# Tornado is async. We better have contextvars or we're going to leak
# state between requests.
raise DidNotEnable(
"The tornado integration for Sentry requires Python 3.7+ or the aiocontextvars package"
+ CONTEXTVARS_ERROR_MESSAGE
)
ignore_logger("tornado.access")
old_execute = RequestHandler._execute
awaitable = iscoroutinefunction(old_execute)
if awaitable:
            # Starting with Tornado 6, the RequestHandler._execute method is a
            # standard Python coroutine (async/await), so our replacement must
            # be a coroutine function too.
async def sentry_execute_request_handler(self, *args, **kwargs):
# type: (RequestHandler, *Any, **Any) -> Any
with _handle_request_impl(self):
return await old_execute(self, *args, **kwargs)
else:
@coroutine # type: ignore
def sentry_execute_request_handler(self, *args, **kwargs): # type: ignore
# type: (RequestHandler, *Any, **Any) -> Any
with _handle_request_impl(self):
result = yield from old_execute(self, *args, **kwargs)
return result
RequestHandler._execute = sentry_execute_request_handler
old_log_exception = RequestHandler.log_exception
def sentry_log_exception(self, ty, value, tb, *args, **kwargs):
# type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]
_capture_exception(ty, value, tb)
return old_log_exception(self, ty, value, tb, *args, **kwargs)
RequestHandler.log_exception = sentry_log_exception
@contextlib.contextmanager
def _handle_request_impl(self):
# type: (RequestHandler) -> Generator[None, None, None]
hub = Hub.current
integration = hub.get_integration(TornadoIntegration)
    if integration is None:
        # Without the integration there is nothing to instrument; return after
        # the yield so the generator stops and the contextmanager protocol is
        # not violated by the second yield below.
        yield
        return
weak_handler = weakref.ref(self)
with Hub(hub) as hub:
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
processor = _make_event_processor(weak_handler)
scope.add_event_processor(processor)
transaction = Transaction.continue_from_headers(
self.request.headers,
op=OP.HTTP_SERVER,
# Like with all other integrations, this is our
# fallback transaction in case there is no route.
# sentry_urldispatcher_resolve is responsible for
# setting a transaction name later.
name="generic Tornado request",
source=TRANSACTION_SOURCE_ROUTE,
)
with hub.start_transaction(
transaction, custom_sampling_context={"tornado_request": self.request}
):
yield
def _capture_exception(ty, value, tb):
# type: (type, BaseException, Any) -> None
hub = Hub.current
if hub.get_integration(TornadoIntegration) is None:
return
if isinstance(value, HTTPError):
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
(ty, value, tb),
client_options=client.options,
mechanism={"type": "tornado", "handled": False},
)
hub.capture_event(event, hint=hint)
def _make_event_processor(weak_handler):
# type: (Callable[[], RequestHandler]) -> EventProcessor
def tornado_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
handler = weak_handler()
if handler is None:
return event
request = handler.request
with capture_internal_exceptions():
method = getattr(handler, handler.request.method.lower())
event["transaction"] = transaction_from_function(method)
event["transaction_info"] = {"source": TRANSACTION_SOURCE_COMPONENT}
with capture_internal_exceptions():
extractor = TornadoRequestExtractor(request)
extractor.extract_into_event(event)
request_info = event["request"]
request_info["url"] = "%s://%s%s" % (
request.protocol,
request.host,
request.path,
)
request_info["query_string"] = request.query
request_info["method"] = request.method
request_info["env"] = {"REMOTE_ADDR": request.remote_ip}
request_info["headers"] = _filter_headers(dict(request.headers))
with capture_internal_exceptions():
if handler.current_user and _should_send_default_pii():
event.setdefault("user", {}).setdefault("is_authenticated", True)
return event
return tornado_processor
class TornadoRequestExtractor(RequestExtractor):
def content_length(self):
# type: () -> int
if self.request.body is None:
return 0
return len(self.request.body)
def cookies(self):
# type: () -> Dict[str, str]
return {k: v.value for k, v in iteritems(self.request.cookies)}
def raw_data(self):
# type: () -> bytes
return self.request.body
def form(self):
# type: () -> Dict[str, Any]
return {
k: [v.decode("latin1", "replace") for v in vs]
for k, vs in iteritems(self.request.body_arguments)
}
def is_json(self):
# type: () -> bool
return _is_json_content_type(self.request.headers.get("content-type"))
def files(self):
# type: () -> Dict[str, Any]
return {k: v[0] for k, v in iteritems(self.request.files) if v}
def size_of_file(self, file):
# type: (Any) -> int
return len(file.body or ())
| 7,307 | Python | 31.193832 | 108 | 0.619406 |
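# Minimal usage sketch for the integration above, assuming Tornado 5+ and
# real contextvars (Python 3.7+); the DSN is a placeholder.
import sentry_sdk
import tornado.web
from sentry_sdk.integrations.tornado import TornadoIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[TornadoIntegration()],
)
class MainHandler(tornado.web.RequestHandler):
    def get(self):
        raise RuntimeError("captured via the patched RequestHandler._execute")
app = tornado.web.Application([(r"/", MainHandler)])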
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/asyncio.py | from __future__ import absolute_import
import sys
from sentry_sdk._compat import reraise
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk._types import MYPY
from sentry_sdk.utils import event_from_exception
try:
import asyncio
from asyncio.tasks import Task
except ImportError:
raise DidNotEnable("asyncio not available")
if MYPY:
from typing import Any
from sentry_sdk._types import ExcInfo
def patch_asyncio():
# type: () -> None
orig_task_factory = None
try:
loop = asyncio.get_running_loop()
orig_task_factory = loop.get_task_factory()
def _sentry_task_factory(loop, coro):
# type: (Any, Any) -> Any
async def _coro_creating_hub_and_span():
# type: () -> None
hub = Hub(Hub.current)
with hub:
with hub.start_span(op=OP.FUNCTION, description=coro.__qualname__):
try:
await coro
except Exception:
reraise(*_capture_exception(hub))
        # Try to use the user-set task factory (if there is one)
if orig_task_factory:
return orig_task_factory(loop, _coro_creating_hub_and_span()) # type: ignore
# The default task factory in `asyncio` does not have its own function
# but is just a couple of lines in `asyncio.base_events.create_task()`
# Those lines are copied here.
# WARNING:
# If the default behavior of the task creation in asyncio changes,
# this will break!
task = Task(_coro_creating_hub_and_span(), loop=loop)
if task._source_traceback: # type: ignore
del task._source_traceback[-1] # type: ignore
return task
loop.set_task_factory(_sentry_task_factory)
except RuntimeError:
# When there is no running loop, we have nothing to patch.
pass
def _capture_exception(hub):
# type: (Hub) -> ExcInfo
exc_info = sys.exc_info()
integration = hub.get_integration(AsyncioIntegration)
if integration is not None:
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "asyncio", "handled": False},
)
hub.capture_event(event, hint=hint)
return exc_info
class AsyncioIntegration(Integration):
identifier = "asyncio"
@staticmethod
def setup_once():
# type: () -> None
patch_asyncio()
| 2,787 | Python | 28.978494 | 93 | 0.592034 |
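# Minimal usage sketch for the integration above; the DSN is a placeholder.
# patch_asyncio() needs a running loop, so initialize the SDK inside a
# coroutine for the task factory to be installed.
import asyncio
import sentry_sdk
from sentry_sdk.integrations.asyncio import AsyncioIntegration
async def main():
    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        integrations=[AsyncioIntegration()],
    )
    # Tasks created from here on get their own Hub and a `function` span.
    await asyncio.gather(asyncio.sleep(0))
asyncio.run(main())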
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/fastapi.py | import asyncio
import threading
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable
from sentry_sdk.integrations.starlette import (
StarletteIntegration,
StarletteRequestExtractor,
)
from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.utils import transaction_from_function
if MYPY:
from typing import Any, Callable, Dict
from sentry_sdk.scope import Scope
try:
import fastapi # type: ignore
except ImportError:
raise DidNotEnable("FastAPI is not installed")
_DEFAULT_TRANSACTION_NAME = "generic FastAPI request"
class FastApiIntegration(StarletteIntegration):
identifier = "fastapi"
@staticmethod
def setup_once():
# type: () -> None
patch_get_request_handler()
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (Scope, str, Any) -> None
name = ""
if transaction_style == "endpoint":
endpoint = request.scope.get("endpoint")
if endpoint:
name = transaction_from_function(endpoint) or ""
elif transaction_style == "url":
route = request.scope.get("route")
if route:
path = getattr(route, "path", None)
if path is not None:
name = path
if not name:
name = _DEFAULT_TRANSACTION_NAME
source = TRANSACTION_SOURCE_ROUTE
else:
source = SOURCE_FOR_STYLE[transaction_style]
scope.set_transaction_name(name, source=source)
def patch_get_request_handler():
# type: () -> None
old_get_request_handler = fastapi.routing.get_request_handler
def _sentry_get_request_handler(*args, **kwargs):
# type: (*Any, **Any) -> Any
dependant = kwargs.get("dependant")
if (
dependant
and dependant.call is not None
and not asyncio.iscoroutinefunction(dependant.call)
):
old_call = dependant.call
def _sentry_call(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
with hub.configure_scope() as sentry_scope:
if sentry_scope.profile is not None:
sentry_scope.profile.active_thread_id = (
threading.current_thread().ident
)
return old_call(*args, **kwargs)
dependant.call = _sentry_call
old_app = old_get_request_handler(*args, **kwargs)
async def _sentry_app(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(FastApiIntegration)
if integration is None:
return await old_app(*args, **kwargs)
with hub.configure_scope() as sentry_scope:
request = args[0]
_set_transaction_name_and_source(
sentry_scope, integration.transaction_style, request
)
extractor = StarletteRequestExtractor(request)
info = await extractor.extract_request_info()
def _make_request_event_processor(req, integration):
# type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# Extract information from request
request_info = event.get("request", {})
if info:
if "cookies" in info and _should_send_default_pii():
request_info["cookies"] = info["cookies"]
if "data" in info:
request_info["data"] = info["data"]
event["request"] = request_info
return event
return event_processor
sentry_scope._name = FastApiIntegration.identifier
sentry_scope.add_event_processor(
_make_request_event_processor(request, integration)
)
return await old_app(*args, **kwargs)
return _sentry_app
fastapi.routing.get_request_handler = _sentry_get_request_handler
| 4,446 | Python | 31.940741 | 100 | 0.562753 |
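# Minimal usage sketch for the integration above; the DSN is a placeholder.
# FastApiIntegration builds on StarletteIntegration (imported above), so the
# two are commonly enabled together.
import sentry_sdk
from fastapi import FastAPI
from sentry_sdk.integrations.starlette import StarletteIntegration
from sentry_sdk.integrations.fastapi import FastApiIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[StarletteIntegration(), FastApiIntegration()],
)
# Request handlers are wrapped by the patched get_request_handler above.
app = FastAPI()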
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/excepthook.py | import sys
from sentry_sdk.hub import Hub
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
from sentry_sdk.integrations import Integration
from sentry_sdk._types import MYPY
if MYPY:
from typing import Callable
from typing import Any
from typing import Type
from typing import Optional
from types import TracebackType
Excepthook = Callable[
[Type[BaseException], BaseException, Optional[TracebackType]],
Any,
]
class ExcepthookIntegration(Integration):
identifier = "excepthook"
always_run = False
def __init__(self, always_run=False):
# type: (bool) -> None
if not isinstance(always_run, bool):
raise ValueError(
"Invalid value for always_run: %s (must be type boolean)"
% (always_run,)
)
self.always_run = always_run
@staticmethod
def setup_once():
# type: () -> None
sys.excepthook = _make_excepthook(sys.excepthook)
def _make_excepthook(old_excepthook):
# type: (Excepthook) -> Excepthook
def sentry_sdk_excepthook(type_, value, traceback):
# type: (Type[BaseException], BaseException, Optional[TracebackType]) -> None
hub = Hub.current
integration = hub.get_integration(ExcepthookIntegration)
if integration is not None and _should_send(integration.always_run):
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
event, hint = event_from_exception(
(type_, value, traceback),
client_options=client.options,
mechanism={"type": "excepthook", "handled": False},
)
hub.capture_event(event, hint=hint)
return old_excepthook(type_, value, traceback)
return sentry_sdk_excepthook
def _should_send(always_run=False):
# type: (bool) -> bool
if always_run:
return True
if hasattr(sys, "ps1"):
# Disable the excepthook for interactive Python shells, otherwise
# every typo gets sent to Sentry.
return False
return True
| 2,242 | Python | 27.392405 | 85 | 0.621766 |
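# Minimal usage sketch for the integration above; the DSN is a placeholder.
# always_run=True also reports uncaught exceptions in interactive shells,
# which _should_send() skips by default.
import sentry_sdk
from sentry_sdk.integrations.excepthook import ExcepthookIntegration
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[ExcepthookIntegration(always_run=True)],
)
raise RuntimeError("uncaught -> reported via the patched sys.excepthook")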
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/chalice.py | import sys
from sentry_sdk._compat import reraise
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations.aws_lambda import _make_request_event_processor
from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._types import MYPY
from sentry_sdk._functools import wraps
import chalice # type: ignore
from chalice import Chalice, ChaliceViewError
from chalice.app import EventSourceHandler as ChaliceEventSourceHandler # type: ignore
if MYPY:
from typing import Any
from typing import Dict
from typing import TypeVar
from typing import Callable
F = TypeVar("F", bound=Callable[..., Any])
try:
from chalice import __version__ as CHALICE_VERSION
except ImportError:
raise DidNotEnable("Chalice is not installed")
class EventSourceHandler(ChaliceEventSourceHandler): # type: ignore
def __call__(self, event, context):
# type: (Any, Any) -> Any
hub = Hub.current
client = hub.client # type: Any
with hub.push_scope() as scope:
with capture_internal_exceptions():
configured_time = context.get_remaining_time_in_millis()
scope.add_event_processor(
_make_request_event_processor(event, context, configured_time)
)
try:
return ChaliceEventSourceHandler.__call__(self, event, context)
except Exception:
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "chalice", "handled": False},
)
hub.capture_event(event, hint=hint)
hub.flush()
reraise(*exc_info)
def _get_view_function_response(app, view_function, function_args):
# type: (Any, F, Any) -> F
@wraps(view_function)
def wrapped_view_function(**function_args):
# type: (**Any) -> Any
hub = Hub.current
client = hub.client # type: Any
with hub.push_scope() as scope:
with capture_internal_exceptions():
configured_time = app.lambda_context.get_remaining_time_in_millis()
scope.set_transaction_name(
app.lambda_context.function_name,
source=TRANSACTION_SOURCE_COMPONENT,
)
scope.add_event_processor(
_make_request_event_processor(
app.current_request.to_dict(),
app.lambda_context,
configured_time,
)
)
try:
return view_function(**function_args)
except Exception as exc:
if isinstance(exc, ChaliceViewError):
raise
exc_info = sys.exc_info()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "chalice", "handled": False},
)
hub.capture_event(event, hint=hint)
hub.flush()
raise
return wrapped_view_function # type: ignore
class ChaliceIntegration(Integration):
identifier = "chalice"
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, CHALICE_VERSION.split(".")[:3]))
except (ValueError, TypeError):
raise DidNotEnable("Unparsable Chalice version: {}".format(CHALICE_VERSION))
if version < (1, 20):
old_get_view_function_response = Chalice._get_view_function_response
else:
from chalice.app import RestAPIEventHandler
old_get_view_function_response = (
RestAPIEventHandler._get_view_function_response
)
def sentry_event_response(app, view_function, function_args):
# type: (Any, F, Dict[str, Any]) -> Any
wrapped_view_function = _get_view_function_response(
app, view_function, function_args
)
return old_get_view_function_response(
app, wrapped_view_function, function_args
)
if version < (1, 20):
Chalice._get_view_function_response = sentry_event_response
else:
RestAPIEventHandler._get_view_function_response = sentry_event_response
# for everything else (like events)
chalice.app.EventSourceHandler = EventSourceHandler
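
# Illustrative usage sketch (not part of the original module): how a Chalice
# app would enable this integration. App name and DSN are placeholders.
if __name__ == "__main__":
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[ChaliceIntegration()],
    )
    app = Chalice(app_name="demo-app")  # hypothetical app name

    @app.route("/")
    def index():
        return {"hello": "world"}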

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/stdlib.py

import os
import subprocess
import sys
import platform
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.tracing_utils import EnvironHeaders
from sentry_sdk.utils import capture_internal_exceptions, logger, safe_repr
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import List
from sentry_sdk._types import Event, Hint
try:
from httplib import HTTPConnection # type: ignore
except ImportError:
from http.client import HTTPConnection
_RUNTIME_CONTEXT = {
"name": platform.python_implementation(),
"version": "%s.%s.%s" % (sys.version_info[:3]),
"build": sys.version,
}
class StdlibIntegration(Integration):
identifier = "stdlib"
@staticmethod
def setup_once():
# type: () -> None
_install_httplib()
_install_subprocess()
@add_global_event_processor
def add_python_runtime_context(event, hint):
# type: (Event, Hint) -> Optional[Event]
if Hub.current.get_integration(StdlibIntegration) is not None:
contexts = event.setdefault("contexts", {})
if isinstance(contexts, dict) and "runtime" not in contexts:
contexts["runtime"] = _RUNTIME_CONTEXT
return event
def _install_httplib():
# type: () -> None
real_putrequest = HTTPConnection.putrequest
real_getresponse = HTTPConnection.getresponse
def putrequest(self, method, url, *args, **kwargs):
# type: (HTTPConnection, str, str, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(StdlibIntegration) is None:
return real_putrequest(self, method, url, *args, **kwargs)
host = self.host
port = self.port
default_port = self.default_port
real_url = url
if real_url is None or not real_url.startswith(("http://", "https://")):
real_url = "%s://%s%s%s" % (
default_port == 443 and "https" or "http",
host,
port != default_port and ":%s" % port or "",
url,
)
span = hub.start_span(
op=OP.HTTP_CLIENT, description="%s %s" % (method, real_url)
)
span.set_data("method", method)
span.set_data("url", real_url)
rv = real_putrequest(self, method, url, *args, **kwargs)
for key, value in hub.iter_trace_propagation_headers(span):
logger.debug(
"[Tracing] Adding `{key}` header {value} to outgoing request to {real_url}.".format(
key=key, value=value, real_url=real_url
)
)
self.putheader(key, value)
self._sentrysdk_span = span
return rv
def getresponse(self, *args, **kwargs):
# type: (HTTPConnection, *Any, **Any) -> Any
span = getattr(self, "_sentrysdk_span", None)
if span is None:
return real_getresponse(self, *args, **kwargs)
rv = real_getresponse(self, *args, **kwargs)
span.set_data("status_code", rv.status)
span.set_http_status(int(rv.status))
span.set_data("reason", rv.reason)
span.finish()
return rv
HTTPConnection.putrequest = putrequest
HTTPConnection.getresponse = getresponse
def _init_argument(args, kwargs, name, position, setdefault_callback=None):
# type: (List[Any], Dict[Any, Any], str, int, Optional[Callable[[Any], Any]]) -> Any
"""
given (*args, **kwargs) of a function call, retrieve (and optionally set a
default for) an argument by either name or position.
This is useful for wrapping functions with complex type signatures and
extracting a few arguments without needing to redefine that function's
entire type signature.
"""
if name in kwargs:
rv = kwargs[name]
if setdefault_callback is not None:
rv = setdefault_callback(rv)
if rv is not None:
kwargs[name] = rv
elif position < len(args):
rv = args[position]
if setdefault_callback is not None:
rv = setdefault_callback(rv)
if rv is not None:
args[position] = rv
else:
rv = setdefault_callback and setdefault_callback(None)
if rv is not None:
kwargs[name] = rv
return rv
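
# Illustrative behavior sketch for `_init_argument` (not part of the original
# module); the values are hypothetical:
#
#     a, kw = [["ls", "-la"]], {}
#     _init_argument(a, kw, "args", 0)  # -> ["ls", "-la"] (found by position)
#     _init_argument(a, kw, "env", 10, lambda x: dict(x or {"A": "1"}))
#     # -> {"A": "1"}; the computed default is also written into kw["env"]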
def _install_subprocess():
# type: () -> None
old_popen_init = subprocess.Popen.__init__
def sentry_patched_popen_init(self, *a, **kw):
# type: (subprocess.Popen[Any], *Any, **Any) -> None
hub = Hub.current
if hub.get_integration(StdlibIntegration) is None:
return old_popen_init(self, *a, **kw)
# Convert from tuple to list to be able to set values.
a = list(a)
args = _init_argument(a, kw, "args", 0) or []
cwd = _init_argument(a, kw, "cwd", 9)
# if args is not a list or tuple (and e.g. some iterator instead),
# let's not use it at all. There are too many things that can go wrong
# when trying to collect an iterator into a list and setting that list
# into `a` again.
#
# Also invocations where `args` is not a sequence are not actually
# legal. They just happen to work under CPython.
description = None
if isinstance(args, (list, tuple)) and len(args) < 100:
with capture_internal_exceptions():
description = " ".join(map(str, args))
if description is None:
description = safe_repr(args)
env = None
with hub.start_span(op=OP.SUBPROCESS, description=description) as span:
for k, v in hub.iter_trace_propagation_headers(span):
if env is None:
env = _init_argument(
a, kw, "env", 10, lambda x: dict(x or os.environ)
)
env["SUBPROCESS_" + k.upper().replace("-", "_")] = v
if cwd:
span.set_data("subprocess.cwd", cwd)
rv = old_popen_init(self, *a, **kw)
span.set_tag("subprocess.pid", self.pid)
return rv
subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore
old_popen_wait = subprocess.Popen.wait
def sentry_patched_popen_wait(self, *a, **kw):
# type: (subprocess.Popen[Any], *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(StdlibIntegration) is None:
return old_popen_wait(self, *a, **kw)
with hub.start_span(op=OP.SUBPROCESS_WAIT) as span:
span.set_tag("subprocess.pid", self.pid)
return old_popen_wait(self, *a, **kw)
subprocess.Popen.wait = sentry_patched_popen_wait # type: ignore
old_popen_communicate = subprocess.Popen.communicate
def sentry_patched_popen_communicate(self, *a, **kw):
# type: (subprocess.Popen[Any], *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(StdlibIntegration) is None:
return old_popen_communicate(self, *a, **kw)
with hub.start_span(op=OP.SUBPROCESS_COMMUNICATE) as span:
span.set_tag("subprocess.pid", self.pid)
return old_popen_communicate(self, *a, **kw)
subprocess.Popen.communicate = sentry_patched_popen_communicate # type: ignore
def get_subprocess_traceparent_headers():
# type: () -> EnvironHeaders
return EnvironHeaders(os.environ, prefix="SUBPROCESS_")
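
# Illustrative usage sketch (not part of the original module): a child process
# launched while a span was active can continue the trace from the
# `SUBPROCESS_*` environment headers written by `sentry_patched_popen_init`.
# The DSN, op, and transaction name below are placeholders.
if __name__ == "__main__":
    import sentry_sdk
    from sentry_sdk.tracing import Transaction

    sentry_sdk.init(dsn="https://public@example.ingest.sentry.io/0")
    transaction = Transaction.continue_from_headers(
        get_subprocess_traceparent_headers(), op="subprocess.child", name="child-work"
    )
    with Hub.current.start_transaction(transaction):
        pass  # the child process work goes here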

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/starlette.py

from __future__ import absolute_import
import asyncio
import functools
import threading
from sentry_sdk._compat import iteritems
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.integrations._wsgi_common import (
_is_json_content_type,
request_body_within_bounds,
)
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.utils import (
AnnotatedValue,
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
)
if MYPY:
from typing import Any, Awaitable, Callable, Dict, Optional
from sentry_sdk.scope import Scope as SentryScope
try:
import starlette # type: ignore
from starlette.applications import Starlette # type: ignore
from starlette.datastructures import UploadFile # type: ignore
from starlette.middleware import Middleware # type: ignore
from starlette.middleware.authentication import ( # type: ignore
AuthenticationMiddleware,
)
from starlette.requests import Request # type: ignore
from starlette.routing import Match # type: ignore
from starlette.types import ASGIApp, Receive, Scope as StarletteScope, Send # type: ignore
except ImportError:
raise DidNotEnable("Starlette is not installed")
try:
# Starlette 0.20
from starlette.middleware.exceptions import ExceptionMiddleware # type: ignore
except ImportError:
    # Starlette 0.19.1
from starlette.exceptions import ExceptionMiddleware # type: ignore
try:
# Optional dependency of Starlette to parse form data.
import multipart # type: ignore
except ImportError:
multipart = None
_DEFAULT_TRANSACTION_NAME = "generic Starlette request"
TRANSACTION_STYLE_VALUES = ("endpoint", "url")
class StarletteIntegration(Integration):
identifier = "starlette"
transaction_style = ""
def __init__(self, transaction_style="url"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
patch_middlewares()
patch_asgi_app()
patch_request_response()
def _enable_span_for_middleware(middleware_class):
# type: (Any) -> type
old_call = middleware_class.__call__
async def _create_span_call(app, scope, receive, send, **kwargs):
# type: (Any, Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]], Any) -> None
hub = Hub.current
integration = hub.get_integration(StarletteIntegration)
if integration is not None:
middleware_name = app.__class__.__name__
with hub.start_span(
op=OP.MIDDLEWARE_STARLETTE, description=middleware_name
) as middleware_span:
middleware_span.set_tag("starlette.middleware_name", middleware_name)
# Creating spans for the "receive" callback
async def _sentry_receive(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
with hub.start_span(
op=OP.MIDDLEWARE_STARLETTE_RECEIVE,
description=getattr(receive, "__qualname__", str(receive)),
) as span:
span.set_tag("starlette.middleware_name", middleware_name)
return await receive(*args, **kwargs)
receive_name = getattr(receive, "__name__", str(receive))
receive_patched = receive_name == "_sentry_receive"
new_receive = _sentry_receive if not receive_patched else receive
# Creating spans for the "send" callback
async def _sentry_send(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
with hub.start_span(
op=OP.MIDDLEWARE_STARLETTE_SEND,
description=getattr(send, "__qualname__", str(send)),
) as span:
span.set_tag("starlette.middleware_name", middleware_name)
return await send(*args, **kwargs)
send_name = getattr(send, "__name__", str(send))
send_patched = send_name == "_sentry_send"
new_send = _sentry_send if not send_patched else send
return await old_call(app, scope, new_receive, new_send, **kwargs)
else:
return await old_call(app, scope, receive, send, **kwargs)
not_yet_patched = old_call.__name__ not in [
"_create_span_call",
"_sentry_authenticationmiddleware_call",
"_sentry_exceptionmiddleware_call",
]
if not_yet_patched:
middleware_class.__call__ = _create_span_call
return middleware_class
def _capture_exception(exception, handled=False):
    # type: (BaseException, bool) -> None
hub = Hub.current
if hub.get_integration(StarletteIntegration) is None:
return
event, hint = event_from_exception(
exception,
client_options=hub.client.options if hub.client else None,
mechanism={"type": StarletteIntegration.identifier, "handled": handled},
)
hub.capture_event(event, hint=hint)
def patch_exception_middleware(middleware_class):
# type: (Any) -> None
"""
Capture all exceptions in Starlette app and
also extract user information.
"""
old_middleware_init = middleware_class.__init__
not_yet_patched = "_sentry_middleware_init" not in str(old_middleware_init)
if not_yet_patched:
def _sentry_middleware_init(self, *args, **kwargs):
# type: (Any, Any, Any) -> None
old_middleware_init(self, *args, **kwargs)
# Patch existing exception handlers
old_handlers = self._exception_handlers.copy()
async def _sentry_patched_exception_handler(self, *args, **kwargs):
# type: (Any, Any, Any) -> None
exp = args[0]
is_http_server_error = (
hasattr(exp, "status_code") and exp.status_code >= 500
)
if is_http_server_error:
_capture_exception(exp, handled=True)
# Find a matching handler
old_handler = None
for cls in type(exp).__mro__:
if cls in old_handlers:
old_handler = old_handlers[cls]
break
if old_handler is None:
return
if _is_async_callable(old_handler):
return await old_handler(self, *args, **kwargs)
else:
return old_handler(self, *args, **kwargs)
for key in self._exception_handlers.keys():
self._exception_handlers[key] = _sentry_patched_exception_handler
middleware_class.__init__ = _sentry_middleware_init
old_call = middleware_class.__call__
async def _sentry_exceptionmiddleware_call(self, scope, receive, send):
# type: (Dict[str, Any], Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]]) -> None
        # Also add the user that was eventually set by the Authentication
        # middleware that ran before this one. This is done because the
        # authentication middleware sets the user in the scope and then (in
        # the same function) calls this exception middleware. If there is no
        # exception (or no handler for the type of exception occurring), the
        # exception bubbles up, the user information is set into the sentry
        # scope by the auth middleware, and the ASGI middleware then sends
        # everything to Sentry, which is fine. But if an exception happens
        # that this exception middleware has a handler for, it sends the
        # exception directly to Sentry, so we need the user information
        # right now. This is why we do it here.
_add_user_to_sentry_scope(scope)
await old_call(self, scope, receive, send)
middleware_class.__call__ = _sentry_exceptionmiddleware_call
def _add_user_to_sentry_scope(scope):
# type: (Dict[str, Any]) -> None
"""
Extracts user information from the ASGI scope and
adds it to Sentry's scope.
"""
if "user" not in scope:
return
if not _should_send_default_pii():
return
hub = Hub.current
if hub.get_integration(StarletteIntegration) is None:
return
with hub.configure_scope() as sentry_scope:
user_info = {} # type: Dict[str, Any]
starlette_user = scope["user"]
username = getattr(starlette_user, "username", None)
if username:
user_info.setdefault("username", starlette_user.username)
user_id = getattr(starlette_user, "id", None)
if user_id:
user_info.setdefault("id", starlette_user.id)
email = getattr(starlette_user, "email", None)
if email:
user_info.setdefault("email", starlette_user.email)
sentry_scope.user = user_info
def patch_authentication_middleware(middleware_class):
# type: (Any) -> None
"""
Add user information to Sentry scope.
"""
old_call = middleware_class.__call__
not_yet_patched = "_sentry_authenticationmiddleware_call" not in str(old_call)
if not_yet_patched:
async def _sentry_authenticationmiddleware_call(self, scope, receive, send):
# type: (Dict[str, Any], Dict[str, Any], Callable[[], Awaitable[Dict[str, Any]]], Callable[[Dict[str, Any]], Awaitable[None]]) -> None
await old_call(self, scope, receive, send)
_add_user_to_sentry_scope(scope)
middleware_class.__call__ = _sentry_authenticationmiddleware_call
def patch_middlewares():
# type: () -> None
"""
    Patches Starlette's `Middleware` class to record
spans for every middleware invoked.
"""
old_middleware_init = Middleware.__init__
not_yet_patched = "_sentry_middleware_init" not in str(old_middleware_init)
if not_yet_patched:
def _sentry_middleware_init(self, cls, **options):
# type: (Any, Any, Any) -> None
if cls == SentryAsgiMiddleware:
return old_middleware_init(self, cls, **options)
span_enabled_cls = _enable_span_for_middleware(cls)
old_middleware_init(self, span_enabled_cls, **options)
if cls == AuthenticationMiddleware:
patch_authentication_middleware(cls)
if cls == ExceptionMiddleware:
patch_exception_middleware(cls)
Middleware.__init__ = _sentry_middleware_init
def patch_asgi_app():
# type: () -> None
"""
Instrument Starlette ASGI app using the SentryAsgiMiddleware.
"""
old_app = Starlette.__call__
async def _sentry_patched_asgi_app(self, scope, receive, send):
# type: (Starlette, StarletteScope, Receive, Send) -> None
if Hub.current.get_integration(StarletteIntegration) is None:
return await old_app(self, scope, receive, send)
middleware = SentryAsgiMiddleware(
lambda *a, **kw: old_app(self, *a, **kw),
mechanism_type=StarletteIntegration.identifier,
)
middleware.__call__ = middleware._run_asgi3
return await middleware(scope, receive, send)
Starlette.__call__ = _sentry_patched_asgi_app
# This was vendored in from Starlette to support Starlette 0.19.1 because
# this function was only introduced in 0.20.x
def _is_async_callable(obj):
# type: (Any) -> bool
while isinstance(obj, functools.partial):
obj = obj.func
return asyncio.iscoroutinefunction(obj) or (
callable(obj) and asyncio.iscoroutinefunction(obj.__call__)
)
def patch_request_response():
# type: () -> None
old_request_response = starlette.routing.request_response
def _sentry_request_response(func):
# type: (Callable[[Any], Any]) -> ASGIApp
old_func = func
is_coroutine = _is_async_callable(old_func)
if is_coroutine:
async def _sentry_async_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(StarletteIntegration)
if integration is None:
return await old_func(*args, **kwargs)
with hub.configure_scope() as sentry_scope:
request = args[0]
_set_transaction_name_and_source(
sentry_scope, integration.transaction_style, request
)
extractor = StarletteRequestExtractor(request)
info = await extractor.extract_request_info()
def _make_request_event_processor(req, integration):
# type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# Add info from request to event
request_info = event.get("request", {})
if info:
if "cookies" in info:
request_info["cookies"] = info["cookies"]
if "data" in info:
request_info["data"] = info["data"]
event["request"] = request_info
return event
return event_processor
sentry_scope._name = StarletteIntegration.identifier
sentry_scope.add_event_processor(
_make_request_event_processor(request, integration)
)
return await old_func(*args, **kwargs)
func = _sentry_async_func
else:
def _sentry_sync_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(StarletteIntegration)
if integration is None:
return old_func(*args, **kwargs)
with hub.configure_scope() as sentry_scope:
if sentry_scope.profile is not None:
sentry_scope.profile.active_thread_id = (
threading.current_thread().ident
)
request = args[0]
_set_transaction_name_and_source(
sentry_scope, integration.transaction_style, request
)
extractor = StarletteRequestExtractor(request)
cookies = extractor.extract_cookies_from_request()
def _make_request_event_processor(req, integration):
# type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# Extract information from request
request_info = event.get("request", {})
if cookies:
request_info["cookies"] = cookies
event["request"] = request_info
return event
return event_processor
sentry_scope._name = StarletteIntegration.identifier
sentry_scope.add_event_processor(
_make_request_event_processor(request, integration)
)
return old_func(*args, **kwargs)
func = _sentry_sync_func
return old_request_response(func)
starlette.routing.request_response = _sentry_request_response
class StarletteRequestExtractor:
"""
Extracts useful information from the Starlette request
(like form data or cookies) and adds it to the Sentry event.
"""
request = None # type: Request
def __init__(self, request):
# type: (StarletteRequestExtractor, Request) -> None
self.request = request
def extract_cookies_from_request(self):
# type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
client = Hub.current.client
if client is None:
return None
cookies = None # type: Optional[Dict[str, Any]]
if _should_send_default_pii():
cookies = self.cookies()
return cookies
async def extract_request_info(self):
# type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
client = Hub.current.client
if client is None:
return None
request_info = {} # type: Dict[str, Any]
with capture_internal_exceptions():
# Add cookies
if _should_send_default_pii():
request_info["cookies"] = self.cookies()
# If there is no body, just return the cookies
content_length = await self.content_length()
if not content_length:
return request_info
# Add annotation if body is too big
if content_length and not request_body_within_bounds(
client, content_length
):
request_info["data"] = AnnotatedValue.removed_because_over_size_limit()
return request_info
# Add JSON body, if it is a JSON request
json = await self.json()
if json:
request_info["data"] = json
return request_info
# Add form as key/value pairs, if request has form data
form = await self.form()
if form:
form_data = {}
for key, val in iteritems(form):
is_file = isinstance(val, UploadFile)
form_data[key] = (
val
if not is_file
else AnnotatedValue.removed_because_raw_data()
)
request_info["data"] = form_data
return request_info
# Raw data, do not add body just an annotation
request_info["data"] = AnnotatedValue.removed_because_raw_data()
return request_info
async def content_length(self):
# type: (StarletteRequestExtractor) -> Optional[int]
if "content-length" in self.request.headers:
return int(self.request.headers["content-length"])
return None
def cookies(self):
# type: (StarletteRequestExtractor) -> Dict[str, Any]
return self.request.cookies
async def form(self):
# type: (StarletteRequestExtractor) -> Any
if multipart is None:
return None
# Parse the body first to get it cached, as Starlette does not cache form() as it
# does with body() and json() https://github.com/encode/starlette/discussions/1933
# Calling `.form()` without calling `.body()` first will
        # potentially break the user's project.
await self.request.body()
return await self.request.form()
def is_json(self):
# type: (StarletteRequestExtractor) -> bool
return _is_json_content_type(self.request.headers.get("content-type"))
async def json(self):
# type: (StarletteRequestExtractor) -> Optional[Dict[str, Any]]
if not self.is_json():
return None
return await self.request.json()
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (SentryScope, str, Any) -> None
name = ""
if transaction_style == "endpoint":
endpoint = request.scope.get("endpoint")
if endpoint:
name = transaction_from_function(endpoint) or ""
elif transaction_style == "url":
router = request.scope["router"]
for route in router.routes:
match = route.matches(request.scope)
if match[0] == Match.FULL:
if transaction_style == "endpoint":
name = transaction_from_function(match[1]["endpoint"]) or ""
break
elif transaction_style == "url":
name = route.path
break
if not name:
name = _DEFAULT_TRANSACTION_NAME
source = TRANSACTION_SOURCE_ROUTE
else:
source = SOURCE_FOR_STYLE[transaction_style]
scope.set_transaction_name(name, source=source)
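
# Illustrative usage sketch (not part of the original module). The route and
# DSN are placeholders; `transaction_style="endpoint"` names transactions
# after the handler function instead of the URL pattern.
if __name__ == "__main__":
    import sentry_sdk
    from starlette.responses import JSONResponse
    from starlette.routing import Route

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[StarletteIntegration(transaction_style="endpoint")],
    )

    async def homepage(request):
        return JSONResponse({"hello": "world"})

    app = Starlette(routes=[Route("/", homepage)])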

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/atexit.py

from __future__ import absolute_import
import os
import sys
import atexit
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.integrations import Integration
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Optional
def default_callback(pending, timeout):
# type: (int, int) -> None
"""This is the default shutdown callback that is set on the options.
It prints out a message to stderr that informs the user that some events
are still pending and the process is waiting for them to flush out.
"""
def echo(msg):
# type: (str) -> None
sys.stderr.write(msg + "\n")
echo("Sentry is attempting to send %i pending error messages" % pending)
echo("Waiting up to %s seconds" % timeout)
echo("Press Ctrl-%s to quit" % (os.name == "nt" and "Break" or "C"))
sys.stderr.flush()
class AtexitIntegration(Integration):
identifier = "atexit"
def __init__(self, callback=None):
# type: (Optional[Any]) -> None
if callback is None:
callback = default_callback
self.callback = callback
@staticmethod
def setup_once():
# type: () -> None
@atexit.register
def _shutdown():
# type: () -> None
logger.debug("atexit: got shutdown signal")
hub = Hub.main
integration = hub.get_integration(AtexitIntegration)
if integration is not None:
logger.debug("atexit: shutting down client")
# If there is a session on the hub, close it now.
hub.end_session()
# If an integration is there, a client has to be there.
client = hub.client # type: Any
client.close(callback=integration.callback)
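
# Illustrative usage sketch (not part of the original module): replacing the
# default shutdown callback. The DSN is a placeholder; the (pending, timeout)
# signature matches `default_callback` above.
if __name__ == "__main__":
    import sentry_sdk

    def quiet_callback(pending, timeout):
        # type: (int, int) -> None
        pass  # suppress the default stderr chatter during shutdown

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[AtexitIntegration(callback=quiet_callback)],
    )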

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/falcon.py

from __future__ import absolute_import
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from sentry_sdk.tracing import SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
from sentry_sdk._types import EventProcessor
try:
import falcon # type: ignore
import falcon.api_helpers # type: ignore
from falcon import __version__ as FALCON_VERSION
except ImportError:
raise DidNotEnable("Falcon not installed")
class FalconRequestExtractor(RequestExtractor):
def env(self):
# type: () -> Dict[str, Any]
return self.request.env
def cookies(self):
# type: () -> Dict[str, Any]
return self.request.cookies
def form(self):
# type: () -> None
return None # No such concept in Falcon
def files(self):
# type: () -> None
return None # No such concept in Falcon
def raw_data(self):
# type: () -> Optional[str]
# As request data can only be read once we won't make this available
# to Sentry. Just send back a dummy string in case there was a
# content length.
# TODO(jmagnusson): Figure out if there's a way to support this
content_length = self.content_length()
if content_length > 0:
return "[REQUEST_CONTAINING_RAW_DATA]"
else:
return None
def json(self):
# type: () -> Optional[Dict[str, Any]]
try:
return self.request.media
except falcon.errors.HTTPBadRequest:
# NOTE(jmagnusson): We return `falcon.Request._media` here because
# falcon 1.4 doesn't do proper type checking in
# `falcon.Request.media`. This has been fixed in 2.0.
# Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953
return self.request._media
class SentryFalconMiddleware(object):
"""Captures exceptions in Falcon requests and send to Sentry"""
def process_request(self, req, resp, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> None
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is None:
return
with hub.configure_scope() as scope:
scope._name = "falcon"
scope.add_event_processor(_make_request_event_processor(req, integration))
TRANSACTION_STYLE_VALUES = ("uri_template", "path")
class FalconIntegration(Integration):
identifier = "falcon"
transaction_style = ""
def __init__(self, transaction_style="uri_template"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, FALCON_VERSION.split(".")))
except (ValueError, TypeError):
raise DidNotEnable("Unparsable Falcon version: {}".format(FALCON_VERSION))
if version < (1, 4):
raise DidNotEnable("Falcon 1.4 or newer required.")
_patch_wsgi_app()
_patch_handle_exception()
_patch_prepare_middleware()
def _patch_wsgi_app():
# type: () -> None
original_wsgi_app = falcon.API.__call__
def sentry_patched_wsgi_app(self, env, start_response):
# type: (falcon.API, Any, Any) -> Any
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is None:
return original_wsgi_app(self, env, start_response)
sentry_wrapped = SentryWsgiMiddleware(
lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)
)
return sentry_wrapped(env, start_response)
falcon.API.__call__ = sentry_patched_wsgi_app
def _patch_handle_exception():
# type: () -> None
original_handle_exception = falcon.API._handle_exception
def sentry_patched_handle_exception(self, *args):
# type: (falcon.API, *Any) -> Any
# NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
# method signature from `(ex, req, resp, params)` to
# `(req, resp, ex, params)`
if isinstance(args[0], Exception):
ex = args[0]
else:
ex = args[2]
was_handled = original_handle_exception(self, *args)
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is not None and _exception_leads_to_http_5xx(ex):
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
ex,
client_options=client.options,
mechanism={"type": "falcon", "handled": False},
)
hub.capture_event(event, hint=hint)
return was_handled
falcon.API._handle_exception = sentry_patched_handle_exception
def _patch_prepare_middleware():
# type: () -> None
original_prepare_middleware = falcon.api_helpers.prepare_middleware
def sentry_patched_prepare_middleware(
middleware=None, independent_middleware=False
):
# type: (Any, Any) -> Any
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is not None:
middleware = [SentryFalconMiddleware()] + (middleware or [])
return original_prepare_middleware(middleware, independent_middleware)
falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware
def _exception_leads_to_http_5xx(ex):
# type: (Exception) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
return is_server_error or is_unhandled_error
def _set_transaction_name_and_source(event, transaction_style, request):
# type: (Dict[str, Any], str, falcon.Request) -> None
name_for_style = {
"uri_template": request.uri_template,
"path": request.path,
}
event["transaction"] = name_for_style[transaction_style]
event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
def _make_request_event_processor(req, integration):
# type: (falcon.Request, FalconIntegration) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
_set_transaction_name_and_source(event, integration.transaction_style, req)
with capture_internal_exceptions():
FalconRequestExtractor(req).extract_into_event(event)
return event
return event_processor
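
# Illustrative usage sketch (not part of the original module). Resource and
# DSN are placeholders; `falcon.API` is patched above, so requests through it
# are reported automatically once the integration is initialized.
if __name__ == "__main__":
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[FalconIntegration(transaction_style="uri_template")],
    )

    class ThingsResource(object):
        def on_get(self, req, resp):
            resp.media = {"hello": "world"}

    app = falcon.API()  # falcon.App() on Falcon 3.x
    app.add_route("/things", ThingsResource())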

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/bottle.py

from __future__ import absolute_import
from sentry_sdk.hub import Hub
from sentry_sdk.tracing import SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk._types import MYPY
if MYPY:
from sentry_sdk.integrations.wsgi import _ScopedResponse
from typing import Any
from typing import Dict
from typing import Callable
from typing import Optional
from bottle import FileUpload, FormsDict, LocalRequest # type: ignore
from sentry_sdk._types import EventProcessor, Event
try:
from bottle import (
Bottle,
Route,
request as bottle_request,
HTTPResponse,
__version__ as BOTTLE_VERSION,
)
except ImportError:
raise DidNotEnable("Bottle not installed")
TRANSACTION_STYLE_VALUES = ("endpoint", "url")
class BottleIntegration(Integration):
identifier = "bottle"
transaction_style = ""
def __init__(self, transaction_style="endpoint"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, BOTTLE_VERSION.replace("-dev", "").split(".")))
except (TypeError, ValueError):
raise DidNotEnable("Unparsable Bottle version: {}".format(version))
if version < (0, 12):
raise DidNotEnable("Bottle 0.12 or newer required.")
# monkey patch method Bottle.__call__
old_app = Bottle.__call__
def sentry_patched_wsgi_app(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
hub = Hub.current
integration = hub.get_integration(BottleIntegration)
if integration is None:
return old_app(self, environ, start_response)
return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
environ, start_response
)
Bottle.__call__ = sentry_patched_wsgi_app
# monkey patch method Bottle._handle
old_handle = Bottle._handle
def _patched_handle(self, environ):
# type: (Bottle, Dict[str, Any]) -> Any
hub = Hub.current
integration = hub.get_integration(BottleIntegration)
if integration is None:
return old_handle(self, environ)
# create new scope
scope_manager = hub.push_scope()
with scope_manager:
app = self
with hub.configure_scope() as scope:
scope._name = "bottle"
scope.add_event_processor(
_make_request_event_processor(app, bottle_request, integration)
)
res = old_handle(self, environ)
# scope cleanup
return res
Bottle._handle = _patched_handle
# monkey patch method Route._make_callback
old_make_callback = Route._make_callback
def patched_make_callback(self, *args, **kwargs):
# type: (Route, *object, **object) -> Any
hub = Hub.current
integration = hub.get_integration(BottleIntegration)
prepared_callback = old_make_callback(self, *args, **kwargs)
if integration is None:
return prepared_callback
# If an integration is there, a client has to be there.
client = hub.client # type: Any
def wrapped_callback(*args, **kwargs):
# type: (*object, **object) -> Any
try:
res = prepared_callback(*args, **kwargs)
except HTTPResponse:
raise
except Exception as exception:
event, hint = event_from_exception(
exception,
client_options=client.options,
mechanism={"type": "bottle", "handled": False},
)
hub.capture_event(event, hint=hint)
raise exception
return res
return wrapped_callback
Route._make_callback = patched_make_callback
class BottleRequestExtractor(RequestExtractor):
def env(self):
# type: () -> Dict[str, str]
return self.request.environ
def cookies(self):
# type: () -> Dict[str, str]
return self.request.cookies
def raw_data(self):
# type: () -> bytes
return self.request.body.read()
def form(self):
# type: () -> FormsDict
if self.is_json():
return None
return self.request.forms.decode()
def files(self):
# type: () -> Optional[Dict[str, str]]
if self.is_json():
return None
return self.request.files
def size_of_file(self, file):
# type: (FileUpload) -> int
return file.content_length
def _set_transaction_name_and_source(event, transaction_style, request):
# type: (Event, str, Any) -> None
name = ""
if transaction_style == "url":
name = request.route.rule or ""
elif transaction_style == "endpoint":
name = (
request.route.name
or transaction_from_function(request.route.callback)
or ""
)
event["transaction"] = name
event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
def _make_request_event_processor(app, request, integration):
# type: (Bottle, LocalRequest, BottleIntegration) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
_set_transaction_name_and_source(event, integration.transaction_style, request)
with capture_internal_exceptions():
BottleRequestExtractor(request).extract_into_event(event)
return event
return event_processor
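
# Illustrative usage sketch (not part of the original module). DSN, host, and
# port are placeholders; with `transaction_style="endpoint"` transactions are
# named after the route callback.
if __name__ == "__main__":
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[BottleIntegration(transaction_style="endpoint")],
    )
    app = Bottle()

    @app.route("/")
    def index():
        return "hello"

    app.run(host="127.0.0.1", port=8080)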

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/rq.py

from __future__ import absolute_import
import weakref
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.tracing import Transaction, TRANSACTION_SOURCE_TASK
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
format_timestamp,
)
try:
from rq.queue import Queue
from rq.timeouts import JobTimeoutException
from rq.version import VERSION as RQ_VERSION
from rq.worker import Worker
except ImportError:
raise DidNotEnable("RQ not installed")
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any, Callable, Dict
from sentry_sdk._types import EventProcessor
from sentry_sdk.utils import ExcInfo
from rq.job import Job
class RqIntegration(Integration):
identifier = "rq"
@staticmethod
def setup_once():
# type: () -> None
try:
version = tuple(map(int, RQ_VERSION.split(".")[:3]))
except (ValueError, TypeError):
raise DidNotEnable("Unparsable RQ version: {}".format(RQ_VERSION))
if version < (0, 6):
raise DidNotEnable("RQ 0.6 or newer is required.")
old_perform_job = Worker.perform_job
def sentry_patched_perform_job(self, job, *args, **kwargs):
# type: (Any, Job, *Queue, **Any) -> bool
hub = Hub.current
integration = hub.get_integration(RqIntegration)
if integration is None:
return old_perform_job(self, job, *args, **kwargs)
client = hub.client
assert client is not None
with hub.push_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_event_processor(weakref.ref(job)))
transaction = Transaction.continue_from_headers(
job.meta.get("_sentry_trace_headers") or {},
op=OP.QUEUE_TASK_RQ,
name="unknown RQ task",
source=TRANSACTION_SOURCE_TASK,
)
with capture_internal_exceptions():
transaction.name = job.func_name
with hub.start_transaction(
transaction, custom_sampling_context={"rq_job": job}
):
rv = old_perform_job(self, job, *args, **kwargs)
if self.is_horse:
# We're inside of a forked process and RQ is
# about to call `os._exit`. Make sure that our
# events get sent out.
client.flush()
return rv
Worker.perform_job = sentry_patched_perform_job
old_handle_exception = Worker.handle_exception
def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
# type: (Worker, Any, *Any, **Any) -> Any
if job.is_failed:
_capture_exception(exc_info) # type: ignore
return old_handle_exception(self, job, *exc_info, **kwargs)
Worker.handle_exception = sentry_patched_handle_exception
old_enqueue_job = Queue.enqueue_job
def sentry_patched_enqueue_job(self, job, **kwargs):
# type: (Queue, Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(RqIntegration) is not None:
job.meta["_sentry_trace_headers"] = dict(
hub.iter_trace_propagation_headers()
)
return old_enqueue_job(self, job, **kwargs)
Queue.enqueue_job = sentry_patched_enqueue_job
ignore_logger("rq.worker")
def _make_event_processor(weak_job):
# type: (Callable[[], Job]) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
job = weak_job()
if job is not None:
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
extra["rq-job"] = {
"job_id": job.id,
"func": job.func_name,
"args": job.args,
"kwargs": job.kwargs,
"description": job.description,
}
if job.enqueued_at:
extra["rq-job"]["enqueued_at"] = format_timestamp(job.enqueued_at)
if job.started_at:
extra["rq-job"]["started_at"] = format_timestamp(job.started_at)
if "exc_info" in hint:
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], JobTimeoutException):
event["fingerprint"] = ["rq", "JobTimeoutException", job.func_name]
return event
return event_processor
def _capture_exception(exc_info, **kwargs):
# type: (ExcInfo, **Any) -> None
hub = Hub.current
if hub.get_integration(RqIntegration) is None:
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "rq", "handled": False},
)
hub.capture_event(event, hint=hint)
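
# Illustrative usage sketch (not part of the original module): enqueueing a
# job after init so `sentry_patched_enqueue_job` attaches the trace headers.
# DSN and Redis connection details are placeholders.
if __name__ == "__main__":
    import sentry_sdk
    from redis import Redis

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[RqIntegration()],
    )
    queue = Queue(connection=Redis())
    queue.enqueue(print, "job payload")  # any worker-importable callable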

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/logging.py

from __future__ import absolute_import
import logging
import datetime
from fnmatch import fnmatch
from sentry_sdk.hub import Hub
from sentry_sdk.utils import (
to_string,
event_from_exception,
current_stacktrace,
capture_internal_exceptions,
)
from sentry_sdk.integrations import Integration
from sentry_sdk._compat import iteritems
from sentry_sdk._types import MYPY
if MYPY:
from logging import LogRecord
from typing import Any
from typing import Dict
from typing import Optional
DEFAULT_LEVEL = logging.INFO
DEFAULT_EVENT_LEVEL = logging.ERROR
LOGGING_TO_EVENT_LEVEL = {
logging.NOTSET: "notset",
logging.DEBUG: "debug",
logging.INFO: "info",
logging.WARN: "warning", # WARN is same a WARNING
logging.WARNING: "warning",
logging.ERROR: "error",
logging.FATAL: "fatal",
logging.CRITICAL: "fatal", # CRITICAL is same as FATAL
}
# Capturing events from those loggers causes recursion errors. We cannot allow
# the user to unconditionally create events from those loggers under any
# circumstances.
#
# Note: Ignoring by logger name here is better than mucking with thread-locals.
# We do not necessarily know whether thread-locals work 100% correctly in the user's environment.
_IGNORED_LOGGERS = set(
["sentry_sdk.errors", "urllib3.connectionpool", "urllib3.connection"]
)
def ignore_logger(
name, # type: str
):
# type: (...) -> None
"""This disables recording (both in breadcrumbs and as events) calls to
a logger of a specific name. Among other uses, many of our integrations
use this to prevent their actions being recorded as breadcrumbs. Exposed
to users as a way to quiet spammy loggers.
:param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
"""
_IGNORED_LOGGERS.add(name)
class LoggingIntegration(Integration):
identifier = "logging"
def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
# type: (Optional[int], Optional[int]) -> None
self._handler = None
self._breadcrumb_handler = None
if level is not None:
self._breadcrumb_handler = BreadcrumbHandler(level=level)
if event_level is not None:
self._handler = EventHandler(level=event_level)
def _handle_record(self, record):
# type: (LogRecord) -> None
if self._handler is not None and record.levelno >= self._handler.level:
self._handler.handle(record)
if (
self._breadcrumb_handler is not None
and record.levelno >= self._breadcrumb_handler.level
):
self._breadcrumb_handler.handle(record)
@staticmethod
def setup_once():
# type: () -> None
old_callhandlers = logging.Logger.callHandlers
def sentry_patched_callhandlers(self, record):
# type: (Any, LogRecord) -> Any
try:
return old_callhandlers(self, record)
finally:
# This check is done twice, once also here before we even get
# the integration. Otherwise we have a high chance of getting
# into a recursion error when the integration is resolved
# (this also is slower).
if record.name not in _IGNORED_LOGGERS:
integration = Hub.current.get_integration(LoggingIntegration)
if integration is not None:
integration._handle_record(record)
logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
def _can_record(record):
# type: (LogRecord) -> bool
"""Prevents ignored loggers from recording"""
for logger in _IGNORED_LOGGERS:
if fnmatch(record.name, logger):
return False
return True
def _breadcrumb_from_record(record):
# type: (LogRecord) -> Dict[str, Any]
return {
"type": "log",
"level": _logging_to_event_level(record),
"category": record.name,
"message": record.message,
"timestamp": datetime.datetime.utcfromtimestamp(record.created),
"data": _extra_from_record(record),
}
def _logging_to_event_level(record):
# type: (LogRecord) -> str
return LOGGING_TO_EVENT_LEVEL.get(
record.levelno, record.levelname.lower() if record.levelname else ""
)
COMMON_RECORD_ATTRS = frozenset(
(
"args",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"linenno",
"lineno",
"message",
"module",
"msecs",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack",
"tags",
"thread",
"threadName",
"stack_info",
)
)
def _extra_from_record(record):
    # type: (LogRecord) -> Dict[str, Any]
return {
k: v
for k, v in iteritems(vars(record))
if k not in COMMON_RECORD_ATTRS
and (not isinstance(k, str) or not k.startswith("_"))
}
class EventHandler(logging.Handler, object):
"""
A logging handler that emits Sentry events for each log record
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
#
# exc_info may also be any falsy value due to Python stdlib being
# liberal with what it receives and Celery's billiard being "liberal"
# with what it sends. See
# https://github.com/getsentry/sentry-python/issues/904
if record.exc_info and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
client_options["with_locals"]
),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record)
event["logger"] = record.name
# Log records from `warnings` module as separate issues
        record_captured_from_warnings_module = (
            record.name == "py.warnings" and record.msg == "%s"
        )
        if record_captured_from_warnings_module:
# use the actual message and not "%s" as the message
# this prevents grouping all warnings under one "%s" issue
msg = record.args[0] # type: ignore
event["logentry"] = {
"message": msg,
"params": (),
}
else:
event["logentry"] = {
"message": to_string(record.msg),
"params": record.args,
}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
# Legacy name
SentryHandler = EventHandler
class BreadcrumbHandler(logging.Handler, object):
"""
A logging handler that records breadcrumbs for each log record.
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
Hub.current.add_breadcrumb(
_breadcrumb_from_record(record), hint={"log_record": record}
)
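
# Illustrative usage sketch (not part of the original module): INFO and above
# become breadcrumbs, ERROR and above become events; `ignore_logger` silences
# a noisy logger entirely. The DSN and logger name are placeholders.
if __name__ == "__main__":
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[
            LoggingIntegration(level=logging.INFO, event_level=logging.ERROR)
        ],
    )
    ignore_logger("a.spammy.logger")  # hypothetical logger name
    log = logging.getLogger(__name__)
    log.info("recorded as a breadcrumb")
    log.error("captured as a Sentry event")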

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/beam.py

from __future__ import absolute_import
import sys
import types
from sentry_sdk._functools import wraps
from sentry_sdk.hub import Hub
from sentry_sdk._compat import reraise
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Iterator
from typing import TypeVar
from typing import Optional
from typing import Callable
from sentry_sdk.client import Client
from sentry_sdk._types import ExcInfo
T = TypeVar("T")
F = TypeVar("F", bound=Callable[..., Any])
WRAPPED_FUNC = "_wrapped_{}_"
INSPECT_FUNC = "_inspect_{}" # Required format per apache_beam/transforms/core.py
USED_FUNC = "_sentry_used_"
class BeamIntegration(Integration):
identifier = "beam"
@staticmethod
def setup_once():
# type: () -> None
from apache_beam.transforms.core import DoFn, ParDo # type: ignore
ignore_logger("root")
ignore_logger("bundle_processor.create")
function_patches = ["process", "start_bundle", "finish_bundle", "setup"]
for func_name in function_patches:
setattr(
DoFn,
INSPECT_FUNC.format(func_name),
_wrap_inspect_call(DoFn, func_name),
)
old_init = ParDo.__init__
def sentry_init_pardo(self, fn, *args, **kwargs):
# type: (ParDo, Any, *Any, **Any) -> Any
# Do not monkey patch init twice
if not getattr(self, "_sentry_is_patched", False):
for func_name in function_patches:
if not hasattr(fn, func_name):
continue
wrapped_func = WRAPPED_FUNC.format(func_name)
# Check to see if inspect is set and process is not
# to avoid monkey patching process twice.
# Check to see if function is part of object for
# backwards compatibility.
process_func = getattr(fn, func_name)
inspect_func = getattr(fn, INSPECT_FUNC.format(func_name))
if not getattr(inspect_func, USED_FUNC, False) and not getattr(
process_func, USED_FUNC, False
):
setattr(fn, wrapped_func, process_func)
setattr(fn, func_name, _wrap_task_call(process_func))
self._sentry_is_patched = True
old_init(self, fn, *args, **kwargs)
ParDo.__init__ = sentry_init_pardo
def _wrap_inspect_call(cls, func_name):
# type: (Any, Any) -> Any
if not hasattr(cls, func_name):
return None
def _inspect(self):
# type: (Any) -> Any
"""
Inspect function overrides the way Beam gets argspec.
"""
wrapped_func = WRAPPED_FUNC.format(func_name)
if hasattr(self, wrapped_func):
process_func = getattr(self, wrapped_func)
else:
process_func = getattr(self, func_name)
setattr(self, func_name, _wrap_task_call(process_func))
setattr(self, wrapped_func, process_func)
# getfullargspec is deprecated in more recent beam versions and get_function_args_defaults
# (which uses Signatures internally) should be used instead.
try:
from apache_beam.transforms.core import get_function_args_defaults
return get_function_args_defaults(process_func)
except ImportError:
from apache_beam.typehints.decorators import getfullargspec # type: ignore
return getfullargspec(process_func)
setattr(_inspect, USED_FUNC, True)
return _inspect
def _wrap_task_call(func):
# type: (F) -> F
"""
    Wrap the task call in a try/except block to capture exceptions.
    Pass the client on to raise_exception so it can get rebound.
"""
client = Hub.current.client
@wraps(func)
def _inner(*args, **kwargs):
# type: (*Any, **Any) -> Any
try:
gen = func(*args, **kwargs)
except Exception:
raise_exception(client)
if not isinstance(gen, types.GeneratorType):
return gen
return _wrap_generator_call(gen, client)
setattr(_inner, USED_FUNC, True)
return _inner # type: ignore
def _capture_exception(exc_info, hub):
# type: (ExcInfo, Hub) -> None
"""
Send Beam exception to Sentry.
"""
integration = hub.get_integration(BeamIntegration)
if integration is None:
return
client = hub.client
if client is None:
return
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "beam", "handled": False},
)
hub.capture_event(event, hint=hint)
def raise_exception(client):
# type: (Optional[Client]) -> None
"""
Raise an exception. If the client is not in the hub, rebind it.
"""
hub = Hub.current
if hub.client is None:
hub.bind_client(client)
exc_info = sys.exc_info()
with capture_internal_exceptions():
_capture_exception(exc_info, hub)
reraise(*exc_info)
def _wrap_generator_call(gen, client):
# type: (Iterator[T], Optional[Client]) -> Iterator[T]
"""
Wrap the generator to handle any failures.
"""
while True:
try:
yield next(gen)
except StopIteration:
break
except Exception:
raise_exception(client)
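
# Illustrative usage sketch (not part of the original module): the DoFn/ParDo
# patches above take effect once the SDK is initialized in the pipeline
# process. The DSN is a placeholder.
if __name__ == "__main__":
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://public@example.ingest.sentry.io/0",  # hypothetical DSN
        integrations=[BeamIntegration()],
    )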

omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/_wsgi_common.py

import json
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import AnnotatedValue
from sentry_sdk._compat import text_type, iteritems
from sentry_sdk._types import MYPY
if MYPY:
import sentry_sdk
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
SENSITIVE_ENV_KEYS = (
"REMOTE_ADDR",
"HTTP_X_FORWARDED_FOR",
"HTTP_SET_COOKIE",
"HTTP_COOKIE",
"HTTP_AUTHORIZATION",
"HTTP_X_API_KEY",
"HTTP_X_FORWARDED_FOR",
"HTTP_X_REAL_IP",
)
SENSITIVE_HEADERS = tuple(
x[len("HTTP_") :] for x in SENSITIVE_ENV_KEYS if x.startswith("HTTP_")
)
def request_body_within_bounds(client, content_length):
# type: (Optional[sentry_sdk.Client], int) -> bool
if client is None:
return False
bodies = client.options["request_bodies"]
return not (
bodies == "never"
or (bodies == "small" and content_length > 10**3)
or (bodies == "medium" and content_length > 10**4)
)
class RequestExtractor(object):
def __init__(self, request):
# type: (Any) -> None
self.request = request
def extract_into_event(self, event):
# type: (Dict[str, Any]) -> None
client = Hub.current.client
if client is None:
return
data = None # type: Optional[Union[AnnotatedValue, Dict[str, Any]]]
content_length = self.content_length()
request_info = event.get("request", {})
if _should_send_default_pii():
request_info["cookies"] = dict(self.cookies())
if not request_body_within_bounds(client, content_length):
data = AnnotatedValue.removed_because_over_size_limit()
else:
parsed_body = self.parsed_body()
if parsed_body is not None:
data = parsed_body
elif self.raw_data():
data = AnnotatedValue.removed_because_raw_data()
else:
data = None
if data is not None:
request_info["data"] = data
event["request"] = request_info
def content_length(self):
# type: () -> int
try:
return int(self.env().get("CONTENT_LENGTH", 0))
except ValueError:
return 0
def cookies(self):
# type: () -> Dict[str, Any]
raise NotImplementedError()
def raw_data(self):
# type: () -> Optional[Union[str, bytes]]
raise NotImplementedError()
def form(self):
# type: () -> Optional[Dict[str, Any]]
raise NotImplementedError()
def parsed_body(self):
# type: () -> Optional[Dict[str, Any]]
form = self.form()
files = self.files()
if form or files:
data = dict(iteritems(form))
for key, _ in iteritems(files):
data[key] = AnnotatedValue.removed_because_raw_data()
return data
return self.json()
def is_json(self):
# type: () -> bool
return _is_json_content_type(self.env().get("CONTENT_TYPE"))
def json(self):
# type: () -> Optional[Any]
try:
if not self.is_json():
return None
raw_data = self.raw_data()
if raw_data is None:
return None
if isinstance(raw_data, text_type):
return json.loads(raw_data)
else:
return json.loads(raw_data.decode("utf-8"))
except ValueError:
pass
return None
def files(self):
# type: () -> Optional[Dict[str, Any]]
raise NotImplementedError()
def size_of_file(self, file):
# type: (Any) -> int
raise NotImplementedError()
def env(self):
# type: () -> Dict[str, Any]
raise NotImplementedError()
def _is_json_content_type(ct):
# type: (Optional[str]) -> bool
mt = (ct or "").split(";", 1)[0]
    return mt == "application/json" or (
        mt.startswith("application/") and mt.endswith("+json")
    )
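# Doctest-style cases for the helper above (behavior read off the
# implementation; parameters after ";" are ignored):
#
#     _is_json_content_type("application/json")                         # True
#     _is_json_content_type("application/vnd.api+json; charset=utf-8")  # True
#     _is_json_content_type("text/plain")                               # False
#     _is_json_content_type(None)                                       # False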
def _filter_headers(headers):
# type: (Dict[str, str]) -> Dict[str, str]
if _should_send_default_pii():
return headers
return {
k: (
v
if k.upper().replace("-", "_") not in SENSITIVE_HEADERS
else AnnotatedValue.removed_because_over_size_limit()
)
for k, v in iteritems(headers)
}
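# Shape sketch with PII sending disabled: sensitive headers are replaced by an
# AnnotatedValue placeholder while the rest pass through, e.g.
#
#     _filter_headers({"Authorization": "Bearer abc", "Accept": "text/html"})
#     # -> {"Authorization": <AnnotatedValue placeholder>, "Accept": "text/html"}
#
# Matching upper-cases keys and maps "-" to "_", so "x-api-key" is filtered
# the same way as "X_API_KEY".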
| 4,476 | Python | 24.878613 | 76 | 0.552949 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/sanic.py | import sys
import weakref
from inspect import isawaitable
from sentry_sdk._compat import urlparse, reraise
from sentry_sdk.hub import Hub
from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Dict
from sanic.request import Request, RequestParameters
from sentry_sdk._types import Event, EventProcessor, Hint
from sanic.router import Route
try:
from sanic import Sanic, __version__ as SANIC_VERSION
from sanic.exceptions import SanicException
from sanic.router import Router
from sanic.handlers import ErrorHandler
except ImportError:
raise DidNotEnable("Sanic not installed")
old_error_handler_lookup = ErrorHandler.lookup
old_handle_request = Sanic.handle_request
old_router_get = Router.get
try:
# This method was introduced in Sanic v21.9
old_startup = Sanic._startup
except AttributeError:
pass
class SanicIntegration(Integration):
identifier = "sanic"
version = (0, 0) # type: Tuple[int, ...]
@staticmethod
def setup_once():
# type: () -> None
try:
SanicIntegration.version = tuple(map(int, SANIC_VERSION.split(".")))
except (TypeError, ValueError):
raise DidNotEnable("Unparsable Sanic version: {}".format(SANIC_VERSION))
if SanicIntegration.version < (0, 8):
raise DidNotEnable("Sanic 0.8 or newer required.")
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
            raise DidNotEnable(
                "The sanic integration for Sentry requires Python 3.7+ "
                "or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
            )
if SANIC_VERSION.startswith("0.8."):
# Sanic 0.8 and older creates a logger named "root" and puts a
# stringified version of every exception in there (without exc_info),
# which our error deduplication can't detect.
#
# We explicitly check the version here because it is a very
# invasive step to ignore this logger and not necessary in newer
# versions at all.
#
# https://github.com/huge-success/sanic/issues/1332
ignore_logger("root")
if SanicIntegration.version < (21, 9):
_setup_legacy_sanic()
return
_setup_sanic()
class SanicRequestExtractor(RequestExtractor):
def content_length(self):
# type: () -> int
if self.request.body is None:
return 0
return len(self.request.body)
def cookies(self):
# type: () -> Dict[str, str]
return dict(self.request.cookies)
def raw_data(self):
# type: () -> bytes
return self.request.body
def form(self):
# type: () -> RequestParameters
return self.request.form
def is_json(self):
# type: () -> bool
raise NotImplementedError()
def json(self):
# type: () -> Optional[Any]
return self.request.json
def files(self):
# type: () -> RequestParameters
return self.request.files
def size_of_file(self, file):
# type: (Any) -> int
return len(file.body or ())
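# Typical wiring sketch for this integration (standard sentry_sdk usage; the
# DSN below is a placeholder):
#
#     import sentry_sdk
#     from sentry_sdk.integrations.sanic import SanicIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[SanicIntegration()],
#     )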
def _setup_sanic():
# type: () -> None
Sanic._startup = _startup
ErrorHandler.lookup = _sentry_error_handler_lookup
def _setup_legacy_sanic():
# type: () -> None
Sanic.handle_request = _legacy_handle_request
Router.get = _legacy_router_get
ErrorHandler.lookup = _sentry_error_handler_lookup
async def _startup(self):
# type: (Sanic) -> None
# This happens about as early in the lifecycle as possible, just after the
# Request object is created. The body has not yet been consumed.
self.signal("http.lifecycle.request")(_hub_enter)
# This happens after the handler is complete. In v21.9 this signal is not
# dispatched when there is an exception. Therefore we need to close out
# and call _hub_exit from the custom exception handler as well.
# See https://github.com/sanic-org/sanic/issues/2297
self.signal("http.lifecycle.response")(_hub_exit)
# This happens inside of request handling immediately after the route
# has been identified by the router.
self.signal("http.routing.after")(_set_transaction)
# The above signals need to be declared before this can be called.
await old_startup(self)
async def _hub_enter(request):
# type: (Request) -> None
hub = Hub.current
request.ctx._sentry_do_integration = (
hub.get_integration(SanicIntegration) is not None
)
if not request.ctx._sentry_do_integration:
return
weak_request = weakref.ref(request)
request.ctx._sentry_hub = Hub(hub)
request.ctx._sentry_hub.__enter__()
with request.ctx._sentry_hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_request_processor(weak_request))
async def _hub_exit(request, **_):
    # type: (Request, **Any) -> None
    # Guard against requests where _hub_enter bailed out early and never
    # attached a hub (integration disabled for this request).
    if not getattr(request.ctx, "_sentry_do_integration", False):
        return
    request.ctx._sentry_hub.__exit__(None, None, None)
async def _set_transaction(request, route, **kwargs):
# type: (Request, Route, **Any) -> None
hub = Hub.current
if hub.get_integration(SanicIntegration) is not None:
with capture_internal_exceptions():
with hub.configure_scope() as scope:
route_name = route.name.replace(request.app.name, "").strip(".")
scope.set_transaction_name(
route_name, source=TRANSACTION_SOURCE_COMPONENT
)
def _sentry_error_handler_lookup(self, exception, *args, **kwargs):
# type: (Any, Exception, *Any, **Any) -> Optional[object]
_capture_exception(exception)
old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)
if old_error_handler is None:
return None
if Hub.current.get_integration(SanicIntegration) is None:
return old_error_handler
async def sentry_wrapped_error_handler(request, exception):
# type: (Request, Exception) -> Any
try:
response = old_error_handler(request, exception)
if isawaitable(response):
response = await response
return response
except Exception:
# Report errors that occur in Sanic error handler. These
# exceptions will not even show up in Sanic's
# `sanic.exceptions` logger.
exc_info = sys.exc_info()
_capture_exception(exc_info)
reraise(*exc_info)
finally:
# As mentioned in previous comment in _startup, this can be removed
# after https://github.com/sanic-org/sanic/issues/2297 is resolved
if SanicIntegration.version == (21, 9):
await _hub_exit(request)
return sentry_wrapped_error_handler
async def _legacy_handle_request(self, request, *args, **kwargs):
# type: (Any, Request, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(SanicIntegration) is None:
return old_handle_request(self, request, *args, **kwargs)
weak_request = weakref.ref(request)
with Hub(hub) as hub:
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_request_processor(weak_request))
response = old_handle_request(self, request, *args, **kwargs)
if isawaitable(response):
response = await response
return response
def _legacy_router_get(self, *args):
# type: (Any, Union[Any, Request]) -> Any
rv = old_router_get(self, *args)
hub = Hub.current
if hub.get_integration(SanicIntegration) is not None:
with capture_internal_exceptions():
with hub.configure_scope() as scope:
if SanicIntegration.version and SanicIntegration.version >= (21, 3):
# Sanic versions above and including 21.3 append the app name to the
# route name, and so we need to remove it from Route name so the
# transaction name is consistent across all versions
sanic_app_name = self.ctx.app.name
sanic_route = rv[0].name
if sanic_route.startswith("%s." % sanic_app_name):
# We add a 1 to the len of the sanic_app_name because there is a dot
# that joins app name and the route name
# Format: app_name.route_name
sanic_route = sanic_route[len(sanic_app_name) + 1 :]
scope.set_transaction_name(
sanic_route, source=TRANSACTION_SOURCE_COMPONENT
)
else:
scope.set_transaction_name(
rv[0].__name__, source=TRANSACTION_SOURCE_COMPONENT
)
return rv
def _capture_exception(exception):
# type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None
hub = Hub.current
integration = hub.get_integration(SanicIntegration)
if integration is None:
return
# If an integration is there, a client has to be there.
client = hub.client # type: Any
with capture_internal_exceptions():
event, hint = event_from_exception(
exception,
client_options=client.options,
mechanism={"type": "sanic", "handled": False},
)
hub.capture_event(event, hint=hint)
def _make_request_processor(weak_request):
# type: (Callable[[], Request]) -> EventProcessor
def sanic_processor(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
try:
if hint and issubclass(hint["exc_info"][0], SanicException):
return None
except KeyError:
pass
request = weak_request()
if request is None:
return event
with capture_internal_exceptions():
extractor = SanicRequestExtractor(request)
extractor.extract_into_event(event)
request_info = event["request"]
urlparts = urlparse.urlsplit(request.url)
request_info["url"] = "%s://%s%s" % (
urlparts.scheme,
urlparts.netloc,
urlparts.path,
)
request_info["query_string"] = urlparts.query
request_info["method"] = request.method
request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
request_info["headers"] = _filter_headers(dict(request.headers))
return event
return sanic_processor
| 11,311 | Python | 32.270588 | 95 | 0.618867 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/opentelemetry/__init__.py | from sentry_sdk.integrations.opentelemetry.span_processor import ( # noqa: F401
SentrySpanProcessor,
)
from sentry_sdk.integrations.opentelemetry.propagator import ( # noqa: F401
SentryPropagator,
)
| 210 | Python | 25.374997 | 80 | 0.771429 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/opentelemetry/propagator.py | from opentelemetry import trace # type: ignore
from opentelemetry.context import ( # type: ignore
Context,
get_current,
set_value,
)
from opentelemetry.propagators.textmap import ( # type: ignore
CarrierT,
Getter,
Setter,
TextMapPropagator,
default_getter,
default_setter,
)
from opentelemetry.trace import ( # type: ignore
TraceFlags,
NonRecordingSpan,
SpanContext,
)
from sentry_sdk.integrations.opentelemetry.consts import (
SENTRY_BAGGAGE_KEY,
SENTRY_TRACE_KEY,
)
from sentry_sdk.integrations.opentelemetry.span_processor import (
SentrySpanProcessor,
)
from sentry_sdk.tracing import (
BAGGAGE_HEADER_NAME,
SENTRY_TRACE_HEADER_NAME,
)
from sentry_sdk.tracing_utils import Baggage, extract_sentrytrace_data
from sentry_sdk._types import MYPY
if MYPY:
from typing import Optional
from typing import Set
class SentryPropagator(TextMapPropagator): # type: ignore
"""
Propagates tracing headers for Sentry's tracing system in a way OTel understands.
"""
def extract(self, carrier, context=None, getter=default_getter):
# type: (CarrierT, Optional[Context], Getter) -> Context
if context is None:
context = get_current()
sentry_trace = getter.get(carrier, SENTRY_TRACE_HEADER_NAME)
if not sentry_trace:
return context
sentrytrace = extract_sentrytrace_data(sentry_trace[0])
if not sentrytrace:
return context
context = set_value(SENTRY_TRACE_KEY, sentrytrace, context)
trace_id, span_id = sentrytrace["trace_id"], sentrytrace["parent_span_id"]
span_context = SpanContext(
trace_id=int(trace_id, 16), # type: ignore
span_id=int(span_id, 16), # type: ignore
# we simulate a sampled trace on the otel side and leave the sampling to sentry
trace_flags=TraceFlags(TraceFlags.SAMPLED),
is_remote=True,
)
baggage_header = getter.get(carrier, BAGGAGE_HEADER_NAME)
if baggage_header:
baggage = Baggage.from_incoming_header(baggage_header[0])
else:
# If there's an incoming sentry-trace but no incoming baggage header,
# for instance in traces coming from older SDKs,
# baggage will be empty and frozen and won't be populated as head SDK.
baggage = Baggage(sentry_items={})
baggage.freeze()
context = set_value(SENTRY_BAGGAGE_KEY, baggage, context)
span = NonRecordingSpan(span_context)
modified_context = trace.set_span_in_context(span, context)
return modified_context
def inject(self, carrier, context=None, setter=default_setter):
# type: (CarrierT, Optional[Context], Setter) -> None
if context is None:
context = get_current()
current_span = trace.get_current_span(context)
if not current_span.context.is_valid:
return
span_id = trace.format_span_id(current_span.context.span_id)
span_map = SentrySpanProcessor().otel_span_map
sentry_span = span_map.get(span_id, None)
if not sentry_span:
return
setter.set(carrier, SENTRY_TRACE_HEADER_NAME, sentry_span.to_traceparent())
baggage = sentry_span.containing_transaction.get_baggage()
if baggage:
setter.set(carrier, BAGGAGE_HEADER_NAME, baggage.serialize())
@property
def fields(self):
# type: () -> Set[str]
return {SENTRY_TRACE_HEADER_NAME, BAGGAGE_HEADER_NAME}
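# Usage sketch (assumes the standard OpenTelemetry propagation API; verify
# against your installed opentelemetry version):
#
#     from opentelemetry import propagate
#     propagate.set_global_textmap(SentryPropagator())
#
# Once installed, extract() picks up incoming `sentry-trace`/`baggage` headers
# and inject() writes them onto outgoing requests for known spans.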
| 3,591 | Python | 30.508772 | 91 | 0.651072 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/opentelemetry/consts.py | from opentelemetry.context import ( # type: ignore
create_key,
)
SENTRY_TRACE_KEY = create_key("sentry-trace")
SENTRY_BAGGAGE_KEY = create_key("sentry-baggage")
| 167 | Python | 22.999997 | 51 | 0.724551 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/opentelemetry/span_processor.py | from datetime import datetime
from opentelemetry.context import get_value # type: ignore
from opentelemetry.sdk.trace import SpanProcessor # type: ignore
from opentelemetry.semconv.trace import SpanAttributes # type: ignore
from opentelemetry.trace import ( # type: ignore
format_span_id,
format_trace_id,
get_current_span,
SpanContext,
Span as OTelSpan,
SpanKind,
)
from opentelemetry.trace.span import ( # type: ignore
INVALID_SPAN_ID,
INVALID_TRACE_ID,
)
from sentry_sdk.consts import INSTRUMENTER
from sentry_sdk.hub import Hub
from sentry_sdk.integrations.opentelemetry.consts import (
SENTRY_BAGGAGE_KEY,
SENTRY_TRACE_KEY,
)
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.tracing import Transaction, Span as SentrySpan
from sentry_sdk.utils import Dsn
from sentry_sdk._types import MYPY
from urllib3.util import parse_url as urlparse # type: ignore
if MYPY:
from typing import Any
from typing import Dict
from typing import Union
from sentry_sdk._types import Event, Hint
OPEN_TELEMETRY_CONTEXT = "otel"
def link_trace_context_to_error_event(event, otel_span_map):
# type: (Event, Dict[str, Union[Transaction, OTelSpan]]) -> Event
hub = Hub.current
if not hub:
return event
if hub.client and hub.client.options["instrumenter"] != INSTRUMENTER.OTEL:
return event
    # `event` is a plain dict, so the type marker must be read as a key;
    # hasattr() on a dict never sees its keys.
    if event.get("type") == "transaction":
return event
otel_span = get_current_span()
if not otel_span:
return event
ctx = otel_span.get_span_context()
trace_id = format_trace_id(ctx.trace_id)
span_id = format_span_id(ctx.span_id)
if trace_id == INVALID_TRACE_ID or span_id == INVALID_SPAN_ID:
return event
sentry_span = otel_span_map.get(span_id, None)
if not sentry_span:
return event
contexts = event.setdefault("contexts", {})
contexts.setdefault("trace", {}).update(sentry_span.get_trace_context())
return event
class SentrySpanProcessor(SpanProcessor): # type: ignore
"""
Converts OTel spans into Sentry spans so they can be sent to the Sentry backend.
"""
# The mapping from otel span ids to sentry spans
otel_span_map = {} # type: Dict[str, Union[Transaction, OTelSpan]]
def __new__(cls):
# type: () -> SentrySpanProcessor
if not hasattr(cls, "instance"):
cls.instance = super(SentrySpanProcessor, cls).__new__(cls)
return cls.instance
def __init__(self):
# type: () -> None
@add_global_event_processor
def global_event_processor(event, hint):
# type: (Event, Hint) -> Event
return link_trace_context_to_error_event(event, self.otel_span_map)
def on_start(self, otel_span, parent_context=None):
# type: (OTelSpan, SpanContext) -> None
hub = Hub.current
if not hub:
return
if not hub.client or (hub.client and not hub.client.dsn):
return
try:
_ = Dsn(hub.client.dsn or "")
except Exception:
return
if hub.client and hub.client.options["instrumenter"] != INSTRUMENTER.OTEL:
return
if not otel_span.context.is_valid:
return
if self._is_sentry_span(hub, otel_span):
return
trace_data = self._get_trace_data(otel_span, parent_context)
parent_span_id = trace_data["parent_span_id"]
sentry_parent_span = (
self.otel_span_map.get(parent_span_id, None) if parent_span_id else None
)
sentry_span = None
if sentry_parent_span:
sentry_span = sentry_parent_span.start_child(
span_id=trace_data["span_id"],
description=otel_span.name,
start_timestamp=datetime.fromtimestamp(otel_span.start_time / 1e9),
instrumenter=INSTRUMENTER.OTEL,
)
else:
sentry_span = hub.start_transaction(
name=otel_span.name,
span_id=trace_data["span_id"],
parent_span_id=parent_span_id,
trace_id=trace_data["trace_id"],
baggage=trace_data["baggage"],
start_timestamp=datetime.fromtimestamp(otel_span.start_time / 1e9),
instrumenter=INSTRUMENTER.OTEL,
)
self.otel_span_map[trace_data["span_id"]] = sentry_span
def on_end(self, otel_span):
# type: (OTelSpan) -> None
hub = Hub.current
if not hub:
return
if hub.client and hub.client.options["instrumenter"] != INSTRUMENTER.OTEL:
return
if not otel_span.context.is_valid:
return
span_id = format_span_id(otel_span.context.span_id)
sentry_span = self.otel_span_map.pop(span_id, None)
if not sentry_span:
return
sentry_span.op = otel_span.name
if isinstance(sentry_span, Transaction):
sentry_span.name = otel_span.name
sentry_span.set_context(
OPEN_TELEMETRY_CONTEXT, self._get_otel_context(otel_span)
)
else:
self._update_span_with_otel_data(sentry_span, otel_span)
sentry_span.finish(
end_timestamp=datetime.fromtimestamp(otel_span.end_time / 1e9)
)
def _is_sentry_span(self, hub, otel_span):
# type: (Hub, OTelSpan) -> bool
"""
Break infinite loop:
        HTTP requests to Sentry are caught by OTel and sent to Sentry again.
"""
otel_span_url = otel_span.attributes.get(SpanAttributes.HTTP_URL, None)
dsn_url = hub.client and Dsn(hub.client.dsn or "").netloc
if otel_span_url and dsn_url in otel_span_url:
return True
return False
def _get_otel_context(self, otel_span):
# type: (OTelSpan) -> Dict[str, Any]
"""
Returns the OTel context for Sentry.
See: https://develop.sentry.dev/sdk/performance/opentelemetry/#step-5-add-opentelemetry-context
"""
ctx = {}
if otel_span.attributes:
ctx["attributes"] = dict(otel_span.attributes)
if otel_span.resource.attributes:
ctx["resource"] = dict(otel_span.resource.attributes)
return ctx
def _get_trace_data(self, otel_span, parent_context):
# type: (OTelSpan, SpanContext) -> Dict[str, Any]
"""
Extracts tracing information from one OTel span and its parent OTel context.
"""
trace_data = {}
span_id = format_span_id(otel_span.context.span_id)
trace_data["span_id"] = span_id
trace_id = format_trace_id(otel_span.context.trace_id)
trace_data["trace_id"] = trace_id
parent_span_id = (
format_span_id(otel_span.parent.span_id) if otel_span.parent else None
)
trace_data["parent_span_id"] = parent_span_id
sentry_trace_data = get_value(SENTRY_TRACE_KEY, parent_context)
trace_data["parent_sampled"] = (
sentry_trace_data["parent_sampled"] if sentry_trace_data else None
)
baggage = get_value(SENTRY_BAGGAGE_KEY, parent_context)
trace_data["baggage"] = baggage
return trace_data
def _update_span_with_otel_data(self, sentry_span, otel_span):
# type: (SentrySpan, OTelSpan) -> None
"""
Convert OTel span data and update the Sentry span with it.
This should eventually happen on the server when ingesting the spans.
"""
for key, val in otel_span.attributes.items():
sentry_span.set_data(key, val)
sentry_span.set_data("otel.kind", otel_span.kind)
op = otel_span.name
description = otel_span.name
http_method = otel_span.attributes.get(SpanAttributes.HTTP_METHOD, None)
db_query = otel_span.attributes.get(SpanAttributes.DB_SYSTEM, None)
if http_method:
op = "http"
if otel_span.kind == SpanKind.SERVER:
op += ".server"
elif otel_span.kind == SpanKind.CLIENT:
op += ".client"
description = http_method
peer_name = otel_span.attributes.get(SpanAttributes.NET_PEER_NAME, None)
if peer_name:
description += " {}".format(peer_name)
target = otel_span.attributes.get(SpanAttributes.HTTP_TARGET, None)
if target:
description += " {}".format(target)
if not peer_name and not target:
url = otel_span.attributes.get(SpanAttributes.HTTP_URL, None)
if url:
parsed_url = urlparse(url)
url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
description += " {}".format(url)
status_code = otel_span.attributes.get(
SpanAttributes.HTTP_STATUS_CODE, None
)
if status_code:
sentry_span.set_http_status(status_code)
elif db_query:
op = "db"
statement = otel_span.attributes.get(SpanAttributes.DB_STATEMENT, None)
if statement:
description = statement
sentry_span.op = op
sentry_span.description = description
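# Wiring sketch (assumes the opentelemetry-sdk TracerProvider API; adjust to
# your installed version):
#
#     from opentelemetry import trace
#     from opentelemetry.sdk.trace import TracerProvider
#
#     provider = TracerProvider()
#     provider.add_span_processor(SentrySpanProcessor())
#     trace.set_tracer_provider(provider)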
| 9,388 | Python | 31.154109 | 103 | 0.597784 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/asgi.py | """
Instrumentation for Django 3.0
Since this file contains `async def` it is conditionally imported in
`sentry_sdk.integrations.django` (depending on the existence of
`django.core.handlers.asgi`).
"""
import asyncio
import threading
from sentry_sdk import Hub, _functools
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
if MYPY:
from typing import Any
from typing import Union
from typing import Callable
from django.http.response import HttpResponse
def patch_django_asgi_handler_impl(cls):
# type: (Any) -> None
from sentry_sdk.integrations.django import DjangoIntegration
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, scope, receive, send):
# type: (Any, Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, scope, receive, send)
middleware = SentryAsgiMiddleware(
old_app.__get__(self, cls), unsafe_context_data=True
)._run_asgi3
return await middleware(scope, receive, send)
cls.__call__ = sentry_patched_asgi_handler
def patch_get_response_async(cls, _before_get_response):
# type: (Any, Any) -> None
old_get_response_async = cls.get_response_async
async def sentry_patched_get_response_async(self, request):
# type: (Any, Any) -> Union[HttpResponse, BaseException]
_before_get_response(request)
return await old_get_response_async(self, request)
cls.get_response_async = sentry_patched_get_response_async
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
import channels # type: ignore
from sentry_sdk.integrations.django import DjangoIntegration
if channels.__version__ < "3.0.0":
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
# type: (Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, receive, send)
middleware = SentryAsgiMiddleware(
lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
)
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
else:
# The ASGI handler in Channels >= 3 has the same signature as
# the Django handler.
patch_django_asgi_handler_impl(cls)
def wrap_async_view(hub, callback):
# type: (Hub, Any) -> Any
@_functools.wraps(callback)
async def sentry_wrapped_callback(request, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
with hub.configure_scope() as sentry_scope:
if sentry_scope.profile is not None:
sentry_scope.profile.active_thread_id = threading.current_thread().ident
with hub.start_span(
op=OP.VIEW_RENDER, description=request.resolver_match.view_name
):
return await callback(request, *args, **kwargs)
return sentry_wrapped_callback
def _asgi_middleware_mixin_factory(_check_middleware_span):
# type: (Callable[..., Any]) -> Any
"""
Mixin class factory that generates a middleware mixin for handling requests
in async mode.
"""
class SentryASGIMixin:
if MYPY:
_inner = None
def __init__(self, get_response):
# type: (Callable[..., Any]) -> None
self.get_response = get_response
self._acall_method = None
self._async_check()
def _async_check(self):
# type: () -> None
"""
            If get_response is a coroutine function, switch into async mode so
            a thread is not consumed for the duration of a request.
Taken from django.utils.deprecation::MiddlewareMixin._async_check
"""
if asyncio.iscoroutinefunction(self.get_response):
self._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore
def async_route_check(self):
# type: () -> bool
"""
            Check whether we are in async mode; if so, request handling is
            forwarded to __acall__.
"""
return asyncio.iscoroutinefunction(self.get_response)
async def __acall__(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
f = self._acall_method
if f is None:
if hasattr(self._inner, "__acall__"):
self._acall_method = f = self._inner.__acall__ # type: ignore
else:
self._acall_method = f = self._inner
middleware_span = _check_middleware_span(old_method=f)
if middleware_span is None:
return await f(*args, **kwargs)
with middleware_span:
return await f(*args, **kwargs)
return SentryASGIMixin
| 5,054 | Python | 30.993671 | 88 | 0.608825 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/signals_handlers.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.dispatch import Signal
from sentry_sdk import Hub
from sentry_sdk._functools import wraps
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
if MYPY:
from typing import Any
from typing import Callable
from typing import List
def _get_receiver_name(receiver):
# type: (Callable[..., Any]) -> str
name = ""
if hasattr(receiver, "__qualname__"):
name = receiver.__qualname__
elif hasattr(receiver, "__name__"): # Python 2.7 has no __qualname__
name = receiver.__name__
elif hasattr(
receiver, "func"
    ):  # certain functions (like partials) don't have a name
if hasattr(receiver, "func") and hasattr(receiver.func, "__name__"): # type: ignore
name = "partial(<function " + receiver.func.__name__ + ">)" # type: ignore
if (
name == ""
): # In case nothing was found, return the string representation (this is the slowest case)
return str(receiver)
if hasattr(receiver, "__module__"): # prepend with module, if there is one
name = receiver.__module__ + "." + name
return name
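# Illustrative results (the module and function names are made up):
#
#     def handler(sender, **kwargs): ...            # defined in myapp.signals
#     _get_receiver_name(handler)                   # "myapp.signals.handler"
#     _get_receiver_name(functools.partial(handler))
#     # -> "partial(<function handler>)" (partial objects carry no __module__)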
def patch_signals():
# type: () -> None
"""Patch django signal receivers to create a span"""
old_live_receivers = Signal._live_receivers
def _sentry_live_receivers(self, sender):
# type: (Signal, Any) -> List[Callable[..., Any]]
hub = Hub.current
receivers = old_live_receivers(self, sender)
def sentry_receiver_wrapper(receiver):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(receiver)
def wrapper(*args, **kwargs):
# type: (Any, Any) -> Any
signal_name = _get_receiver_name(receiver)
with hub.start_span(
op=OP.EVENT_DJANGO,
description=signal_name,
) as span:
span.set_data("signal", signal_name)
return receiver(*args, **kwargs)
return wrapper
for idx, receiver in enumerate(receivers):
receivers[idx] = sentry_receiver_wrapper(receiver)
return receivers
Signal._live_receivers = _sentry_live_receivers
| 2,296 | Python | 29.626666 | 96 | 0.582317 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import threading
import weakref
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP, SENSITIVE_DATA_SUBSTITUTE
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.serializer import add_global_repr_processor
from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_URL
from sentry_sdk.tracing_utils import record_sql_queries
from sentry_sdk.utils import (
AnnotatedValue,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
logger,
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
walk_exception_chain,
)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from sentry_sdk.integrations._wsgi_common import RequestExtractor
try:
from django import VERSION as DJANGO_VERSION
from django.conf import settings as django_settings
from django.core import signals
try:
from django.urls import resolve
except ImportError:
from django.core.urlresolvers import resolve
except ImportError:
raise DidNotEnable("Django not installed")
from sentry_sdk.integrations.django.transactions import LEGACY_RESOLVER
from sentry_sdk.integrations.django.templates import (
get_template_frame_from_exception,
patch_templates,
)
from sentry_sdk.integrations.django.middleware import patch_django_middlewares
from sentry_sdk.integrations.django.signals_handlers import patch_signals
from sentry_sdk.integrations.django.views import patch_views
if MYPY:
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Union
from typing import List
from django.core.handlers.wsgi import WSGIRequest
from django.http.response import HttpResponse
from django.http.request import QueryDict
from django.utils.datastructures import MultiValueDict
from sentry_sdk.scope import Scope
from sentry_sdk.integrations.wsgi import _ScopedResponse
from sentry_sdk._types import Event, Hint, EventProcessor, NotImplementedType
if DJANGO_VERSION < (1, 10):
def is_authenticated(request_user):
# type: (Any) -> bool
return request_user.is_authenticated()
else:
def is_authenticated(request_user):
# type: (Any) -> bool
return request_user.is_authenticated
TRANSACTION_STYLE_VALUES = ("function_name", "url")
class DjangoIntegration(Integration):
identifier = "django"
transaction_style = ""
middleware_spans = None
def __init__(self, transaction_style="url", middleware_spans=True):
# type: (str, bool) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
self.middleware_spans = middleware_spans
@staticmethod
def setup_once():
# type: () -> None
if DJANGO_VERSION < (1, 8):
raise DidNotEnable("Django 1.8 or newer is required.")
install_sql_hook()
# Patch in our custom middleware.
# logs an error for every 500
ignore_logger("django.server")
ignore_logger("django.request")
from django.core.handlers.wsgi import WSGIHandler
old_app = WSGIHandler.__call__
def sentry_patched_wsgi_handler(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
if Hub.current.get_integration(DjangoIntegration) is None:
return old_app(self, environ, start_response)
bound_old_app = old_app.__get__(self, WSGIHandler)
from django.conf import settings
use_x_forwarded_for = settings.USE_X_FORWARDED_HOST
return SentryWsgiMiddleware(bound_old_app, use_x_forwarded_for)(
environ, start_response
)
WSGIHandler.__call__ = sentry_patched_wsgi_handler
_patch_get_response()
_patch_django_asgi_handler()
signals.got_request_exception.connect(_got_request_exception)
@add_global_event_processor
def process_django_templates(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if hint is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception, (_, exc_value, _) in zip(
reversed(values), walk_exception_chain(exc_info)
):
frame = get_template_frame_from_exception(exc_value)
if frame is not None:
frames = exception.get("stacktrace", {}).get("frames", [])
for i in reversed(range(len(frames))):
f = frames[i]
if (
f.get("function") in ("Parser.parse", "parse", "render")
and f.get("module") == "django.template.base"
):
i += 1
break
else:
i = len(frames)
frames.insert(i, frame)
return event
@add_global_repr_processor
def _django_queryset_repr(value, hint):
# type: (Any, Dict[str, Any]) -> Union[NotImplementedType, str]
try:
# Django 1.6 can fail to import `QuerySet` when Django settings
# have not yet been initialized.
#
# If we fail to import, return `NotImplemented`. It's at least
# unlikely that we have a query set in `value` when importing
# `QuerySet` fails.
from django.db.models.query import QuerySet
except Exception:
return NotImplemented
if not isinstance(value, QuerySet) or value._result_cache:
return NotImplemented
# Do not call Hub.get_integration here. It is intentional that
# running under a new hub does not suddenly start executing
# querysets. This might be surprising to the user but it's likely
# less annoying.
return "<%s from %s at 0x%x>" % (
value.__class__.__name__,
value.__module__,
id(value),
)
_patch_channels()
patch_django_middlewares()
patch_views()
patch_templates()
patch_signals()
_DRF_PATCHED = False
_DRF_PATCH_LOCK = threading.Lock()
def _patch_drf():
# type: () -> None
"""
Patch Django Rest Framework for more/better request data. DRF's request
type is a wrapper around Django's request type. The attribute we're
interested in is `request.data`, which is a cached property containing a
parsed request body. Reading a request body from that property is more
reliable than reading from any of Django's own properties, as those don't
hold payloads in memory and therefore can only be accessed once.
We patch the Django request object to include a weak backreference to the
DRF request object, such that we can later use either in
`DjangoRequestExtractor`.
This function is not called directly on SDK setup, because importing almost
any part of Django Rest Framework will try to access Django settings (where
`sentry_sdk.init()` might be called from in the first place). Instead we
run this function on every request and do the patching on the first
request.
"""
global _DRF_PATCHED
if _DRF_PATCHED:
# Double-checked locking
return
with _DRF_PATCH_LOCK:
if _DRF_PATCHED:
return
# We set this regardless of whether the code below succeeds or fails.
# There is no point in trying to patch again on the next request.
_DRF_PATCHED = True
with capture_internal_exceptions():
try:
from rest_framework.views import APIView # type: ignore
except ImportError:
pass
else:
old_drf_initial = APIView.initial
def sentry_patched_drf_initial(self, request, *args, **kwargs):
# type: (APIView, Any, *Any, **Any) -> Any
with capture_internal_exceptions():
request._request._sentry_drf_request_backref = weakref.ref(
request
)
return old_drf_initial(self, request, *args, **kwargs)
APIView.initial = sentry_patched_drf_initial
def _patch_channels():
# type: () -> None
try:
from channels.http import AsgiHandler # type: ignore
except ImportError:
return
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
#
# We cannot hard-raise here because channels may not be used at all in
# the current process. That is the case when running traditional WSGI
# workers in gunicorn+gevent and the websocket stuff in a separate
# process.
logger.warning(
"We detected that you are using Django channels 2.0."
+ CONTEXTVARS_ERROR_MESSAGE
)
from sentry_sdk.integrations.django.asgi import patch_channels_asgi_handler_impl
patch_channels_asgi_handler_impl(AsgiHandler)
def _patch_django_asgi_handler():
# type: () -> None
try:
from django.core.handlers.asgi import ASGIHandler
except ImportError:
return
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
#
# We cannot hard-raise here because Django's ASGI stuff may not be used
# at all.
logger.warning(
"We detected that you are using Django 3." + CONTEXTVARS_ERROR_MESSAGE
)
from sentry_sdk.integrations.django.asgi import patch_django_asgi_handler_impl
patch_django_asgi_handler_impl(ASGIHandler)
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (Scope, str, WSGIRequest) -> None
try:
transaction_name = None
if transaction_style == "function_name":
fn = resolve(request.path).func
transaction_name = transaction_from_function(getattr(fn, "view_class", fn))
elif transaction_style == "url":
if hasattr(request, "urlconf"):
transaction_name = LEGACY_RESOLVER.resolve(
request.path_info, urlconf=request.urlconf
)
else:
transaction_name = LEGACY_RESOLVER.resolve(request.path_info)
if transaction_name is None:
transaction_name = request.path_info
source = TRANSACTION_SOURCE_URL
else:
source = SOURCE_FOR_STYLE[transaction_style]
scope.set_transaction_name(
transaction_name,
source=source,
)
except Exception:
pass
def _before_get_response(request):
# type: (WSGIRequest) -> None
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is None:
return
_patch_drf()
with hub.configure_scope() as scope:
# Rely on WSGI middleware to start a trace
_set_transaction_name_and_source(scope, integration.transaction_style, request)
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
def _attempt_resolve_again(request, scope, transaction_style):
# type: (WSGIRequest, Scope, str) -> None
"""
    Some Django middlewares overwrite request.urlconf, and we need to respect
    that contract, so we try to resolve the URL again.
"""
if not hasattr(request, "urlconf"):
return
_set_transaction_name_and_source(scope, transaction_style, request)
def _after_get_response(request):
# type: (WSGIRequest) -> None
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is None or integration.transaction_style != "url":
return
with hub.configure_scope() as scope:
_attempt_resolve_again(request, scope, integration.transaction_style)
def _patch_get_response():
# type: () -> None
"""
    Patch get_response, because at that point we have the Django request object.
"""
from django.core.handlers.base import BaseHandler
old_get_response = BaseHandler.get_response
def sentry_patched_get_response(self, request):
# type: (Any, WSGIRequest) -> Union[HttpResponse, BaseException]
_before_get_response(request)
rv = old_get_response(self, request)
_after_get_response(request)
return rv
BaseHandler.get_response = sentry_patched_get_response
if hasattr(BaseHandler, "get_response_async"):
from sentry_sdk.integrations.django.asgi import patch_get_response_async
patch_get_response_async(BaseHandler, _before_get_response)
def _make_event_processor(weak_request, integration):
# type: (Callable[[], WSGIRequest], DjangoIntegration) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
request = weak_request()
if request is None:
return event
try:
drf_request = request._sentry_drf_request_backref()
if drf_request is not None:
request = drf_request
except AttributeError:
pass
with capture_internal_exceptions():
DjangoRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
_set_user_info(request, event)
return event
return event_processor
def _got_request_exception(request=None, **kwargs):
# type: (WSGIRequest, **Any) -> None
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is not None:
if request is not None and integration.transaction_style == "url":
with hub.configure_scope() as scope:
_attempt_resolve_again(request, scope, integration.transaction_style)
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
sys.exc_info(),
client_options=client.options,
mechanism={"type": "django", "handled": False},
)
hub.capture_event(event, hint=hint)
class DjangoRequestExtractor(RequestExtractor):
def env(self):
# type: () -> Dict[str, str]
return self.request.META
def cookies(self):
# type: () -> Dict[str, Union[str, AnnotatedValue]]
privacy_cookies = [
django_settings.CSRF_COOKIE_NAME,
django_settings.SESSION_COOKIE_NAME,
]
clean_cookies = {} # type: Dict[str, Union[str, AnnotatedValue]]
for (key, val) in self.request.COOKIES.items():
if key in privacy_cookies:
clean_cookies[key] = SENSITIVE_DATA_SUBSTITUTE
else:
clean_cookies[key] = val
return clean_cookies
def raw_data(self):
# type: () -> bytes
return self.request.body
def form(self):
# type: () -> QueryDict
return self.request.POST
def files(self):
# type: () -> MultiValueDict
return self.request.FILES
def size_of_file(self, file):
# type: (Any) -> int
return file.size
def parsed_body(self):
# type: () -> Optional[Dict[str, Any]]
try:
return self.request.data
except AttributeError:
return RequestExtractor.parsed_body(self)
def _set_user_info(request, event):
# type: (WSGIRequest, Dict[str, Any]) -> None
user_info = event.setdefault("user", {})
user = getattr(request, "user", None)
if user is None or not is_authenticated(user):
return
try:
user_info.setdefault("id", str(user.pk))
except Exception:
pass
try:
user_info.setdefault("email", user.email)
except Exception:
pass
try:
user_info.setdefault("username", user.get_username())
except Exception:
pass
def install_sql_hook():
# type: () -> None
"""If installed this causes Django's queries to be captured."""
try:
from django.db.backends.utils import CursorWrapper
except ImportError:
from django.db.backends.util import CursorWrapper
try:
        # django 1.6 and 1.7 compatibility
from django.db.backends import BaseDatabaseWrapper
except ImportError:
# django 1.8 or later
from django.db.backends.base.base import BaseDatabaseWrapper
try:
real_execute = CursorWrapper.execute
real_executemany = CursorWrapper.executemany
real_connect = BaseDatabaseWrapper.connect
except AttributeError:
# This won't work on Django versions < 1.6
return
def execute(self, sql, params=None):
# type: (CursorWrapper, Any, Optional[Any]) -> Any
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return real_execute(self, sql, params)
with record_sql_queries(
hub, self.cursor, sql, params, paramstyle="format", executemany=False
):
return real_execute(self, sql, params)
def executemany(self, sql, param_list):
# type: (CursorWrapper, Any, List[Any]) -> Any
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return real_executemany(self, sql, param_list)
with record_sql_queries(
hub, self.cursor, sql, param_list, paramstyle="format", executemany=True
):
return real_executemany(self, sql, param_list)
def connect(self):
# type: (BaseDatabaseWrapper) -> None
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return real_connect(self)
with capture_internal_exceptions():
hub.add_breadcrumb(message="connect", category="query")
with hub.start_span(op=OP.DB, description="connect"):
return real_connect(self)
CursorWrapper.execute = execute
CursorWrapper.executemany = executemany
BaseDatabaseWrapper.connect = connect
ignore_logger("django.db.backends")
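# Setup sketch using the options defined in this module (the DSN is a
# placeholder):
#
#     import sentry_sdk
#     from sentry_sdk.integrations.django import DjangoIntegration
#
#     sentry_sdk.init(
#         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#         integrations=[
#             DjangoIntegration(transaction_style="function_name", middleware_spans=False)
#         ],
#     )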
| 19,552 | Python | 31.265677 | 87 | 0.619834 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/middleware.py | """
Create spans from Django middleware invocations
"""
from django import VERSION as DJANGO_VERSION
from sentry_sdk import Hub
from sentry_sdk._functools import wraps
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
from sentry_sdk.utils import (
ContextVar,
transaction_from_function,
capture_internal_exceptions,
)
if MYPY:
from typing import Any
from typing import Callable
from typing import Optional
from typing import TypeVar
from sentry_sdk.tracing import Span
F = TypeVar("F", bound=Callable[..., Any])
_import_string_should_wrap_middleware = ContextVar(
"import_string_should_wrap_middleware"
)
if DJANGO_VERSION < (1, 7):
import_string_name = "import_by_path"
else:
import_string_name = "import_string"
if DJANGO_VERSION < (3, 1):
_asgi_middleware_mixin_factory = lambda _: object
else:
from .asgi import _asgi_middleware_mixin_factory
def patch_django_middlewares():
# type: () -> None
from django.core.handlers import base
old_import_string = getattr(base, import_string_name)
def sentry_patched_import_string(dotted_path):
# type: (str) -> Any
rv = old_import_string(dotted_path)
if _import_string_should_wrap_middleware.get(None):
rv = _wrap_middleware(rv, dotted_path)
return rv
setattr(base, import_string_name, sentry_patched_import_string)
old_load_middleware = base.BaseHandler.load_middleware
def sentry_patched_load_middleware(*args, **kwargs):
# type: (Any, Any) -> Any
_import_string_should_wrap_middleware.set(True)
try:
return old_load_middleware(*args, **kwargs)
finally:
_import_string_should_wrap_middleware.set(False)
base.BaseHandler.load_middleware = sentry_patched_load_middleware
def _wrap_middleware(middleware, middleware_name):
# type: (Any, str) -> Any
from sentry_sdk.integrations.django import DjangoIntegration
def _check_middleware_span(old_method):
# type: (Callable[..., Any]) -> Optional[Span]
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is None or not integration.middleware_spans:
return None
function_name = transaction_from_function(old_method)
description = middleware_name
function_basename = getattr(old_method, "__name__", None)
if function_basename:
description = "{}.{}".format(description, function_basename)
middleware_span = hub.start_span(
op=OP.MIDDLEWARE_DJANGO, description=description
)
middleware_span.set_tag("django.function_name", function_name)
middleware_span.set_tag("django.middleware_name", middleware_name)
return middleware_span
def _get_wrapped_method(old_method):
# type: (F) -> F
with capture_internal_exceptions():
def sentry_wrapped_method(*args, **kwargs):
# type: (*Any, **Any) -> Any
middleware_span = _check_middleware_span(old_method)
if middleware_span is None:
return old_method(*args, **kwargs)
with middleware_span:
return old_method(*args, **kwargs)
try:
# fails for __call__ of function on Python 2 (see py2.7-django-1.11)
sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)
# Necessary for Django 3.1
sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore
except Exception:
pass
return sentry_wrapped_method # type: ignore
return old_method
class SentryWrappingMiddleware(
_asgi_middleware_mixin_factory(_check_middleware_span) # type: ignore
):
async_capable = getattr(middleware, "async_capable", False)
def __init__(self, get_response=None, *args, **kwargs):
# type: (Optional[Callable[..., Any]], *Any, **Any) -> None
if get_response:
self._inner = middleware(get_response, *args, **kwargs)
else:
self._inner = middleware(*args, **kwargs)
self.get_response = get_response
self._call_method = None
if self.async_capable:
super(SentryWrappingMiddleware, self).__init__(get_response)
# We need correct behavior for `hasattr()`, which we can only determine
# when we have an instance of the middleware we're wrapping.
def __getattr__(self, method_name):
# type: (str) -> Any
if method_name not in (
"process_request",
"process_view",
"process_template_response",
"process_response",
"process_exception",
):
raise AttributeError()
old_method = getattr(self._inner, method_name)
rv = _get_wrapped_method(old_method)
self.__dict__[method_name] = rv
return rv
def __call__(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
if hasattr(self, "async_route_check") and self.async_route_check():
return self.__acall__(*args, **kwargs)
f = self._call_method
if f is None:
self._call_method = f = self._inner.__call__
middleware_span = _check_middleware_span(old_method=f)
if middleware_span is None:
return f(*args, **kwargs)
with middleware_span:
return f(*args, **kwargs)
for attr in (
"__name__",
"__module__",
"__qualname__",
):
if hasattr(middleware, attr):
setattr(SentryWrappingMiddleware, attr, getattr(middleware, attr))
return SentryWrappingMiddleware
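# Resulting span sketch: for a middleware "myapp.middleware.TimingMiddleware"
# (an illustrative name) with a process_request hook, the code above produces
# a span with op=OP.MIDDLEWARE_DJANGO and description
# "myapp.middleware.TimingMiddleware.process_request".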
| 5,950 | Python | 30.823529 | 84 | 0.59395 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/transactions.py | """
Copied from raven-python. Used for
`DjangoIntegration(transaction_from="raven_legacy")`.
"""
from __future__ import absolute_import
import re
from sentry_sdk._types import MYPY
if MYPY:
from django.urls.resolvers import URLResolver
from typing import Dict
from typing import List
from typing import Optional
from django.urls.resolvers import URLPattern
from typing import Tuple
from typing import Union
from re import Pattern
try:
from django.urls import get_resolver
except ImportError:
from django.core.urlresolvers import get_resolver
def get_regex(resolver_or_pattern):
# type: (Union[URLPattern, URLResolver]) -> Pattern[str]
"""Utility method for django's deprecated resolver.regex"""
try:
regex = resolver_or_pattern.regex
except AttributeError:
regex = resolver_or_pattern.pattern.regex
return regex
class RavenResolver(object):
_optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
_named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")
_non_named_group_matcher = re.compile(r"\([^\)]+\)")
# [foo|bar|baz]
_either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
_camel_re = re.compile(r"([A-Z]+)([a-z])")
_cache = {} # type: Dict[URLPattern, str]
def _simplify(self, pattern):
# type: (str) -> str
r"""
Clean up urlpattern regexes into something readable by humans:
From:
> "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
To:
> "{sport_slug}/athletes/{athlete_slug}/"
"""
# remove optional params
# TODO(dcramer): it'd be nice to change these into [%s] but it currently
# conflicts with the other rules because we're doing regexp matches
# rather than parsing tokens
result = self._optional_group_matcher.sub(lambda m: "%s" % m.group(1), pattern)
# handle named groups first
result = self._named_group_matcher.sub(lambda m: "{%s}" % m.group(1), result)
# handle non-named groups
result = self._non_named_group_matcher.sub("{var}", result)
        # collapse [option1|option2] alternations to the first option
result = self._either_option_matcher.sub(lambda m: m.group(1), result)
# clean up any outstanding regex-y characters.
result = (
result.replace("^", "")
.replace("$", "")
.replace("?", "")
.replace("\\A", "")
.replace("\\Z", "")
.replace("//", "/")
.replace("\\", "")
)
return result
def _resolve(self, resolver, path, parents=None):
# type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]
match = get_regex(resolver).search(path) # Django < 2.0
if not match:
return None
if parents is None:
parents = [resolver]
elif resolver not in parents:
parents = parents + [resolver]
new_path = path[match.end() :]
for pattern in resolver.url_patterns:
# this is an include()
if not pattern.callback:
match_ = self._resolve(pattern, new_path, parents)
if match_:
return match_
continue
elif not get_regex(pattern).search(new_path):
continue
try:
return self._cache[pattern]
except KeyError:
pass
prefix = "".join(self._simplify(get_regex(p).pattern) for p in parents)
result = prefix + self._simplify(get_regex(pattern).pattern)
if not result.startswith("/"):
result = "/" + result
self._cache[pattern] = result
return result
return None
def resolve(
self,
path, # type: str
urlconf=None, # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]
):
# type: (...) -> Optional[str]
resolver = get_resolver(urlconf)
match = self._resolve(resolver, path)
return match
LEGACY_RESOLVER = RavenResolver()
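# Usage sketch: with a urlconf entry like
#     url(r"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$", view)
# LEGACY_RESOLVER.resolve("/soccer/athletes/messi/") yields
# "/{sport_slug}/athletes/{athlete_slug}/" (illustrative; the output depends
# on the installed URL patterns).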
| 4,161 | Python | 29.379562 | 105 | 0.567892 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/templates.py | from django.template import TemplateSyntaxError
from django import VERSION as DJANGO_VERSION
from sentry_sdk import _functools, Hub
from sentry_sdk._types import MYPY
from sentry_sdk.consts import OP
if MYPY:
from typing import Any
from typing import Dict
from typing import Optional
from typing import Iterator
from typing import Tuple
try:
# support Django 1.9
from django.template.base import Origin
except ImportError:
# backward compatibility
from django.template.loader import LoaderOrigin as Origin
def get_template_frame_from_exception(exc_value):
# type: (Optional[BaseException]) -> Optional[Dict[str, Any]]
# As of Django 1.9 or so the new template debug thing showed up.
if hasattr(exc_value, "template_debug"):
return _get_template_frame_from_debug(exc_value.template_debug) # type: ignore
# As of r16833 (Django) all exceptions may contain a
# ``django_template_source`` attribute (rather than the legacy
# ``TemplateSyntaxError.source`` check)
if hasattr(exc_value, "django_template_source"):
return _get_template_frame_from_source(
exc_value.django_template_source # type: ignore
)
if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, "source"):
source = exc_value.source
if isinstance(source, (tuple, list)) and isinstance(source[0], Origin):
return _get_template_frame_from_source(source) # type: ignore
return None
def _get_template_name_description(template_name):
    # type: (Any) -> str
    if isinstance(template_name, (list, tuple)):
        if template_name:
            return "[{}, ...]".format(template_name[0])
        # An empty sequence used to fall through and return None; return an
        # explicit placeholder so the annotated return type holds.
        return "[]"
    return template_name
def patch_templates():
# type: () -> None
from django.template.response import SimpleTemplateResponse
from sentry_sdk.integrations.django import DjangoIntegration
real_rendered_content = SimpleTemplateResponse.rendered_content
@property # type: ignore
def rendered_content(self):
# type: (SimpleTemplateResponse) -> str
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return real_rendered_content.fget(self)
with hub.start_span(
op=OP.TEMPLATE_RENDER,
description=_get_template_name_description(self.template_name),
) as span:
span.set_data("context", self.context_data)
return real_rendered_content.fget(self)
SimpleTemplateResponse.rendered_content = rendered_content
if DJANGO_VERSION < (1, 7):
return
import django.shortcuts
real_render = django.shortcuts.render
@_functools.wraps(real_render)
def render(request, template_name, context=None, *args, **kwargs):
# type: (django.http.HttpRequest, str, Optional[Dict[str, Any]], *Any, **Any) -> django.http.HttpResponse
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return real_render(request, template_name, context, *args, **kwargs)
with hub.start_span(
op=OP.TEMPLATE_RENDER,
description=_get_template_name_description(template_name),
) as span:
span.set_data("context", context)
return real_render(request, template_name, context, *args, **kwargs)
django.shortcuts.render = render
def _get_template_frame_from_debug(debug):
    # type: (Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]
if debug is None:
return None
lineno = debug["line"]
filename = debug["name"]
if filename is None:
filename = "<django template>"
pre_context = []
post_context = []
context_line = None
for i, line in debug["source_lines"]:
if i < lineno:
pre_context.append(line)
elif i > lineno:
post_context.append(line)
else:
context_line = line
return {
"filename": filename,
"lineno": lineno,
"pre_context": pre_context[-5:],
"post_context": post_context[:5],
"context_line": context_line,
"in_app": True,
}
def _linebreak_iter(template_source):
# type: (str) -> Iterator[int]
yield 0
p = template_source.find("\n")
while p >= 0:
yield p + 1
p = template_source.find("\n", p + 1)
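# Tiny self-check sketch (defined but never called) for the offsets yielded
# above: each value is the start offset of a line, beginning with 0.
def _example_linebreak_offsets():
    # type: () -> None
    assert list(_linebreak_iter("ab\ncd\n")) == [0, 3, 6]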
def _get_template_frame_from_source(source):
# type: (Tuple[Origin, Tuple[int, int]]) -> Optional[Dict[str, Any]]
if not source:
return None
origin, (start, end) = source
filename = getattr(origin, "loadname", None)
if filename is None:
filename = "<django template>"
template_source = origin.reload()
lineno = None
upto = 0
pre_context = []
post_context = []
context_line = None
    for num, next_offset in enumerate(_linebreak_iter(template_source)):
        line = template_source[upto:next_offset]
        if start >= upto and end <= next_offset:
            lineno = num
            context_line = line
        elif lineno is None:
            pre_context.append(line)
        else:
            post_context.append(line)
        upto = next_offset
if context_line is None or lineno is None:
return None
return {
"filename": filename,
"lineno": lineno,
"pre_context": pre_context[-5:],
"post_context": post_context[:5],
"context_line": context_line,
}
| 5,415 | Python | 29.088889 | 113 | 0.627331 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/django/views.py | import threading
from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
from sentry_sdk._types import MYPY
from sentry_sdk import _functools
if MYPY:
from typing import Any
try:
from asyncio import iscoroutinefunction
except ImportError:
iscoroutinefunction = None # type: ignore
try:
from sentry_sdk.integrations.django.asgi import wrap_async_view
except (ImportError, SyntaxError):
wrap_async_view = None # type: ignore
def patch_views():
# type: () -> None
from django.core.handlers.base import BaseHandler
from django.template.response import SimpleTemplateResponse
from sentry_sdk.integrations.django import DjangoIntegration
old_make_view_atomic = BaseHandler.make_view_atomic
old_render = SimpleTemplateResponse.render
def sentry_patched_render(self):
# type: (SimpleTemplateResponse) -> Any
hub = Hub.current
with hub.start_span(
op=OP.VIEW_RESPONSE_RENDER, description="serialize response"
):
return old_render(self)
@_functools.wraps(old_make_view_atomic)
def sentry_patched_make_view_atomic(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
callback = old_make_view_atomic(self, *args, **kwargs)
        # XXX: The wrapper function is created for every request. Find a more
        # efficient way to wrap views (or build a cache?).
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is not None and integration.middleware_spans:
if (
iscoroutinefunction is not None
and wrap_async_view is not None
and iscoroutinefunction(callback)
):
sentry_wrapped_callback = wrap_async_view(hub, callback)
else:
sentry_wrapped_callback = _wrap_sync_view(hub, callback)
else:
sentry_wrapped_callback = callback
return sentry_wrapped_callback
SimpleTemplateResponse.render = sentry_patched_render
BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
def _wrap_sync_view(hub, callback):
# type: (Hub, Any) -> Any
@_functools.wraps(callback)
def sentry_wrapped_callback(request, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
with hub.configure_scope() as sentry_scope:
            # Set the active thread id to the handler thread for sync views.
            # This isn't necessary for async views, since those run on the
            # main thread.
if sentry_scope.profile is not None:
sentry_scope.profile.active_thread_id = threading.current_thread().ident
with hub.start_span(
op=OP.VIEW_RENDER, description=request.resolver_match.view_name
):
return callback(request, *args, **kwargs)
return sentry_wrapped_callback
| 2,895 | Python | 31.177777 | 88 | 0.650777 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/spark/__init__.py | from sentry_sdk.integrations.spark.spark_driver import SparkIntegration
from sentry_sdk.integrations.spark.spark_worker import SparkWorkerIntegration
__all__ = ["SparkIntegration", "SparkWorkerIntegration"]
| 208 | Python | 40.799992 | 77 | 0.831731 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/spark/spark_driver.py | from sentry_sdk import configure_scope
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.utils import capture_internal_exceptions
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Optional
from sentry_sdk._types import Event, Hint
class SparkIntegration(Integration):
identifier = "spark"
@staticmethod
def setup_once():
# type: () -> None
patch_spark_context_init()
def _set_app_properties():
# type: () -> None
"""
    Set properties on the driver that propagate to worker processes, so that
    workers can access them.
    This gives the worker integration access to app_name and application_id.
"""
from pyspark import SparkContext
spark_context = SparkContext._active_spark_context
if spark_context:
spark_context.setLocalProperty("sentry_app_name", spark_context.appName)
spark_context.setLocalProperty(
"sentry_application_id", spark_context.applicationId
)
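# On the worker side, these local properties become readable through
# pyspark.TaskContext; a sketch of what the worker integration relies on:
#
#     from pyspark.taskcontext import TaskContext
#     ctx = TaskContext.get()
#     app_name = ctx._localProperties.get("sentry_app_name")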
def _start_sentry_listener(sc):
# type: (Any) -> None
"""
    Start the Java gateway callback server and register a custom
    `SparkListener`.
"""
from pyspark.java_gateway import ensure_callback_server_started
gw = sc._gateway
ensure_callback_server_started(gw)
listener = SentryListener()
sc._jsc.sc().addSparkListener(listener)
def patch_spark_context_init():
# type: () -> None
from pyspark import SparkContext
spark_context_init = SparkContext._do_init
def _sentry_patched_spark_context_init(self, *args, **kwargs):
# type: (SparkContext, *Any, **Any) -> Optional[Any]
init = spark_context_init(self, *args, **kwargs)
if Hub.current.get_integration(SparkIntegration) is None:
return init
_start_sentry_listener(self)
_set_app_properties()
with configure_scope() as scope:
@scope.add_event_processor
def process_event(event, hint):
# type: (Event, Hint) -> Optional[Event]
with capture_internal_exceptions():
if Hub.current.get_integration(SparkIntegration) is None:
return event
event.setdefault("user", {}).setdefault("id", self.sparkUser())
event.setdefault("tags", {}).setdefault(
"executor.id", self._conf.get("spark.executor.id")
)
event["tags"].setdefault(
"spark-submit.deployMode",
self._conf.get("spark.submit.deployMode"),
)
event["tags"].setdefault(
"driver.host", self._conf.get("spark.driver.host")
)
event["tags"].setdefault(
"driver.port", self._conf.get("spark.driver.port")
)
event["tags"].setdefault("spark_version", self.version)
event["tags"].setdefault("app_name", self.appName)
event["tags"].setdefault("application_id", self.applicationId)
event["tags"].setdefault("master", self.master)
event["tags"].setdefault("spark_home", self.sparkHome)
event.setdefault("extra", {}).setdefault("web_url", self.uiWebUrl)
return event
return init
SparkContext._do_init = _sentry_patched_spark_context_init
class SparkListener(object):
def onApplicationEnd(self, applicationEnd): # noqa: N802,N803
# type: (Any) -> None
pass
def onApplicationStart(self, applicationStart): # noqa: N802,N803
# type: (Any) -> None
pass
def onBlockManagerAdded(self, blockManagerAdded): # noqa: N802,N803
# type: (Any) -> None
pass
def onBlockManagerRemoved(self, blockManagerRemoved): # noqa: N802,N803
# type: (Any) -> None
pass
def onBlockUpdated(self, blockUpdated): # noqa: N802,N803
# type: (Any) -> None
pass
def onEnvironmentUpdate(self, environmentUpdate): # noqa: N802,N803
# type: (Any) -> None
pass
def onExecutorAdded(self, executorAdded): # noqa: N802,N803
# type: (Any) -> None
pass
def onExecutorBlacklisted(self, executorBlacklisted): # noqa: N802,N803
# type: (Any) -> None
pass
def onExecutorBlacklistedForStage( # noqa: N802
self, executorBlacklistedForStage # noqa: N803
):
# type: (Any) -> None
pass
def onExecutorMetricsUpdate(self, executorMetricsUpdate): # noqa: N802,N803
# type: (Any) -> None
pass
def onExecutorRemoved(self, executorRemoved): # noqa: N802,N803
# type: (Any) -> None
pass
def onJobEnd(self, jobEnd): # noqa: N802,N803
# type: (Any) -> None
pass
def onJobStart(self, jobStart): # noqa: N802,N803
# type: (Any) -> None
pass
def onNodeBlacklisted(self, nodeBlacklisted): # noqa: N802,N803
# type: (Any) -> None
pass
def onNodeBlacklistedForStage(self, nodeBlacklistedForStage): # noqa: N802,N803
# type: (Any) -> None
pass
def onNodeUnblacklisted(self, nodeUnblacklisted): # noqa: N802,N803
# type: (Any) -> None
pass
def onOtherEvent(self, event): # noqa: N802,N803
# type: (Any) -> None
pass
def onSpeculativeTaskSubmitted(self, speculativeTask): # noqa: N802,N803
# type: (Any) -> None
pass
def onStageCompleted(self, stageCompleted): # noqa: N802,N803
# type: (Any) -> None
pass
def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803
# type: (Any) -> None
pass
def onTaskEnd(self, taskEnd): # noqa: N802,N803
# type: (Any) -> None
pass
def onTaskGettingResult(self, taskGettingResult): # noqa: N802,N803
# type: (Any) -> None
pass
def onTaskStart(self, taskStart): # noqa: N802,N803
# type: (Any) -> None
pass
def onUnpersistRDD(self, unpersistRDD): # noqa: N802,N803
# type: (Any) -> None
pass
class Java:
implements = ["org.apache.spark.scheduler.SparkListenerInterface"]
class SentryListener(SparkListener):
def __init__(self):
# type: () -> None
self.hub = Hub.current
def onJobStart(self, jobStart): # noqa: N802,N803
# type: (Any) -> None
message = "Job {} Started".format(jobStart.jobId())
self.hub.add_breadcrumb(level="info", message=message)
_set_app_properties()
def onJobEnd(self, jobEnd): # noqa: N802,N803
# type: (Any) -> None
level = ""
message = ""
data = {"result": jobEnd.jobResult().toString()}
if jobEnd.jobResult().toString() == "JobSucceeded":
level = "info"
message = "Job {} Ended".format(jobEnd.jobId())
else:
level = "warning"
message = "Job {} Failed".format(jobEnd.jobId())
self.hub.add_breadcrumb(level=level, message=message, data=data)
def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803
# type: (Any) -> None
stage_info = stageSubmitted.stageInfo()
message = "Stage {} Submitted".format(stage_info.stageId())
data = {"attemptId": stage_info.attemptId(), "name": stage_info.name()}
self.hub.add_breadcrumb(level="info", message=message, data=data)
_set_app_properties()
def onStageCompleted(self, stageCompleted): # noqa: N802,N803
# type: (Any) -> None
from py4j.protocol import Py4JJavaError # type: ignore
stage_info = stageCompleted.stageInfo()
message = ""
level = ""
data = {"attemptId": stage_info.attemptId(), "name": stage_info.name()}
        # We must use try/except because stageInfo.failureReason() returns a Scala Option.
try:
data["reason"] = stage_info.failureReason().get()
message = "Stage {} Failed".format(stage_info.stageId())
level = "warning"
except Py4JJavaError:
message = "Stage {} Completed".format(stage_info.stageId())
level = "info"
self.hub.add_breadcrumb(level=level, message=message, data=data)
| 8,465 | Python | 31.068182 | 121 | 0.587596 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk/integrations/spark/spark_worker.py | from __future__ import absolute_import
import sys
from sentry_sdk import configure_scope
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.utils import (
capture_internal_exceptions,
exc_info_from_error,
single_exception_from_error_tuple,
walk_exception_chain,
event_hint_with_exc_info,
)
from sentry_sdk._types import MYPY
if MYPY:
from typing import Any
from typing import Optional
from sentry_sdk._types import ExcInfo, Event, Hint
class SparkWorkerIntegration(Integration):
identifier = "spark_worker"
@staticmethod
def setup_once():
# type: () -> None
import pyspark.daemon as original_daemon
original_daemon.worker_main = _sentry_worker_main
def _capture_exception(exc_info, hub):
# type: (ExcInfo, Hub) -> None
client = hub.client
client_options = client.options # type: ignore
mechanism = {"type": "spark", "handled": False}
exc_info = exc_info_from_error(exc_info)
exc_type, exc_value, tb = exc_info
rv = []
    # On exception, the worker calls sys.exit(-1), so we can ignore SystemExit and similar errors
for exc_type, exc_value, tb in walk_exception_chain(exc_info):
if exc_type not in (SystemExit, EOFError, ConnectionResetError):
rv.append(
single_exception_from_error_tuple(
exc_type, exc_value, tb, client_options, mechanism
)
)
if rv:
rv.reverse()
hint = event_hint_with_exc_info(exc_info)
event = {"level": "error", "exception": {"values": rv}}
_tag_task_context()
hub.capture_event(event, hint=hint)
def _tag_task_context():
# type: () -> None
from pyspark.taskcontext import TaskContext
with configure_scope() as scope:
@scope.add_event_processor
def process_event(event, hint):
# type: (Event, Hint) -> Optional[Event]
with capture_internal_exceptions():
integration = Hub.current.get_integration(SparkWorkerIntegration)
task_context = TaskContext.get()
if integration is None or task_context is None:
return event
event.setdefault("tags", {}).setdefault(
"stageId", str(task_context.stageId())
)
event["tags"].setdefault("partitionId", str(task_context.partitionId()))
event["tags"].setdefault(
"attemptNumber", str(task_context.attemptNumber())
)
event["tags"].setdefault(
"taskAttemptId", str(task_context.taskAttemptId())
)
if task_context._localProperties:
if "sentry_app_name" in task_context._localProperties:
event["tags"].setdefault(
"app_name", task_context._localProperties["sentry_app_name"]
)
event["tags"].setdefault(
"application_id",
task_context._localProperties["sentry_application_id"],
)
if "callSite.short" in task_context._localProperties:
event.setdefault("extra", {}).setdefault(
"callSite", task_context._localProperties["callSite.short"]
)
return event
def _sentry_worker_main(*args, **kwargs):
# type: (*Optional[Any], **Optional[Any]) -> None
import pyspark.worker as original_worker
try:
original_worker.main(*args, **kwargs)
except SystemExit:
if Hub.current.get_integration(SparkWorkerIntegration) is not None:
hub = Hub.current
exc_info = sys.exc_info()
with capture_internal_exceptions():
_capture_exception(exc_info, hub)
| 3,980 | Python | 30.848 | 96 | 0.573116 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/http.py | from __future__ import annotations
import sys
from .imports import lazy_import
from .version import version as websockets_version
# For backwards compatibility:
lazy_import(
globals(),
# Headers and MultipleValuesError used to be defined in this module.
aliases={
"Headers": ".datastructures",
"MultipleValuesError": ".datastructures",
},
deprecated_aliases={
"read_request": ".legacy.http",
"read_response": ".legacy.http",
},
)
__all__ = ["USER_AGENT"]
PYTHON_VERSION = "{}.{}".format(*sys.version_info)
USER_AGENT = f"Python/{PYTHON_VERSION} websockets/{websockets_version}"
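# For example, "Python/3.10 websockets/10.4" (the exact values depend on the
# running interpreter and the installed websockets version).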
| 644 | Python | 19.806451 | 72 | 0.661491 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/imports.py | from __future__ import annotations
import warnings
from typing import Any, Dict, Iterable, Optional
__all__ = ["lazy_import"]
def import_name(name: str, source: str, namespace: Dict[str, Any]) -> Any:
"""
Import ``name`` from ``source`` in ``namespace``.
There are two use cases:
- ``name`` is an object defined in ``source``;
- ``name`` is a submodule of ``source``.
Neither :func:`__import__` nor :func:`~importlib.import_module` does
exactly this. :func:`__import__` is closer to the intended behavior.
"""
level = 0
while source[level] == ".":
level += 1
assert level < len(source), "importing from parent isn't supported"
module = __import__(source[level:], namespace, None, [name], level)
return getattr(module, name)
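# For example, import_name("path", "os", globals()) is equivalent to
# "from os import path" evaluated in the calling module's namespace.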
def lazy_import(
namespace: Dict[str, Any],
aliases: Optional[Dict[str, str]] = None,
deprecated_aliases: Optional[Dict[str, str]] = None,
) -> None:
"""
Provide lazy, module-level imports.
Typical use::
__getattr__, __dir__ = lazy_import(
globals(),
aliases={
"<name>": "<source module>",
...
},
deprecated_aliases={
...,
}
)
This function defines ``__getattr__`` and ``__dir__`` per :pep:`562`.
"""
if aliases is None:
aliases = {}
if deprecated_aliases is None:
deprecated_aliases = {}
namespace_set = set(namespace)
aliases_set = set(aliases)
deprecated_aliases_set = set(deprecated_aliases)
assert not namespace_set & aliases_set, "namespace conflict"
assert not namespace_set & deprecated_aliases_set, "namespace conflict"
assert not aliases_set & deprecated_aliases_set, "namespace conflict"
package = namespace["__name__"]
def __getattr__(name: str) -> Any:
assert aliases is not None # mypy cannot figure this out
try:
source = aliases[name]
except KeyError:
pass
else:
return import_name(name, source, namespace)
assert deprecated_aliases is not None # mypy cannot figure this out
try:
source = deprecated_aliases[name]
except KeyError:
pass
else:
warnings.warn(
f"{package}.{name} is deprecated",
DeprecationWarning,
stacklevel=2,
)
return import_name(name, source, namespace)
raise AttributeError(f"module {package!r} has no attribute {name!r}")
namespace["__getattr__"] = __getattr__
def __dir__() -> Iterable[str]:
return sorted(namespace_set | aliases_set | deprecated_aliases_set)
namespace["__dir__"] = __dir__
| 2,790 | Python | 26.91 | 77 | 0.568459 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/connection.py | from __future__ import annotations
import enum
import logging
import uuid
from typing import Generator, List, Optional, Type, Union
from .exceptions import (
ConnectionClosed,
ConnectionClosedError,
ConnectionClosedOK,
InvalidState,
PayloadTooBig,
ProtocolError,
)
from .extensions import Extension
from .frames import (
OK_CLOSE_CODES,
OP_BINARY,
OP_CLOSE,
OP_CONT,
OP_PING,
OP_PONG,
OP_TEXT,
Close,
Frame,
)
from .http11 import Request, Response
from .streams import StreamReader
from .typing import LoggerLike, Origin, Subprotocol
__all__ = [
"Connection",
"Side",
"State",
"SEND_EOF",
]
Event = Union[Request, Response, Frame]
"""Events that :meth:`~Connection.events_received` may return."""
class Side(enum.IntEnum):
"""A WebSocket connection is either a server or a client."""
SERVER, CLIENT = range(2)
SERVER = Side.SERVER
CLIENT = Side.CLIENT
class State(enum.IntEnum):
"""A WebSocket connection is in one of these four states."""
CONNECTING, OPEN, CLOSING, CLOSED = range(4)
CONNECTING = State.CONNECTING
OPEN = State.OPEN
CLOSING = State.CLOSING
CLOSED = State.CLOSED
SEND_EOF = b""
"""Sentinel signaling that the TCP connection must be half-closed."""
class Connection:
"""
Sans-I/O implementation of a WebSocket connection.
Args:
side: :attr:`~Side.CLIENT` or :attr:`~Side.SERVER`.
state: initial state of the WebSocket connection.
max_size: maximum size of incoming messages in bytes;
:obj:`None` to disable the limit.
logger: logger for this connection; depending on ``side``,
defaults to ``logging.getLogger("websockets.client")``
or ``logging.getLogger("websockets.server")``;
see the :doc:`logging guide <../topics/logging>` for details.
"""
def __init__(
self,
side: Side,
state: State = OPEN,
max_size: Optional[int] = 2**20,
logger: Optional[LoggerLike] = None,
) -> None:
# Unique identifier. For logs.
self.id: uuid.UUID = uuid.uuid4()
"""Unique identifier of the connection. Useful in logs."""
# Logger or LoggerAdapter for this connection.
if logger is None:
logger = logging.getLogger(f"websockets.{side.name.lower()}")
self.logger: LoggerLike = logger
"""Logger for this connection."""
# Track if DEBUG is enabled. Shortcut logging calls if it isn't.
self.debug = logger.isEnabledFor(logging.DEBUG)
# Connection side. CLIENT or SERVER.
self.side = side
# Connection state. Initially OPEN because subclasses handle CONNECTING.
self.state = state
# Maximum size of incoming messages in bytes.
self.max_size = max_size
# Current size of incoming message in bytes. Only set while reading a
        # fragmented message, i.e. a data frame with the FIN bit not set.
self.cur_size: Optional[int] = None
        # True while sending a fragmented message, i.e. a data frame with the
# FIN bit not set.
self.expect_continuation_frame = False
# WebSocket protocol parameters.
self.origin: Optional[Origin] = None
self.extensions: List[Extension] = []
self.subprotocol: Optional[Subprotocol] = None
# Close code and reason, set when a close frame is sent or received.
self.close_rcvd: Optional[Close] = None
self.close_sent: Optional[Close] = None
self.close_rcvd_then_sent: Optional[bool] = None
# Track if an exception happened during the handshake.
self.handshake_exc: Optional[Exception] = None
"""
Exception to raise if the opening handshake failed.
:obj:`None` if the opening handshake succeeded.
"""
# Track if send_eof() was called.
self.eof_sent = False
# Parser state.
self.reader = StreamReader()
self.events: List[Event] = []
self.writes: List[bytes] = []
self.parser = self.parse()
next(self.parser) # start coroutine
self.parser_exc: Optional[Exception] = None
@property
def state(self) -> State:
"""
WebSocket connection state.
Defined in 4.1, 4.2, 7.1.3, and 7.1.4 of :rfc:`6455`.
"""
return self._state
@state.setter
def state(self, state: State) -> None:
if self.debug:
self.logger.debug("= connection is %s", state.name)
self._state = state
@property
def close_code(self) -> Optional[int]:
"""
`WebSocket close code`_.
.. _WebSocket close code:
https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.5
:obj:`None` if the connection isn't closed yet.
"""
if self.state is not CLOSED:
return None
elif self.close_rcvd is None:
return 1006
else:
return self.close_rcvd.code
@property
def close_reason(self) -> Optional[str]:
"""
`WebSocket close reason`_.
.. _WebSocket close reason:
https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.6
:obj:`None` if the connection isn't closed yet.
"""
if self.state is not CLOSED:
return None
elif self.close_rcvd is None:
return ""
else:
return self.close_rcvd.reason
@property
def close_exc(self) -> ConnectionClosed:
"""
Exception to raise when trying to interact with a closed connection.
Don't raise this exception while the connection :attr:`state`
is :attr:`~websockets.connection.State.CLOSING`; wait until
it's :attr:`~websockets.connection.State.CLOSED`.
Indeed, the exception includes the close code and reason, which are
known only once the connection is closed.
Raises:
AssertionError: if the connection isn't closed yet.
"""
assert self.state is CLOSED, "connection isn't closed yet"
exc_type: Type[ConnectionClosed]
if (
self.close_rcvd is not None
and self.close_sent is not None
and self.close_rcvd.code in OK_CLOSE_CODES
and self.close_sent.code in OK_CLOSE_CODES
):
exc_type = ConnectionClosedOK
else:
exc_type = ConnectionClosedError
exc: ConnectionClosed = exc_type(
self.close_rcvd,
self.close_sent,
self.close_rcvd_then_sent,
)
# Chain to the exception raised in the parser, if any.
exc.__cause__ = self.parser_exc
return exc
# Public methods for receiving data.
def receive_data(self, data: bytes) -> None:
"""
Receive data from the network.
After calling this method:
- You must call :meth:`data_to_send` and send this data to the network.
- You should call :meth:`events_received` and process resulting events.
Raises:
EOFError: if :meth:`receive_eof` was called earlier.
"""
self.reader.feed_data(data)
next(self.parser)
def receive_eof(self) -> None:
"""
Receive the end of the data stream from the network.
After calling this method:
- You must call :meth:`data_to_send` and send this data to the network.
- You aren't expected to call :meth:`events_received`; it won't return
any new events.
Raises:
EOFError: if :meth:`receive_eof` was called earlier.
"""
self.reader.feed_eof()
next(self.parser)
# Public methods for sending events.
def send_continuation(self, data: bytes, fin: bool) -> None:
"""
Send a `Continuation frame`_.
.. _Continuation frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
Parameters:
data: payload containing the same kind of data
as the initial frame.
fin: FIN bit; set it to :obj:`True` if this is the last frame
of a fragmented message and to :obj:`False` otherwise.
Raises:
ProtocolError: if a fragmented message isn't in progress.
"""
if not self.expect_continuation_frame:
raise ProtocolError("unexpected continuation frame")
self.expect_continuation_frame = not fin
self.send_frame(Frame(OP_CONT, data, fin))
def send_text(self, data: bytes, fin: bool = True) -> None:
"""
Send a `Text frame`_.
.. _Text frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
Parameters:
data: payload containing text encoded with UTF-8.
fin: FIN bit; set it to :obj:`False` if this is the first frame of
a fragmented message.
Raises:
ProtocolError: if a fragmented message is in progress.
"""
if self.expect_continuation_frame:
raise ProtocolError("expected a continuation frame")
self.expect_continuation_frame = not fin
self.send_frame(Frame(OP_TEXT, data, fin))
def send_binary(self, data: bytes, fin: bool = True) -> None:
"""
Send a `Binary frame`_.
.. _Binary frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
Parameters:
data: payload containing arbitrary binary data.
fin: FIN bit; set it to :obj:`False` if this is the first frame of
a fragmented message.
Raises:
ProtocolError: if a fragmented message is in progress.
"""
if self.expect_continuation_frame:
raise ProtocolError("expected a continuation frame")
self.expect_continuation_frame = not fin
self.send_frame(Frame(OP_BINARY, data, fin))
def send_close(self, code: Optional[int] = None, reason: str = "") -> None:
"""
Send a `Close frame`_.
.. _Close frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.1
Parameters:
code: close code.
reason: close reason.
Raises:
ProtocolError: if a fragmented message is being sent, if the code
isn't valid, or if a reason is provided without a code
"""
if self.expect_continuation_frame:
raise ProtocolError("expected a continuation frame")
if code is None:
if reason != "":
raise ProtocolError("cannot send a reason without a code")
close = Close(1005, "")
data = b""
else:
close = Close(code, reason)
data = close.serialize()
# send_frame() guarantees that self.state is OPEN at this point.
# 7.1.3. The WebSocket Closing Handshake is Started
self.send_frame(Frame(OP_CLOSE, data))
self.close_sent = close
self.state = CLOSING
def send_ping(self, data: bytes) -> None:
"""
Send a `Ping frame`_.
.. _Ping frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2
Parameters:
data: payload containing arbitrary binary data.
"""
self.send_frame(Frame(OP_PING, data))
def send_pong(self, data: bytes) -> None:
"""
Send a `Pong frame`_.
.. _Pong frame:
https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3
Parameters:
data: payload containing arbitrary binary data.
"""
self.send_frame(Frame(OP_PONG, data))
def fail(self, code: int, reason: str = "") -> None:
"""
`Fail the WebSocket connection`_.
.. _Fail the WebSocket connection:
https://datatracker.ietf.org/doc/html/rfc6455#section-7.1.7
Parameters:
code: close code
reason: close reason
Raises:
ProtocolError: if the code isn't valid.
"""
# 7.1.7. Fail the WebSocket Connection
# Send a close frame when the state is OPEN (a close frame was already
# sent if it's CLOSING), except when failing the connection because
# of an error reading from or writing to the network.
if self.state is OPEN:
if code != 1006:
close = Close(code, reason)
data = close.serialize()
self.send_frame(Frame(OP_CLOSE, data))
self.close_sent = close
self.state = CLOSING
# When failing the connection, a server closes the TCP connection
# without waiting for the client to complete the handshake, while a
# client waits for the server to close the TCP connection, possibly
# after sending a close frame that the client will ignore.
if self.side is SERVER and not self.eof_sent:
self.send_eof()
# 7.1.7. Fail the WebSocket Connection "An endpoint MUST NOT continue
        # to attempt to process data (including a responding Close frame) from
# the remote endpoint after being instructed to _Fail the WebSocket
# Connection_."
self.parser = self.discard()
next(self.parser) # start coroutine
# Public method for getting incoming events after receiving data.
def events_received(self) -> List[Event]:
"""
Fetch events generated from data received from the network.
Call this method immediately after any of the ``receive_*()`` methods.
Process resulting events, likely by passing them to the application.
Returns:
List[Event]: Events read from the connection.
"""
events, self.events = self.events, []
return events
# Public method for getting outgoing data after receiving data or sending events.
def data_to_send(self) -> List[bytes]:
"""
Obtain data to send to the network.
Call this method immediately after any of the ``receive_*()``,
``send_*()``, or :meth:`fail` methods.
Write resulting data to the connection.
The empty bytestring :data:`~websockets.connection.SEND_EOF` signals
the end of the data stream. When you receive it, half-close the TCP
connection.
Returns:
List[bytes]: Data to write to the connection.
"""
writes, self.writes = self.writes, []
return writes
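    # A minimal I/O glue sketch (illustrative only, not part of this class):
    # feed bytes from a socket into the connection, flush outgoing writes,
    # then process the resulting events.
    #
    #     conn = Connection(side=SERVER)
    #     while True:
    #         data = sock.recv(4096)
    #         if data:
    #             conn.receive_data(data)
    #         else:
    #             conn.receive_eof()
    #         for out in conn.data_to_send():
    #             if out:
    #                 sock.sendall(out)
    #             else:  # SEND_EOF: half-close the TCP connection
    #                 sock.shutdown(socket.SHUT_WR)
    #         for event in conn.events_received():
    #             ...  # handle Request, Response, or Frame events
    #         if not data:
    #             break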
def close_expected(self) -> bool:
"""
Tell if the TCP connection is expected to close soon.
Call this method immediately after any of the ``receive_*()`` or
:meth:`fail` methods.
If it returns :obj:`True`, schedule closing the TCP connection after a
short timeout if the other side hasn't already closed it.
Returns:
bool: Whether the TCP connection is expected to close soon.
"""
# We expect a TCP close if and only if we sent a close frame:
# * Normal closure: once we send a close frame, we expect a TCP close:
# server waits for client to complete the TCP closing handshake;
# client waits for server to initiate the TCP closing handshake.
# * Abnormal closure: we always send a close frame and the same logic
# applies, except on EOFError where we don't send a close frame
# because we already received the TCP close, so we don't expect it.
# We already got a TCP Close if and only if the state is CLOSED.
return self.state is CLOSING or self.handshake_exc is not None
# Private methods for receiving data.
def parse(self) -> Generator[None, None, None]:
"""
Parse incoming data into frames.
:meth:`receive_data` and :meth:`receive_eof` run this generator
coroutine until it needs more data or reaches EOF.
"""
try:
while True:
if (yield from self.reader.at_eof()):
if self.debug:
self.logger.debug("< EOF")
# If the WebSocket connection is closed cleanly, with a
                    # closing handshake, recv_frame() substitutes parse()
# with discard(). This branch is reached only when the
# connection isn't closed cleanly.
raise EOFError("unexpected end of stream")
if self.max_size is None:
max_size = None
elif self.cur_size is None:
max_size = self.max_size
else:
max_size = self.max_size - self.cur_size
# During a normal closure, execution ends here on the next
# iteration of the loop after receiving a close frame. At
# this point, recv_frame() replaced parse() by discard().
frame = yield from Frame.parse(
self.reader.read_exact,
mask=self.side is SERVER,
max_size=max_size,
extensions=self.extensions,
)
if self.debug:
self.logger.debug("< %s", frame)
self.recv_frame(frame)
except ProtocolError as exc:
self.fail(1002, str(exc))
self.parser_exc = exc
except EOFError as exc:
self.fail(1006, str(exc))
self.parser_exc = exc
except UnicodeDecodeError as exc:
self.fail(1007, f"{exc.reason} at position {exc.start}")
self.parser_exc = exc
except PayloadTooBig as exc:
self.fail(1009, str(exc))
self.parser_exc = exc
except Exception as exc:
self.logger.error("parser failed", exc_info=True)
# Don't include exception details, which may be security-sensitive.
self.fail(1011)
self.parser_exc = exc
# During an abnormal closure, execution ends here after catching an
# exception. At this point, fail() replaced parse() by discard().
yield
raise AssertionError("parse() shouldn't step after error") # pragma: no cover
def discard(self) -> Generator[None, None, None]:
"""
Discard incoming data.
This coroutine replaces :meth:`parse`:
- after receiving a close frame, during a normal closure (1.4);
- after sending a close frame, during an abnormal closure (7.1.7).
"""
        # The server closes the TCP connection in the same circumstances where
# discard() replaces parse(). The client closes the connection later,
# after the server closes the connection or a timeout elapses.
# (The latter case cannot be handled in this Sans-I/O layer.)
assert (self.side is SERVER) == (self.eof_sent)
while not (yield from self.reader.at_eof()):
self.reader.discard()
if self.debug:
self.logger.debug("< EOF")
# A server closes the TCP connection immediately, while a client
# waits for the server to close the TCP connection.
if self.side is CLIENT:
self.send_eof()
self.state = CLOSED
# If discard() completes normally, execution ends here.
yield
# Once the reader reaches EOF, its feed_data/eof() methods raise an
# error, so our receive_data/eof() methods don't step the generator.
raise AssertionError("discard() shouldn't step after EOF") # pragma: no cover
def recv_frame(self, frame: Frame) -> None:
"""
Process an incoming frame.
"""
if frame.opcode is OP_TEXT or frame.opcode is OP_BINARY:
if self.cur_size is not None:
raise ProtocolError("expected a continuation frame")
if frame.fin:
self.cur_size = None
else:
self.cur_size = len(frame.data)
elif frame.opcode is OP_CONT:
if self.cur_size is None:
raise ProtocolError("unexpected continuation frame")
if frame.fin:
self.cur_size = None
else:
self.cur_size += len(frame.data)
elif frame.opcode is OP_PING:
# 5.5.2. Ping: "Upon receipt of a Ping frame, an endpoint MUST
# send a Pong frame in response"
pong_frame = Frame(OP_PONG, frame.data)
self.send_frame(pong_frame)
elif frame.opcode is OP_PONG:
# 5.5.3 Pong: "A response to an unsolicited Pong frame is not
# expected."
pass
elif frame.opcode is OP_CLOSE:
# 7.1.5. The WebSocket Connection Close Code
# 7.1.6. The WebSocket Connection Close Reason
self.close_rcvd = Close.parse(frame.data)
if self.state is CLOSING:
assert self.close_sent is not None
self.close_rcvd_then_sent = False
if self.cur_size is not None:
raise ProtocolError("incomplete fragmented message")
# 5.5.1 Close: "If an endpoint receives a Close frame and did
# not previously send a Close frame, the endpoint MUST send a
# Close frame in response. (When sending a Close frame in
# response, the endpoint typically echos the status code it
# received.)"
if self.state is OPEN:
# Echo the original data instead of re-serializing it with
# Close.serialize() because that fails when the close frame
                # is empty and Close.parse() synthesizes a 1005 close code.
# The rest is identical to send_close().
self.send_frame(Frame(OP_CLOSE, frame.data))
self.close_sent = self.close_rcvd
self.close_rcvd_then_sent = True
self.state = CLOSING
# 7.1.2. Start the WebSocket Closing Handshake: "Once an
# endpoint has both sent and received a Close control frame,
# that endpoint SHOULD _Close the WebSocket Connection_"
# A server closes the TCP connection immediately, while a client
# waits for the server to close the TCP connection.
if self.side is SERVER:
self.send_eof()
# 1.4. Closing Handshake: "after receiving a control frame
# indicating the connection should be closed, a peer discards
# any further data received."
self.parser = self.discard()
next(self.parser) # start coroutine
else: # pragma: no cover
# This can't happen because Frame.parse() validates opcodes.
raise AssertionError(f"unexpected opcode: {frame.opcode:02x}")
self.events.append(frame)
# Private methods for sending events.
def send_frame(self, frame: Frame) -> None:
if self.state is not OPEN:
raise InvalidState(
f"cannot write to a WebSocket in the {self.state.name} state"
)
if self.debug:
self.logger.debug("> %s", frame)
self.writes.append(
frame.serialize(mask=self.side is CLIENT, extensions=self.extensions)
)
def send_eof(self) -> None:
assert not self.eof_sent
self.eof_sent = True
if self.debug:
self.logger.debug("> EOF")
self.writes.append(SEND_EOF)
| 23,665 | Python | 32.664296 | 86 | 0.587661 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/exceptions.py | """
:mod:`websockets.exceptions` defines the following exception hierarchy:
* :exc:`WebSocketException`
* :exc:`ConnectionClosed`
* :exc:`ConnectionClosedError`
* :exc:`ConnectionClosedOK`
* :exc:`InvalidHandshake`
* :exc:`SecurityError`
* :exc:`InvalidMessage`
* :exc:`InvalidHeader`
* :exc:`InvalidHeaderFormat`
* :exc:`InvalidHeaderValue`
* :exc:`InvalidOrigin`
* :exc:`InvalidUpgrade`
* :exc:`InvalidStatus`
* :exc:`InvalidStatusCode` (legacy)
* :exc:`NegotiationError`
* :exc:`DuplicateParameter`
* :exc:`InvalidParameterName`
* :exc:`InvalidParameterValue`
* :exc:`AbortHandshake`
* :exc:`RedirectHandshake`
* :exc:`InvalidState`
* :exc:`InvalidURI`
* :exc:`PayloadTooBig`
* :exc:`ProtocolError`
"""
from __future__ import annotations
import http
from typing import Optional
from . import datastructures, frames, http11
__all__ = [
"WebSocketException",
"ConnectionClosed",
"ConnectionClosedError",
"ConnectionClosedOK",
"InvalidHandshake",
"SecurityError",
"InvalidMessage",
"InvalidHeader",
"InvalidHeaderFormat",
"InvalidHeaderValue",
"InvalidOrigin",
"InvalidUpgrade",
"InvalidStatus",
"InvalidStatusCode",
"NegotiationError",
"DuplicateParameter",
"InvalidParameterName",
"InvalidParameterValue",
"AbortHandshake",
"RedirectHandshake",
"InvalidState",
"InvalidURI",
"PayloadTooBig",
"ProtocolError",
"WebSocketProtocolError",
]
class WebSocketException(Exception):
"""
Base class for all exceptions defined by websockets.
"""
class ConnectionClosed(WebSocketException):
"""
Raised when trying to interact with a closed connection.
Attributes:
rcvd (Optional[Close]): if a close frame was received, its code and
reason are available in ``rcvd.code`` and ``rcvd.reason``.
sent (Optional[Close]): if a close frame was sent, its code and reason
are available in ``sent.code`` and ``sent.reason``.
rcvd_then_sent (Optional[bool]): if close frames were received and
sent, this attribute tells in which order this happened, from the
perspective of this side of the connection.
"""
def __init__(
self,
rcvd: Optional[frames.Close],
sent: Optional[frames.Close],
rcvd_then_sent: Optional[bool] = None,
) -> None:
self.rcvd = rcvd
self.sent = sent
self.rcvd_then_sent = rcvd_then_sent
def __str__(self) -> str:
if self.rcvd is None:
if self.sent is None:
assert self.rcvd_then_sent is None
return "no close frame received or sent"
else:
assert self.rcvd_then_sent is None
return f"sent {self.sent}; no close frame received"
else:
if self.sent is None:
assert self.rcvd_then_sent is None
return f"received {self.rcvd}; no close frame sent"
else:
assert self.rcvd_then_sent is not None
if self.rcvd_then_sent:
return f"received {self.rcvd}; then sent {self.sent}"
else:
return f"sent {self.sent}; then received {self.rcvd}"
# code and reason attributes are provided for backwards-compatibility
@property
def code(self) -> int:
return 1006 if self.rcvd is None else self.rcvd.code
@property
def reason(self) -> str:
return "" if self.rcvd is None else self.rcvd.reason
class ConnectionClosedError(ConnectionClosed):
"""
Like :exc:`ConnectionClosed`, when the connection terminated with an error.
A close code other than 1000 (OK) or 1001 (going away) was received or
sent, or the closing handshake didn't complete properly.
"""
class ConnectionClosedOK(ConnectionClosed):
"""
Like :exc:`ConnectionClosed`, when the connection terminated properly.
A close code 1000 (OK) or 1001 (going away) was received and sent.
"""
class InvalidHandshake(WebSocketException):
"""
Raised during the handshake when the WebSocket connection fails.
"""
class SecurityError(InvalidHandshake):
"""
Raised when a handshake request or response breaks a security rule.
Security limits are hard coded.
"""
class InvalidMessage(InvalidHandshake):
"""
Raised when a handshake request or response is malformed.
"""
class InvalidHeader(InvalidHandshake):
"""
    Raised when an HTTP header doesn't have a valid format or value.
"""
def __init__(self, name: str, value: Optional[str] = None) -> None:
self.name = name
self.value = value
def __str__(self) -> str:
if self.value is None:
return f"missing {self.name} header"
elif self.value == "":
return f"empty {self.name} header"
else:
return f"invalid {self.name} header: {self.value}"
class InvalidHeaderFormat(InvalidHeader):
"""
    Raised when an HTTP header cannot be parsed.
The format of the header doesn't match the grammar for that header.
"""
def __init__(self, name: str, error: str, header: str, pos: int) -> None:
super().__init__(name, f"{error} at {pos} in {header}")
class InvalidHeaderValue(InvalidHeader):
"""
    Raised when an HTTP header has a wrong value.
The format of the header is correct but a value isn't acceptable.
"""
class InvalidOrigin(InvalidHeader):
"""
Raised when the Origin header in a request isn't allowed.
"""
def __init__(self, origin: Optional[str]) -> None:
super().__init__("Origin", origin)
class InvalidUpgrade(InvalidHeader):
"""
Raised when the Upgrade or Connection header isn't correct.
"""
class InvalidStatus(InvalidHandshake):
"""
Raised when a handshake response rejects the WebSocket upgrade.
"""
def __init__(self, response: http11.Response) -> None:
self.response = response
def __str__(self) -> str:
return (
"server rejected WebSocket connection: "
f"HTTP {self.response.status_code:d}"
)
class InvalidStatusCode(InvalidHandshake):
"""
Raised when a handshake response status code is invalid.
"""
def __init__(self, status_code: int, headers: datastructures.Headers) -> None:
self.status_code = status_code
self.headers = headers
def __str__(self) -> str:
return f"server rejected WebSocket connection: HTTP {self.status_code}"
class NegotiationError(InvalidHandshake):
"""
Raised when negotiating an extension fails.
"""
class DuplicateParameter(NegotiationError):
"""
Raised when a parameter name is repeated in an extension header.
"""
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"duplicate parameter: {self.name}"
class InvalidParameterName(NegotiationError):
"""
Raised when a parameter name in an extension header is invalid.
"""
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"invalid parameter name: {self.name}"
class InvalidParameterValue(NegotiationError):
"""
Raised when a parameter value in an extension header is invalid.
"""
def __init__(self, name: str, value: Optional[str]) -> None:
self.name = name
self.value = value
def __str__(self) -> str:
if self.value is None:
return f"missing value for parameter {self.name}"
elif self.value == "":
return f"empty value for parameter {self.name}"
else:
return f"invalid value for parameter {self.name}: {self.value}"
class AbortHandshake(InvalidHandshake):
"""
    Raised to abort the handshake on purpose and return an HTTP response.
This exception is an implementation detail.
The public API
is :meth:`~websockets.server.WebSocketServerProtocol.process_request`.
Attributes:
status (~http.HTTPStatus): HTTP status code.
headers (Headers): HTTP response headers.
body (bytes): HTTP response body.
"""
def __init__(
self,
status: http.HTTPStatus,
headers: datastructures.HeadersLike,
body: bytes = b"",
) -> None:
self.status = status
self.headers = datastructures.Headers(headers)
self.body = body
def __str__(self) -> str:
return (
f"HTTP {self.status:d}, "
f"{len(self.headers)} headers, "
f"{len(self.body)} bytes"
)
class RedirectHandshake(InvalidHandshake):
"""
Raised when a handshake gets redirected.
This exception is an implementation detail.
"""
def __init__(self, uri: str) -> None:
self.uri = uri
def __str__(self) -> str:
return f"redirect to {self.uri}"
class InvalidState(WebSocketException, AssertionError):
"""
Raised when an operation is forbidden in the current state.
This exception is an implementation detail.
It should never be raised in normal circumstances.
"""
class InvalidURI(WebSocketException):
"""
    Raised when connecting to a URI that isn't a valid WebSocket URI.
"""
def __init__(self, uri: str, msg: str) -> None:
self.uri = uri
self.msg = msg
def __str__(self) -> str:
return f"{self.uri} isn't a valid URI: {self.msg}"
class PayloadTooBig(WebSocketException):
"""
Raised when receiving a frame with a payload exceeding the maximum size.
"""
class ProtocolError(WebSocketException):
"""
Raised when a frame breaks the protocol.
"""
WebSocketProtocolError = ProtocolError # for backwards compatibility
| 10,049 | Python | 24.18797 | 82 | 0.617872 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/datastructures.py | from __future__ import annotations
import sys
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Tuple,
Union,
)
if sys.version_info[:2] >= (3, 8):
from typing import Protocol
else: # pragma: no cover
Protocol = object # mypy will report errors on Python 3.7.
__all__ = ["Headers", "HeadersLike", "MultipleValuesError"]
class MultipleValuesError(LookupError):
"""
Exception raised when :class:`Headers` has more than one value for a key.
"""
def __str__(self) -> str:
# Implement the same logic as KeyError_str in Objects/exceptions.c.
if len(self.args) == 1:
return repr(self.args[0])
return super().__str__()
class Headers(MutableMapping[str, str]):
"""
Efficient data structure for manipulating HTTP headers.
A :class:`list` of ``(name, values)`` is inefficient for lookups.
A :class:`dict` doesn't suffice because header names are case-insensitive
and multiple occurrences of headers with the same name are possible.
:class:`Headers` stores HTTP headers in a hybrid data structure to provide
efficient insertions and lookups while preserving the original data.
In order to account for multiple values with minimal hassle,
:class:`Headers` follows this logic:
- When getting a header with ``headers[name]``:
- if there's no value, :exc:`KeyError` is raised;
- if there's exactly one value, it's returned;
- if there's more than one value, :exc:`MultipleValuesError` is raised.
- When setting a header with ``headers[name] = value``, the value is
appended to the list of values for that header.
- When deleting a header with ``del headers[name]``, all values for that
header are removed (this is slow).
Other methods for manipulating headers are consistent with this logic.
As long as no header occurs multiple times, :class:`Headers` behaves like
:class:`dict`, except keys are lower-cased to provide case-insensitivity.
Two methods support manipulating multiple values explicitly:
- :meth:`get_all` returns a list of all values for a header;
- :meth:`raw_items` returns an iterator of ``(name, values)`` pairs.
"""
__slots__ = ["_dict", "_list"]
# Like dict, Headers accepts an optional "mapping or iterable" argument.
def __init__(self, *args: HeadersLike, **kwargs: str) -> None:
self._dict: Dict[str, List[str]] = {}
self._list: List[Tuple[str, str]] = []
self.update(*args, **kwargs)
def __str__(self) -> str:
return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._list!r})"
def copy(self) -> Headers:
copy = self.__class__()
copy._dict = self._dict.copy()
copy._list = self._list.copy()
return copy
def serialize(self) -> bytes:
# Since headers only contain ASCII characters, we can keep this simple.
return str(self).encode()
# Collection methods
def __contains__(self, key: object) -> bool:
return isinstance(key, str) and key.lower() in self._dict
def __iter__(self) -> Iterator[str]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
# MutableMapping methods
def __getitem__(self, key: str) -> str:
value = self._dict[key.lower()]
if len(value) == 1:
return value[0]
else:
raise MultipleValuesError(key)
def __setitem__(self, key: str, value: str) -> None:
self._dict.setdefault(key.lower(), []).append(value)
self._list.append((key, value))
def __delitem__(self, key: str) -> None:
key_lower = key.lower()
self._dict.__delitem__(key_lower)
# This is inefficient. Fortunately deleting HTTP headers is uncommon.
self._list = [(k, v) for k, v in self._list if k.lower() != key_lower]
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Headers):
return NotImplemented
return self._dict == other._dict
def clear(self) -> None:
"""
Remove all headers.
"""
self._dict = {}
self._list = []
def update(self, *args: HeadersLike, **kwargs: str) -> None:
"""
Update from a :class:`Headers` instance and/or keyword arguments.
"""
args = tuple(
arg.raw_items() if isinstance(arg, Headers) else arg for arg in args
)
super().update(*args, **kwargs)
# Methods for handling multiple values
def get_all(self, key: str) -> List[str]:
"""
Return the (possibly empty) list of all values for a header.
Args:
key: header name.
"""
return self._dict.get(key.lower(), [])
def raw_items(self) -> Iterator[Tuple[str, str]]:
"""
Return an iterator of all values as ``(name, value)`` pairs.
"""
return iter(self._list)
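# Behaviour sketch for the multi-value rules described in the Headers
# docstring (illustrative):
#
#     h = Headers()
#     h["Set-Cookie"] = "a=1"
#     h["Set-Cookie"] = "b=2"     # appends; does not overwrite
#     h.get_all("set-cookie")     # -> ["a=1", "b=2"] (case-insensitive)
#     h["Set-Cookie"]             # raises MultipleValuesError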
# copy of _typeshed.SupportsKeysAndGetItem.
class SupportsKeysAndGetItem(Protocol): # pragma: no cover
"""
    Dict-like types with ``keys() -> Iterable[str]`` and ``__getitem__(key: str) -> str`` methods.
"""
def keys(self) -> Iterable[str]:
...
def __getitem__(self, key: str) -> str:
...
HeadersLike = Union[
Headers,
Mapping[str, str],
Iterable[Tuple[str, str]],
SupportsKeysAndGetItem,
]
"""
Types accepted where :class:`Headers` is expected.
In addition to :class:`Headers` itself, this includes dict-like types where both
keys and values are :class:`str`.
"""
| 5,738 | Python | 27.552239 | 88 | 0.602475 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/headers.py | from __future__ import annotations
import base64
import binascii
import ipaddress
import re
from typing import Callable, List, Optional, Sequence, Tuple, TypeVar, cast
from . import exceptions
from .typing import (
ConnectionOption,
ExtensionHeader,
ExtensionName,
ExtensionParameter,
Subprotocol,
UpgradeProtocol,
)
__all__ = [
"build_host",
"parse_connection",
"parse_upgrade",
"parse_extension",
"build_extension",
"parse_subprotocol",
"build_subprotocol",
"validate_subprotocols",
"build_www_authenticate_basic",
"parse_authorization_basic",
"build_authorization_basic",
]
T = TypeVar("T")
def build_host(host: str, port: int, secure: bool) -> str:
"""
Build a ``Host`` header.
"""
# https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2.2
# IPv6 addresses must be enclosed in brackets.
try:
address = ipaddress.ip_address(host)
except ValueError:
# host is a hostname
pass
else:
# host is an IP address
if address.version == 6:
host = f"[{host}]"
if port != (443 if secure else 80):
host = f"{host}:{port}"
return host
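# Examples (default ports are elided; IPv6 addresses get brackets):
#
#     build_host("example.com", 80, secure=False)    # -> "example.com"
#     build_host("example.com", 8080, secure=False)  # -> "example.com:8080"
#     build_host("::1", 443, secure=True)            # -> "[::1]"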
# To avoid a dependency on a parsing library, we implement manually the ABNF
# described in https://www.rfc-editor.org/rfc/rfc6455.html#section-9.1 and
# https://www.rfc-editor.org/rfc/rfc7230.html#appendix-B.
def peek_ahead(header: str, pos: int) -> Optional[str]:
"""
Return the next character from ``header`` at the given position.
Return :obj:`None` at the end of ``header``.
We never need to peek more than one character ahead.
"""
return None if pos == len(header) else header[pos]
_OWS_re = re.compile(r"[\t ]*")
def parse_OWS(header: str, pos: int) -> int:
"""
Parse optional whitespace from ``header`` at the given position.
Return the new position.
The whitespace itself isn't returned because it isn't significant.
"""
# There's always a match, possibly empty, whose content doesn't matter.
match = _OWS_re.match(header, pos)
assert match is not None
return match.end()
_token_re = re.compile(r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+")
def parse_token(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a token from ``header`` at the given position.
Return the token value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _token_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(header_name, "expected token", header, pos)
return match.group(), match.end()
_quoted_string_re = re.compile(
r'"(?:[\x09\x20-\x21\x23-\x5b\x5d-\x7e]|\\[\x09\x20-\x7e\x80-\xff])*"'
)
_unquote_re = re.compile(r"\\([\x09\x20-\x7e\x80-\xff])")
def parse_quoted_string(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a quoted string from ``header`` at the given position.
Return the unquoted value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _quoted_string_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected quoted string", header, pos
)
return _unquote_re.sub(r"\1", match.group()[1:-1]), match.end()
_quotable_re = re.compile(r"[\x09\x20-\x7e\x80-\xff]*")
_quote_re = re.compile(r"([\x22\x5c])")
def build_quoted_string(value: str) -> str:
"""
Format ``value`` as a quoted string.
This is the reverse of :func:`parse_quoted_string`.
"""
match = _quotable_re.fullmatch(value)
if match is None:
raise ValueError("invalid characters for quoted-string encoding")
return '"' + _quote_re.sub(r"\\\1", value) + '"'
def parse_list(
parse_item: Callable[[str, int, str], Tuple[T, int]],
header: str,
pos: int,
header_name: str,
) -> List[T]:
"""
Parse a comma-separated list from ``header`` at the given position.
This is appropriate for parsing values with the following grammar:
1#item
``parse_item`` parses one item.
``header`` is assumed not to start or end with whitespace.
(This function is designed for parsing an entire header value and
:func:`~websockets.http.read_headers` strips whitespace from values.)
Return a list of items.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Per https://www.rfc-editor.org/rfc/rfc7230.html#section-7, "a recipient
# MUST parse and ignore a reasonable number of empty list elements";
# hence while loops that remove extra delimiters.
# Remove extra delimiters before the first item.
while peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
items = []
while True:
        # Loop invariant: an item starts at pos in header.
item, pos = parse_item(header, pos, header_name)
items.append(item)
pos = parse_OWS(header, pos)
# We may have reached the end of the header.
if pos == len(header):
break
# There must be a delimiter after each element except the last one.
if peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
else:
raise exceptions.InvalidHeaderFormat(
header_name, "expected comma", header, pos
)
# Remove extra delimiters before the next item.
while peek_ahead(header, pos) == ",":
pos = parse_OWS(header, pos + 1)
# We may have reached the end of the header.
if pos == len(header):
break
# Since we only advance in the header by one character with peek_ahead()
# or with the end position of a regex match, we can't overshoot the end.
assert pos == len(header)
return items
def parse_connection_option(
header: str, pos: int, header_name: str
) -> Tuple[ConnectionOption, int]:
"""
Parse a Connection option from ``header`` at the given position.
Return the protocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(ConnectionOption, item), pos
def parse_connection(header: str) -> List[ConnectionOption]:
"""
Parse a ``Connection`` header.
Return a list of HTTP connection options.
    Args:
header: value of the ``Connection`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_connection_option, header, 0, "Connection")
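# For instance, parse_connection("keep-alive, , Upgrade") returns
# ["keep-alive", "Upgrade"]: empty list elements are parsed and ignored,
# as RFC 7230 requires.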
_protocol_re = re.compile(
r"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+(?:/[-!#$%&\'*+.^_`|~0-9a-zA-Z]+)?"
)
def parse_upgrade_protocol(
header: str, pos: int, header_name: str
) -> Tuple[UpgradeProtocol, int]:
"""
Parse an Upgrade protocol from ``header`` at the given position.
Return the protocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _protocol_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected protocol", header, pos
)
return cast(UpgradeProtocol, match.group()), match.end()
def parse_upgrade(header: str) -> List[UpgradeProtocol]:
"""
Parse an ``Upgrade`` header.
Return a list of HTTP protocols.
Args:
header: value of the ``Upgrade`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_upgrade_protocol, header, 0, "Upgrade")
def parse_extension_item_param(
header: str, pos: int, header_name: str
) -> Tuple[ExtensionParameter, int]:
"""
Parse a single extension parameter from ``header`` at the given position.
Return a ``(name, value)`` pair and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Extract parameter name.
name, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
# Extract parameter value, if there is one.
value: Optional[str] = None
if peek_ahead(header, pos) == "=":
pos = parse_OWS(header, pos + 1)
if peek_ahead(header, pos) == '"':
pos_before = pos # for proper error reporting below
value, pos = parse_quoted_string(header, pos, header_name)
# https://www.rfc-editor.org/rfc/rfc6455.html#section-9.1 says:
# the value after quoted-string unescaping MUST conform to
# the 'token' ABNF.
if _token_re.fullmatch(value) is None:
raise exceptions.InvalidHeaderFormat(
header_name, "invalid quoted header content", header, pos_before
)
else:
value, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
return (name, value), pos
def parse_extension_item(
header: str, pos: int, header_name: str
) -> Tuple[ExtensionHeader, int]:
"""
Parse an extension definition from ``header`` at the given position.
Return an ``(extension name, parameters)`` pair, where ``parameters`` is a
list of ``(name, value)`` pairs, and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
# Extract extension name.
name, pos = parse_token(header, pos, header_name)
pos = parse_OWS(header, pos)
# Extract all parameters.
parameters = []
while peek_ahead(header, pos) == ";":
pos = parse_OWS(header, pos + 1)
parameter, pos = parse_extension_item_param(header, pos, header_name)
parameters.append(parameter)
return (cast(ExtensionName, name), parameters), pos
def parse_extension(header: str) -> List[ExtensionHeader]:
"""
Parse a ``Sec-WebSocket-Extensions`` header.
Return a list of WebSocket extensions and their parameters in this format::
[
(
'extension name',
[
('parameter name', 'parameter value'),
....
]
),
...
]
Parameter values are :obj:`None` when no value is provided.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_extension_item, header, 0, "Sec-WebSocket-Extensions")
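# For instance:
#
#     parse_extension("permessage-deflate; client_max_window_bits")
#     # -> [('permessage-deflate', [('client_max_window_bits', None)])]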
parse_extension_list = parse_extension # alias for backwards compatibility
def build_extension_item(
name: ExtensionName, parameters: List[ExtensionParameter]
) -> str:
"""
Build an extension definition.
This is the reverse of :func:`parse_extension_item`.
"""
return "; ".join(
[cast(str, name)]
+ [
# Quoted strings aren't necessary because values are always tokens.
name if value is None else f"{name}={value}"
for name, value in parameters
]
)
def build_extension(extensions: Sequence[ExtensionHeader]) -> str:
"""
Build a ``Sec-WebSocket-Extensions`` header.
This is the reverse of :func:`parse_extension`.
"""
return ", ".join(
build_extension_item(name, parameters) for name, parameters in extensions
)
build_extension_list = build_extension # alias for backwards compatibility
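# Illustrative usage (added sketch): build_extension round-trips the output
# of parse_extension (ExtensionName is a NewType over str, so plain strings
# work at runtime).
# >>> build_extension([('permessage-deflate', [('client_max_window_bits', None)])])
# 'permessage-deflate; client_max_window_bits'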
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
"""
Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos
def parse_subprotocol(header: str) -> List[Subprotocol]:
"""
Parse a ``Sec-WebSocket-Protocol`` header.
Return a list of WebSocket subprotocols.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
return parse_list(parse_subprotocol_item, header, 0, "Sec-WebSocket-Protocol")
parse_subprotocol_list = parse_subprotocol # alias for backwards compatibility
def build_subprotocol(subprotocols: Sequence[Subprotocol]) -> str:
"""
Build a ``Sec-WebSocket-Protocol`` header.
This is the reverse of :func:`parse_subprotocol`.
"""
return ", ".join(subprotocols)
build_subprotocol_list = build_subprotocol # alias for backwards compatibility
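# Illustrative usage (added sketch): parse and build are inverses for
# well-formed values.
# >>> parse_subprotocol("chat, superchat")
# ['chat', 'superchat']
# >>> build_subprotocol(['chat', 'superchat'])
# 'chat, superchat'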
def validate_subprotocols(subprotocols: Sequence[Subprotocol]) -> None:
"""
Validate that ``subprotocols`` is suitable for :func:`build_subprotocol`.
"""
if not isinstance(subprotocols, Sequence):
raise TypeError("subprotocols must be a list")
if isinstance(subprotocols, str):
raise TypeError("subprotocols must be a list, not a str")
for subprotocol in subprotocols:
if not _token_re.fullmatch(subprotocol):
raise ValueError(f"invalid subprotocol: {subprotocol}")
def build_www_authenticate_basic(realm: str) -> str:
"""
Build a ``WWW-Authenticate`` header for HTTP Basic Auth.
Args:
realm: identifier of the protection space.
"""
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
realm = build_quoted_string(realm)
charset = build_quoted_string("UTF-8")
return f"Basic realm={realm}, charset={charset}"
_token68_re = re.compile(r"[A-Za-z0-9-._~+/]+=*")
def parse_token68(header: str, pos: int, header_name: str) -> Tuple[str, int]:
"""
Parse a token68 from ``header`` at the given position.
Return the token value and the new position.
Raises:
InvalidHeaderFormat: on invalid inputs.
"""
match = _token68_re.match(header, pos)
if match is None:
raise exceptions.InvalidHeaderFormat(
header_name, "expected token68", header, pos
)
return match.group(), match.end()
def parse_end(header: str, pos: int, header_name: str) -> None:
"""
Check that parsing reached the end of header.
"""
if pos < len(header):
raise exceptions.InvalidHeaderFormat(header_name, "trailing data", header, pos)
def parse_authorization_basic(header: str) -> Tuple[str, str]:
"""
Parse an ``Authorization`` header for HTTP Basic Auth.
Return a ``(username, password)`` tuple.
Args:
header: value of the ``Authorization`` header.
Raises:
InvalidHeaderFormat: on invalid inputs.
InvalidHeaderValue: on unsupported inputs.
"""
# https://www.rfc-editor.org/rfc/rfc7235.html#section-2.1
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
scheme, pos = parse_token(header, 0, "Authorization")
if scheme.lower() != "basic":
raise exceptions.InvalidHeaderValue(
"Authorization",
f"unsupported scheme: {scheme}",
)
if peek_ahead(header, pos) != " ":
raise exceptions.InvalidHeaderFormat(
"Authorization", "expected space after scheme", header, pos
)
pos += 1
basic_credentials, pos = parse_token68(header, pos, "Authorization")
parse_end(header, pos, "Authorization")
try:
user_pass = base64.b64decode(basic_credentials.encode()).decode()
except binascii.Error:
raise exceptions.InvalidHeaderValue(
"Authorization",
"expected base64-encoded credentials",
) from None
try:
username, password = user_pass.split(":", 1)
except ValueError:
raise exceptions.InvalidHeaderValue(
"Authorization",
"expected username:password credentials",
) from None
return username, password
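# Illustrative usage (added sketch), with the credentials example from
# RFC 7617:
# >>> parse_authorization_basic("Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
# ('Aladdin', 'open sesame')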
def build_authorization_basic(username: str, password: str) -> str:
"""
Build an ``Authorization`` header for HTTP Basic Auth.
This is the reverse of :func:`parse_authorization_basic`.
"""
# https://www.rfc-editor.org/rfc/rfc7617.html#section-2
assert ":" not in username
user_pass = f"{username}:{password}"
basic_credentials = base64.b64encode(user_pass.encode()).decode()
return "Basic " + basic_credentials
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/__init__.py
from __future__ import annotations
from .imports import lazy_import
from .version import version as __version__ # noqa
__all__ = [ # noqa
"AbortHandshake",
"basic_auth_protocol_factory",
"BasicAuthWebSocketServerProtocol",
"broadcast",
"ClientConnection",
"connect",
"ConnectionClosed",
"ConnectionClosedError",
"ConnectionClosedOK",
"Data",
"DuplicateParameter",
"ExtensionName",
"ExtensionParameter",
"InvalidHandshake",
"InvalidHeader",
"InvalidHeaderFormat",
"InvalidHeaderValue",
"InvalidMessage",
"InvalidOrigin",
"InvalidParameterName",
"InvalidParameterValue",
"InvalidState",
"InvalidStatus",
"InvalidStatusCode",
"InvalidUpgrade",
"InvalidURI",
"LoggerLike",
"NegotiationError",
"Origin",
"parse_uri",
"PayloadTooBig",
"ProtocolError",
"RedirectHandshake",
"SecurityError",
"serve",
"ServerConnection",
"Subprotocol",
"unix_connect",
"unix_serve",
"WebSocketClientProtocol",
"WebSocketCommonProtocol",
"WebSocketException",
"WebSocketProtocolError",
"WebSocketServer",
"WebSocketServerProtocol",
"WebSocketURI",
]
lazy_import(
globals(),
aliases={
"auth": ".legacy",
"basic_auth_protocol_factory": ".legacy.auth",
"BasicAuthWebSocketServerProtocol": ".legacy.auth",
"broadcast": ".legacy.protocol",
"ClientConnection": ".client",
"connect": ".legacy.client",
"unix_connect": ".legacy.client",
"WebSocketClientProtocol": ".legacy.client",
"Headers": ".datastructures",
"MultipleValuesError": ".datastructures",
"WebSocketException": ".exceptions",
"ConnectionClosed": ".exceptions",
"ConnectionClosedError": ".exceptions",
"ConnectionClosedOK": ".exceptions",
"InvalidHandshake": ".exceptions",
"SecurityError": ".exceptions",
"InvalidMessage": ".exceptions",
"InvalidHeader": ".exceptions",
"InvalidHeaderFormat": ".exceptions",
"InvalidHeaderValue": ".exceptions",
"InvalidOrigin": ".exceptions",
"InvalidUpgrade": ".exceptions",
"InvalidStatus": ".exceptions",
"InvalidStatusCode": ".exceptions",
"NegotiationError": ".exceptions",
"DuplicateParameter": ".exceptions",
"InvalidParameterName": ".exceptions",
"InvalidParameterValue": ".exceptions",
"AbortHandshake": ".exceptions",
"RedirectHandshake": ".exceptions",
"InvalidState": ".exceptions",
"InvalidURI": ".exceptions",
"PayloadTooBig": ".exceptions",
"ProtocolError": ".exceptions",
"WebSocketProtocolError": ".exceptions",
"protocol": ".legacy",
"WebSocketCommonProtocol": ".legacy.protocol",
"ServerConnection": ".server",
"serve": ".legacy.server",
"unix_serve": ".legacy.server",
"WebSocketServerProtocol": ".legacy.server",
"WebSocketServer": ".legacy.server",
"Data": ".typing",
"LoggerLike": ".typing",
"Origin": ".typing",
"ExtensionHeader": ".typing",
"ExtensionParameter": ".typing",
"Subprotocol": ".typing",
},
deprecated_aliases={
"framing": ".legacy",
"handshake": ".legacy",
"parse_uri": ".uri",
"WebSocketURI": ".uri",
},
)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/uri.py
from __future__ import annotations
import dataclasses
import urllib.parse
from typing import Optional, Tuple
from . import exceptions
__all__ = ["parse_uri", "WebSocketURI"]
@dataclasses.dataclass
class WebSocketURI:
"""
WebSocket URI.
Attributes:
secure: :obj:`True` for a ``wss`` URI, :obj:`False` for a ``ws`` URI.
host: Normalized to lower case.
port: Always set even if it's the default.
path: May be empty.
query: May be empty if the URI doesn't include a query component.
username: Available when the URI contains `User Information`_.
password: Available when the URI contains `User Information`_.
.. _User Information: https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2.1
"""
secure: bool
host: str
port: int
path: str
query: str
username: Optional[str]
password: Optional[str]
@property
def resource_name(self) -> str:
if self.path:
resource_name = self.path
else:
resource_name = "/"
if self.query:
resource_name += "?" + self.query
return resource_name
@property
def user_info(self) -> Optional[Tuple[str, str]]:
if self.username is None:
return None
assert self.password is not None
return (self.username, self.password)
# All characters from the gen-delims and sub-delims sets in RFC 3987.
DELIMS = ":/?#[]@!$&'()*+,;="
def parse_uri(uri: str) -> WebSocketURI:
"""
Parse and validate a WebSocket URI.
Args:
uri: WebSocket URI.
Returns:
WebSocketURI: Parsed WebSocket URI.
Raises:
InvalidURI: if ``uri`` isn't a valid WebSocket URI.
"""
parsed = urllib.parse.urlparse(uri)
if parsed.scheme not in ["ws", "wss"]:
raise exceptions.InvalidURI(uri, "scheme isn't ws or wss")
if parsed.hostname is None:
raise exceptions.InvalidURI(uri, "hostname isn't provided")
if parsed.fragment != "":
raise exceptions.InvalidURI(uri, "fragment identifier is meaningless")
secure = parsed.scheme == "wss"
host = parsed.hostname
port = parsed.port or (443 if secure else 80)
path = parsed.path
query = parsed.query
username = parsed.username
password = parsed.password
# urllib.parse.urlparse accepts URLs with a username but without a
# password. This doesn't make sense for HTTP Basic Auth credentials.
if username is not None and password is None:
raise exceptions.InvalidURI(uri, "username provided without password")
try:
uri.encode("ascii")
except UnicodeEncodeError:
# Input contains non-ASCII characters.
# It must be an IRI. Convert it to a URI.
host = host.encode("idna").decode()
path = urllib.parse.quote(path, safe=DELIMS)
query = urllib.parse.quote(query, safe=DELIMS)
if username is not None:
assert password is not None
username = urllib.parse.quote(username, safe=DELIMS)
password = urllib.parse.quote(password, safe=DELIMS)
return WebSocketURI(secure, host, port, path, query, username, password)
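# Illustrative usage (added sketch): default ports are filled in, and
# resource_name combines path and query.
# >>> uri = parse_uri("wss://user:secret@example.com/path?q=1")
# >>> uri.secure, uri.host, uri.port
# (True, 'example.com', 443)
# >>> uri.resource_name
# '/path?q=1'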
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/version.py
from __future__ import annotations
__all__ = ["tag", "version", "commit"]
# ========= =========== ===================
# release development
# ========= =========== ===================
# tag X.Y X.Y (upcoming)
# version X.Y X.Y.dev1+g5678cde
# commit X.Y 5678cde
# ========= =========== ===================
# When tagging a release, set `released = True`.
# After tagging a release, set `released = False` and increment `tag`.
released = True
tag = version = commit = "10.3"
if not released: # pragma: no cover
import pathlib
import re
import subprocess
def get_version(tag: str) -> str:
# Since setup.py executes the contents of src/websockets/version.py,
# __file__ can point to either of these two files.
file_path = pathlib.Path(__file__)
root_dir = file_path.parents[0 if file_path.name == "setup.py" else 2]
# Read version from git if available. This prevents reading stale
# information from src/websockets.egg-info after building a sdist.
try:
description = subprocess.run(
["git", "describe", "--dirty", "--tags", "--long"],
capture_output=True,
cwd=root_dir,
timeout=1,
check=True,
text=True,
).stdout.strip()
# subprocess.run raises FileNotFoundError if git isn't on $PATH.
except (FileNotFoundError, subprocess.CalledProcessError):
pass
else:
description_re = r"[0-9.]+-([0-9]+)-(g[0-9a-f]{7,}(?:-dirty)?)"
match = re.fullmatch(description_re, description)
assert match is not None
distance, remainder = match.groups()
remainder = remainder.replace("-", ".") # required by PEP 440
return f"{tag}.dev{distance}+{remainder}"
# Read version from package metadata if it is installed.
try:
import importlib.metadata # move up when dropping Python 3.7
return importlib.metadata.version("websockets")
except ImportError:
pass
# Avoid crashing if the development version cannot be determined.
return f"{tag}.dev0+gunknown"
version = get_version(tag)
def get_commit(tag: str, version: str) -> str:
# Extract commit from version, falling back to tag if not available.
version_re = r"[0-9.]+\.dev[0-9]+\+g([0-9a-f]{7,}|unknown)(?:\.dirty)?"
match = re.fullmatch(version_re, version)
assert match is not None
(commit,) = match.groups()
return tag if commit == "unknown" else commit
commit = get_commit(tag, version)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/server.py
from __future__ import annotations
import base64
import binascii
import email.utils
import http
from typing import Generator, List, Optional, Sequence, Tuple, cast
from .connection import CONNECTING, OPEN, SERVER, Connection, State
from .datastructures import Headers, MultipleValuesError
from .exceptions import (
InvalidHandshake,
InvalidHeader,
InvalidHeaderValue,
InvalidOrigin,
InvalidStatus,
InvalidUpgrade,
NegotiationError,
)
from .extensions import Extension, ServerExtensionFactory
from .headers import (
build_extension,
parse_connection,
parse_extension,
parse_subprotocol,
parse_upgrade,
)
from .http import USER_AGENT
from .http11 import Request, Response
from .typing import (
ConnectionOption,
ExtensionHeader,
LoggerLike,
Origin,
Subprotocol,
UpgradeProtocol,
)
from .utils import accept_key
# See #940 for why lazy_import isn't used here for backwards compatibility.
from .legacy.server import * # isort:skip # noqa
__all__ = ["ServerConnection"]
class ServerConnection(Connection):
"""
Sans-I/O implementation of a WebSocket server connection.
Args:
origins: acceptable values of the ``Origin`` header; include
:obj:`None` in the list if the lack of an origin is acceptable.
This is useful for defending against Cross-Site WebSocket
Hijacking attacks.
extensions: list of supported extensions, in order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of decreasing
preference.
state: initial state of the WebSocket connection.
max_size: maximum size of incoming messages in bytes;
:obj:`None` to disable the limit.
logger: logger for this connection;
defaults to ``logging.getLogger("websockets.client")``;
see the :doc:`logging guide <../topics/logging>` for details.
"""
def __init__(
self,
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
state: State = CONNECTING,
max_size: Optional[int] = 2**20,
logger: Optional[LoggerLike] = None,
):
super().__init__(
side=SERVER,
state=state,
max_size=max_size,
logger=logger,
)
self.origins = origins
self.available_extensions = extensions
self.available_subprotocols = subprotocols
def accept(self, request: Request) -> Response:
"""
Create a handshake response to accept the connection.
If the connection cannot be established, the handshake response
actually rejects the handshake.
You must send the handshake response with :meth:`send_response`.
You can modify it before sending it, for example to add HTTP headers.
Args:
request: WebSocket handshake request event received from the client.
Returns:
Response: WebSocket handshake response event to send to the client.
"""
try:
(
accept_header,
extensions_header,
protocol_header,
) = self.process_request(request)
except InvalidOrigin as exc:
request._exception = exc
self.handshake_exc = exc
if self.debug:
self.logger.debug("! invalid origin", exc_info=True)
return self.reject(
http.HTTPStatus.FORBIDDEN,
f"Failed to open a WebSocket connection: {exc}.\n",
)
except InvalidUpgrade as exc:
request._exception = exc
self.handshake_exc = exc
if self.debug:
self.logger.debug("! invalid upgrade", exc_info=True)
response = self.reject(
http.HTTPStatus.UPGRADE_REQUIRED,
(
f"Failed to open a WebSocket connection: {exc}.\n"
f"\n"
f"You cannot access a WebSocket server directly "
f"with a browser. You need a WebSocket client.\n"
),
)
response.headers["Upgrade"] = "websocket"
return response
except InvalidHandshake as exc:
request._exception = exc
self.handshake_exc = exc
if self.debug:
self.logger.debug("! invalid handshake", exc_info=True)
return self.reject(
http.HTTPStatus.BAD_REQUEST,
f"Failed to open a WebSocket connection: {exc}.\n",
)
except Exception as exc:
request._exception = exc
self.handshake_exc = exc
self.logger.error("opening handshake failed", exc_info=True)
return self.reject(
http.HTTPStatus.INTERNAL_SERVER_ERROR,
(
"Failed to open a WebSocket connection.\n"
"See server log for more information.\n"
),
)
headers = Headers()
headers["Date"] = email.utils.formatdate(usegmt=True)
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept_header
if extensions_header is not None:
headers["Sec-WebSocket-Extensions"] = extensions_header
if protocol_header is not None:
headers["Sec-WebSocket-Protocol"] = protocol_header
headers["Server"] = USER_AGENT
self.logger.info("connection open")
return Response(101, "Switching Protocols", headers)
def process_request(
self, request: Request
) -> Tuple[str, Optional[str], Optional[str]]:
"""
Check a handshake request and negotiate extensions and subprotocol.
This function doesn't verify that the request is an HTTP/1.1 or higher
GET request and doesn't check the ``Host`` header. These controls are
usually performed earlier in the HTTP request handling code. They're
the responsibility of the caller.
Args:
request: WebSocket handshake request received from the client.
Returns:
Tuple[str, Optional[str], Optional[str]]:
``Sec-WebSocket-Accept``, ``Sec-WebSocket-Extensions``, and
``Sec-WebSocket-Protocol`` headers for the handshake response.
Raises:
InvalidHandshake: if the handshake request is invalid;
then the server must return 400 Bad Request error.
"""
headers = request.headers
connection: List[ConnectionOption] = sum(
[parse_connection(value) for value in headers.get_all("Connection")], []
)
if not any(value.lower() == "upgrade" for value in connection):
raise InvalidUpgrade(
"Connection", ", ".join(connection) if connection else None
)
upgrade: List[UpgradeProtocol] = sum(
[parse_upgrade(value) for value in headers.get_all("Upgrade")], []
)
# For compatibility with non-strict implementations, ignore case when
# checking the Upgrade header. The RFC always uses "websocket", except
# in section 11.2. (IANA registration) where it uses "WebSocket".
if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None)
try:
key = headers["Sec-WebSocket-Key"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Key") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Key", "more than one Sec-WebSocket-Key header found"
) from exc
try:
raw_key = base64.b64decode(key.encode(), validate=True)
except binascii.Error as exc:
raise InvalidHeaderValue("Sec-WebSocket-Key", key) from exc
if len(raw_key) != 16:
raise InvalidHeaderValue("Sec-WebSocket-Key", key)
try:
version = headers["Sec-WebSocket-Version"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Version") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Version",
"more than one Sec-WebSocket-Version header found",
) from exc
if version != "13":
raise InvalidHeaderValue("Sec-WebSocket-Version", version)
accept_header = accept_key(key)
self.origin = self.process_origin(headers)
extensions_header, self.extensions = self.process_extensions(headers)
protocol_header = self.subprotocol = self.process_subprotocol(headers)
return (
accept_header,
extensions_header,
protocol_header,
)
def process_origin(self, headers: Headers) -> Optional[Origin]:
"""
Handle the Origin HTTP request header.
Args:
headers: WebSocket handshake request headers.
Returns:
Optional[Origin]: origin, if it is acceptable.
Raises:
InvalidOrigin: if the origin isn't acceptable.
"""
# "The user agent MUST NOT include more than one Origin header field"
# per https://www.rfc-editor.org/rfc/rfc6454.html#section-7.3.
try:
origin = cast(Optional[Origin], headers.get("Origin"))
except MultipleValuesError as exc:
raise InvalidHeader("Origin", "more than one Origin header found") from exc
if self.origins is not None:
if origin not in self.origins:
raise InvalidOrigin(origin)
return origin
def process_extensions(
self,
headers: Headers,
) -> Tuple[Optional[str], List[Extension]]:
"""
Handle the Sec-WebSocket-Extensions HTTP request header.
Accept or reject each extension proposed in the client request.
Negotiate parameters for accepted extensions.
:rfc:`6455` leaves the rules up to the specification of each
extension.
To provide this level of flexibility, for each extension proposed by
the client, we check for a match with each extension available in the
server configuration. If no match is found, the extension is ignored.
If several variants of the same extension are proposed by the client,
it may be accepted several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
This process doesn't allow the server to reorder extensions. It can
only select a subset of the extensions proposed by the client.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
Args:
headers: WebSocket handshake request headers.
Returns:
Tuple[Optional[str], List[Extension]]: ``Sec-WebSocket-Extensions``
HTTP response header and list of accepted extensions.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
response_header_value: Optional[str] = None
extension_headers: List[ExtensionHeader] = []
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values and self.available_extensions:
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, request_params in parsed_header_values:
for ext_factory in self.available_extensions:
# Skip non-matching extensions based on their name.
if ext_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
response_params, extension = ext_factory.process_request_params(
request_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
extension_headers.append((name, response_params))
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the client sent. The extension is declined.
# Serialize extension header.
if extension_headers:
response_header_value = build_extension(extension_headers)
return response_header_value, accepted_extensions
def process_subprotocol(self, headers: Headers) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP request header.
Args:
headers: WebSocket handshake request headers.
Returns:
Optional[Subprotocol]: Subprotocol, if one was selected; this is
also the value of the ``Sec-WebSocket-Protocol`` response header.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values and self.available_subprotocols:
parsed_header_values: List[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
subprotocol = self.select_subprotocol(
parsed_header_values, self.available_subprotocols
)
return subprotocol
def select_subprotocol(
self,
client_subprotocols: Sequence[Subprotocol],
server_subprotocols: Sequence[Subprotocol],
) -> Optional[Subprotocol]:
"""
Pick a subprotocol among those offered by the client.
If several subprotocols are supported by the client and the server,
the default implementation selects the preferred subprotocols by
giving equal value to the priorities of the client and the server.
If no common subprotocol is supported by the client and the server, it
proceeds without a subprotocol.
This is unlikely to be the most useful implementation in practice, as
many servers providing a subprotocol will require that the client uses
that subprotocol.
Args:
client_subprotocols: list of subprotocols offered by the client.
server_subprotocols: list of subprotocols available on the server.
Returns:
Optional[Subprotocol]: Subprotocol, if a common subprotocol was
found.
"""
subprotocols = set(client_subprotocols) & set(server_subprotocols)
if not subprotocols:
return None
priority = lambda p: (
client_subprotocols.index(p) + server_subprotocols.index(p)
)
return sorted(subprotocols, key=priority)[0]
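# Illustrative behavior (added sketch), for a hypothetical ServerConnection
# instance conn: indices are summed, so the shared first choice wins.
# conn.select_subprotocol(["chat", "superchat"], ["chat", "superchat"])
# # -> 'chat'  (priority 0 + 0, versus 1 + 1 for 'superchat')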
def reject(
self,
status: http.HTTPStatus,
text: str,
) -> Response:
"""
Create a handshake response to reject the connection.
A short plain text response is the best fallback when failing to
establish a WebSocket connection.
You must send the handshake response with :meth:`send_response`.
You can modify it before sending it, for example to alter HTTP headers.
Args:
status: HTTP status code.
text: HTTP response body; will be encoded to UTF-8.
Returns:
Response: WebSocket handshake response event to send to the client.
"""
body = text.encode()
headers = Headers(
[
("Date", email.utils.formatdate(usegmt=True)),
("Connection", "close"),
("Content-Length", str(len(body))),
("Content-Type", "text/plain; charset=utf-8"),
("Server", USER_AGENT),
]
)
response = Response(status.value, status.phrase, headers, body)
# When reject() is called from accept(), handshake_exc is already set.
# If a user calls reject(), set handshake_exc to guarantee invariant:
# "handshake_exc is None if and only if opening handshake succeded."
if self.handshake_exc is None:
self.handshake_exc = InvalidStatus(response)
self.logger.info("connection failed (%d %s)", status.value, status.phrase)
return response
def send_response(self, response: Response) -> None:
"""
Send a handshake response to the client.
Args:
response: WebSocket handshake response event to send.
"""
if self.debug:
code, phrase = response.status_code, response.reason_phrase
self.logger.debug("> HTTP/1.1 %d %s", code, phrase)
for key, value in response.headers.raw_items():
self.logger.debug("> %s: %s", key, value)
if response.body is not None:
self.logger.debug("> [body] (%d bytes)", len(response.body))
self.writes.append(response.serialize())
if response.status_code == 101:
assert self.state is CONNECTING
self.state = OPEN
else:
self.send_eof()
self.parser = self.discard()
next(self.parser) # start coroutine
def parse(self) -> Generator[None, None, None]:
if self.state is CONNECTING:
request = yield from Request.parse(self.reader.read_line)
if self.debug:
self.logger.debug("< GET %s HTTP/1.1", request.path)
for key, value in request.headers.raw_items():
self.logger.debug("< %s: %s", key, value)
self.events.append(request)
yield from super().parse()
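# Minimal sans-I/O server loop (added sketch, assuming the receive_data,
# events_received, and data_to_send methods of the Connection base class;
# sock is a hypothetical connected socket):
# conn = ServerConnection()
# conn.receive_data(sock.recv(4096))
# for event in conn.events_received():
#     if isinstance(event, Request):
#         conn.send_response(conn.accept(event))
# for data in conn.data_to_send():
#     sock.sendall(data)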
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/http11.py
from __future__ import annotations
import dataclasses
import re
import warnings
from typing import Callable, Generator, Optional
from . import datastructures, exceptions
# Maximum total size of headers is around 256 * 4 KiB = 1 MiB
MAX_HEADERS = 256
# We can use the same limit for the request line and header lines:
# "GET <4096 bytes> HTTP/1.1\r\n" = 4111 bytes
# "Set-Cookie: <4097 bytes>\r\n" = 4111 bytes
# (RFC requires 4096 bytes; for some reason Firefox supports 4097 bytes.)
MAX_LINE = 4111
# Support for HTTP response bodies is intended to read an error message
# returned by a server. It isn't designed to perform large file transfers.
MAX_BODY = 2**20 # 1 MiB
def d(value: bytes) -> str:
"""
Decode a bytestring for interpolating into an error message.
"""
return value.decode(errors="backslashreplace")
# See https://www.rfc-editor.org/rfc/rfc7230.html#appendix-B.
# Regex for validating header names.
_token_re = re.compile(rb"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+")
# Regex for validating header values.
# We don't attempt to support obsolete line folding.
# Include HTAB (\x09), SP (\x20), VCHAR (\x21-\x7e), obs-text (\x80-\xff).
# The ABNF is complicated because it attempts to express that optional
# whitespace is ignored. We strip whitespace and don't revalidate that.
# See also https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
_value_re = re.compile(rb"[\x09\x20-\x7e\x80-\xff]*")
@dataclasses.dataclass
class Request:
"""
WebSocket handshake request.
Attributes:
path: Request path, including optional query.
headers: Request headers.
"""
path: str
headers: datastructures.Headers
# body isn't useful in the context of this library.
_exception: Optional[Exception] = None
@property
def exception(self) -> Optional[Exception]: # pragma: no cover
warnings.warn(
"Request.exception is deprecated; "
"use ServerConnection.handshake_exc instead",
DeprecationWarning,
)
return self._exception
@classmethod
def parse(
cls,
read_line: Callable[[int], Generator[None, None, bytes]],
) -> Generator[None, None, Request]:
"""
Parse a WebSocket handshake request.
This is a generator-based coroutine.
The request path isn't URL-decoded or validated in any way.
The request path and headers are expected to contain only ASCII
characters. Other characters are represented with surrogate escapes.
:meth:`parse` doesn't attempt to read the request body because
WebSocket handshake requests don't have one. If the request contains a
body, it may be read from the data stream after :meth:`parse` returns.
Args:
read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data
Raises:
EOFError: if the connection is closed without a full HTTP request.
SecurityError: if the request exceeds a security limit.
ValueError: if the request isn't well formatted.
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.1.1
# Parsing is simple because fixed values are expected for method and
# version and because path isn't checked. Since WebSocket software tends
# to implement HTTP/1.1 strictly, there's little need for lenient parsing.
try:
request_line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP request line") from exc
try:
method, raw_path, version = request_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP request line: {d(request_line)}") from None
if method != b"GET":
raise ValueError(f"unsupported HTTP method: {d(method)}")
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
path = raw_path.decode("ascii", "surrogateescape")
headers = yield from parse_headers(read_line)
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.3.3
if "Transfer-Encoding" in headers:
raise NotImplementedError("transfer codings aren't supported")
if "Content-Length" in headers:
raise ValueError("unsupported request body")
return cls(path, headers)
def serialize(self) -> bytes:
"""
Serialize a WebSocket handshake request.
"""
# Since the request line and headers only contain ASCII characters,
# we can keep this simple.
request = f"GET {self.path} HTTP/1.1\r\n".encode()
request += self.headers.serialize()
return request
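# Illustrative usage (added sketch), assuming datastructures.Headers
# serializes each header line and ends with a terminating blank line:
# >>> headers = datastructures.Headers()
# >>> headers["Host"] = "server.example.com"
# >>> Request("/chat", headers).serialize()
# b'GET /chat HTTP/1.1\r\nHost: server.example.com\r\n\r\n'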
@dataclasses.dataclass
class Response:
"""
WebSocket handshake response.
Attributes:
status_code: Response code.
reason_phrase: Response reason.
headers: Response headers.
body: Response body, if any.
"""
status_code: int
reason_phrase: str
headers: datastructures.Headers
body: Optional[bytes] = None
_exception: Optional[Exception] = None
@property
def exception(self) -> Optional[Exception]: # pragma: no cover
warnings.warn(
"Response.exception is deprecated; "
"use ClientConnection.handshake_exc instead",
DeprecationWarning,
)
return self._exception
@classmethod
def parse(
cls,
read_line: Callable[[int], Generator[None, None, bytes]],
read_exact: Callable[[int], Generator[None, None, bytes]],
read_to_eof: Callable[[int], Generator[None, None, bytes]],
) -> Generator[None, None, Response]:
"""
Parse a WebSocket handshake response.
This is a generator-based coroutine.
The reason phrase and headers are expected to contain only ASCII
characters. Other characters are represented with surrogate escapes.
Args:
read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data.
read_exact: generator-based coroutine that reads the requested
bytes or raises an exception if there isn't enough data.
read_to_eof: generator-based coroutine that reads until the end
of the stream.
Raises:
EOFError: if the connection is closed without a full HTTP response.
SecurityError: if the response exceeds a security limit.
LookupError: if the response isn't well formatted.
ValueError: if the response isn't well formatted.
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.1.2
try:
status_line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP status line") from exc
try:
version, raw_status_code, raw_reason = status_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP status line: {d(status_line)}") from None
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
try:
status_code = int(raw_status_code)
except ValueError: # invalid literal for int() with base 10
raise ValueError(
f"invalid HTTP status code: {d(raw_status_code)}"
) from None
if not 100 <= status_code < 1000:
raise ValueError(f"unsupported HTTP status code: {d(raw_status_code)}")
if not _value_re.fullmatch(raw_reason):
raise ValueError(f"invalid HTTP reason phrase: {d(raw_reason)}")
reason = raw_reason.decode()
headers = yield from parse_headers(read_line)
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.3.3
if "Transfer-Encoding" in headers:
raise NotImplementedError("transfer codings aren't supported")
# Since websockets only does GET requests (no HEAD, no CONNECT), all
# responses except 1xx, 204, and 304 include a message body.
if 100 <= status_code < 200 or status_code == 204 or status_code == 304:
body = None
else:
content_length: Optional[int]
try:
# MultipleValuesError is sufficiently unlikely that we don't
# attempt to handle it. Instead we document that its parent
# class, LookupError, may be raised.
raw_content_length = headers["Content-Length"]
except KeyError:
content_length = None
else:
content_length = int(raw_content_length)
if content_length is None:
try:
body = yield from read_to_eof(MAX_BODY)
except RuntimeError:
raise exceptions.SecurityError(
f"body too large: over {MAX_BODY} bytes"
)
elif content_length > MAX_BODY:
raise exceptions.SecurityError(
f"body too large: {content_length} bytes"
)
else:
body = yield from read_exact(content_length)
return cls(status_code, reason, headers, body)
def serialize(self) -> bytes:
"""
Serialize a WebSocket handshake response.
"""
# Since the status line and headers only contain ASCII characters,
# we can keep this simple.
response = f"HTTP/1.1 {self.status_code} {self.reason_phrase}\r\n".encode()
response += self.headers.serialize()
if self.body is not None:
response += self.body
return response
def parse_headers(
read_line: Callable[[int], Generator[None, None, bytes]],
) -> Generator[None, None, datastructures.Headers]:
"""
Parse HTTP headers.
Non-ASCII characters are represented with surrogate escapes.
Args:
read_line: generator-based coroutine that reads a LF-terminated line
or raises an exception if there isn't enough data.
Raises:
EOFError: if the connection is closed without complete headers.
SecurityError: if the request exceeds a security limit.
ValueError: if the request isn't well formatted.
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2
# We don't attempt to support obsolete line folding.
headers = datastructures.Headers()
for _ in range(MAX_HEADERS + 1):
try:
line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP headers") from exc
if line == b"":
break
try:
raw_name, raw_value = line.split(b":", 1)
except ValueError: # not enough values to unpack (expected 2, got 1)
raise ValueError(f"invalid HTTP header line: {d(line)}") from None
if not _token_re.fullmatch(raw_name):
raise ValueError(f"invalid HTTP header name: {d(raw_name)}")
raw_value = raw_value.strip(b" \t")
if not _value_re.fullmatch(raw_value):
raise ValueError(f"invalid HTTP header value: {d(raw_value)}")
name = raw_name.decode("ascii") # guaranteed to be ASCII at this point
value = raw_value.decode("ascii", "surrogateescape")
headers[name] = value
else:
raise exceptions.SecurityError("too many HTTP headers")
return headers
def parse_line(
read_line: Callable[[int], Generator[None, None, bytes]],
) -> Generator[None, None, bytes]:
"""
Parse a single line.
CRLF is stripped from the return value.
Args:
read_line: generator-based coroutine that reads a LF-terminated line
or raises an exception if there isn't enough data.
Raises:
EOFError: if the connection is closed without a CRLF.
SecurityError: if the response exceeds a security limit.
"""
try:
line = yield from read_line(MAX_LINE)
except RuntimeError:
raise exceptions.SecurityError("line too long")
# Not mandatory but safe - https://www.rfc-editor.org/rfc/rfc7230.html#section-3.5
if not line.endswith(b"\r\n"):
raise EOFError("line without CRLF")
return line[:-2]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/utils.py
from __future__ import annotations
import base64
import hashlib
import secrets
import sys
__all__ = ["accept_key", "apply_mask"]
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
def generate_key() -> str:
"""
Generate a random key for the Sec-WebSocket-Key header.
"""
key = secrets.token_bytes(16)
return base64.b64encode(key).decode()
def accept_key(key: str) -> str:
"""
Compute the value of the Sec-WebSocket-Accept header.
Args:
key: value of the Sec-WebSocket-Key header.
"""
sha1 = hashlib.sha1((key + GUID).encode()).digest()
return base64.b64encode(sha1).decode()
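# Illustrative usage (added sketch), using the sample handshake key from
# RFC 6455, section 1.3:
# >>> accept_key("dGhlIHNhbXBsZSBub25jZQ==")
# 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='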
def apply_mask(data: bytes, mask: bytes) -> bytes:
"""
Apply masking to the data of a WebSocket message.
Args:
data: data to mask.
mask: 4-bytes mask.
"""
if len(mask) != 4:
raise ValueError("mask must contain 4 bytes")
data_int = int.from_bytes(data, sys.byteorder)
mask_repeated = mask * (len(data) // 4) + mask[: len(data) % 4]
mask_int = int.from_bytes(mask_repeated, sys.byteorder)
return (data_int ^ mask_int).to_bytes(len(data), sys.byteorder)
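# Illustrative property (added sketch): masking is an XOR with a repeating
# 4-byte mask, so applying the same mask twice restores the payload.
# >>> apply_mask(apply_mask(b"Hello", b"\x37\xfa\x21\x3d"), b"\x37\xfa\x21\x3d")
# b'Hello'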
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/streams.py
from __future__ import annotations
from typing import Generator
class StreamReader:
"""
Generator-based stream reader.
This class doesn't support concurrent calls to :meth:`read_line`,
:meth:`read_exact`, or :meth:`read_to_eof`. Make sure calls are
serialized.
"""
def __init__(self) -> None:
self.buffer = bytearray()
self.eof = False
def read_line(self, m: int) -> Generator[None, None, bytes]:
"""
Read a LF-terminated line from the stream.
This is a generator-based coroutine.
The return value includes the LF character.
Args:
m: maximum number of bytes to read; this is a security limit.
Raises:
EOFError: if the stream ends without a LF.
RuntimeError: if the stream ends in more than ``m`` bytes.
"""
n = 0 # number of bytes to read
p = 0 # number of bytes without a newline
while True:
n = self.buffer.find(b"\n", p) + 1
if n > 0:
break
p = len(self.buffer)
if p > m:
raise RuntimeError(f"read {p} bytes, expected no more than {m} bytes")
if self.eof:
raise EOFError(f"stream ends after {p} bytes, before end of line")
yield
if n > m:
raise RuntimeError(f"read {n} bytes, expected no more than {m} bytes")
r = self.buffer[:n]
del self.buffer[:n]
return r
def read_exact(self, n: int) -> Generator[None, None, bytes]:
"""
Read a given number of bytes from the stream.
This is a generator-based coroutine.
Args:
n: how many bytes to read.
Raises:
EOFError: if the stream ends in less than ``n`` bytes.
"""
assert n >= 0
while len(self.buffer) < n:
if self.eof:
p = len(self.buffer)
raise EOFError(f"stream ends after {p} bytes, expected {n} bytes")
yield
r = self.buffer[:n]
del self.buffer[:n]
return r
def read_to_eof(self, m: int) -> Generator[None, None, bytes]:
"""
Read all bytes from the stream.
This is a generator-based coroutine.
Args:
m: maximum number of bytes to read; this is a security limit.
Raises:
RuntimeError: if the stream ends in more than ``m`` bytes.
"""
while not self.eof:
p = len(self.buffer)
if p > m:
raise RuntimeError(f"read {p} bytes, expected no more than {m} bytes")
yield
r = self.buffer[:]
del self.buffer[:]
return r
def at_eof(self) -> Generator[None, None, bool]:
"""
Tell whether the stream has ended and all data was read.
This is a generator-based coroutine.
"""
while True:
if self.buffer:
return False
if self.eof:
return True
# When all data was read but the stream hasn't ended, we can't
# tell until either feed_data() or feed_eof() is called.
yield
def feed_data(self, data: bytes) -> None:
"""
Write data to the stream.
:meth:`feed_data` cannot be called after :meth:`feed_eof`.
Args:
data: data to write.
Raises:
EOFError: if the stream has ended.
"""
if self.eof:
raise EOFError("stream ended")
self.buffer += data
def feed_eof(self) -> None:
"""
End the stream.
:meth:`feed_eof` cannot be called more than once.
Raises:
EOFError: if the stream has ended.
"""
if self.eof:
raise EOFError("stream ended")
self.eof = True
def discard(self) -> None:
"""
Discard all buffered data, but don't end the stream.
"""
del self.buffer[:]
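# Illustrative usage (added sketch): driving a StreamReader through the
# generator protocol; the coroutine yields while data is missing.
# >>> reader = StreamReader()
# >>> gen = reader.read_line(4096)
# >>> next(gen)                      # no data buffered yet: coroutine yields
# >>> reader.feed_data(b"hello\n")
# >>> try:
# ...     next(gen)
# ... except StopIteration as done:
# ...     assert done.value == b"hello\n"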
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/auth.py
from __future__ import annotations
# See #940 for why lazy_import isn't used here for backwards compatibility.
from .legacy.auth import * # noqa
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/client.py
from __future__ import annotations
from typing import Generator, List, Optional, Sequence
from .connection import CLIENT, CONNECTING, OPEN, Connection, State
from .datastructures import Headers, MultipleValuesError
from .exceptions import (
InvalidHandshake,
InvalidHeader,
InvalidHeaderValue,
InvalidStatus,
InvalidUpgrade,
NegotiationError,
)
from .extensions import ClientExtensionFactory, Extension
from .headers import (
build_authorization_basic,
build_extension,
build_host,
build_subprotocol,
parse_connection,
parse_extension,
parse_subprotocol,
parse_upgrade,
)
from .http import USER_AGENT
from .http11 import Request, Response
from .typing import (
ConnectionOption,
ExtensionHeader,
LoggerLike,
Origin,
Subprotocol,
UpgradeProtocol,
)
from .uri import WebSocketURI
from .utils import accept_key, generate_key
# See #940 for why lazy_import isn't used here for backwards compatibility.
from .legacy.client import * # isort:skip # noqa
__all__ = ["ClientConnection"]
class ClientConnection(Connection):
"""
Sans-I/O implementation of a WebSocket client connection.
Args:
wsuri: URI of the WebSocket server, parsed
with :func:`~websockets.uri.parse_uri`.
origin: value of the ``Origin`` header. This is useful when connecting
to a server that validates the ``Origin`` header to defend against
Cross-Site WebSocket Hijacking attacks.
extensions: list of supported extensions, in order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of decreasing
preference.
state: initial state of the WebSocket connection.
max_size: maximum size of incoming messages in bytes;
:obj:`None` to disable the limit.
logger: logger for this connection;
defaults to ``logging.getLogger("websockets.client")``;
see the :doc:`logging guide <../topics/logging>` for details.
"""
def __init__(
self,
wsuri: WebSocketURI,
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
state: State = CONNECTING,
max_size: Optional[int] = 2**20,
logger: Optional[LoggerLike] = None,
):
super().__init__(
side=CLIENT,
state=state,
max_size=max_size,
logger=logger,
)
self.wsuri = wsuri
self.origin = origin
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.key = generate_key()
def connect(self) -> Request: # noqa: F811
"""
Create a handshake request to open a connection.
You must send the handshake request with :meth:`send_request`.
You can modify it before sending it, for example to add HTTP headers.
Returns:
Request: WebSocket handshake request event to send to the server.
"""
headers = Headers()
headers["Host"] = build_host(
self.wsuri.host, self.wsuri.port, self.wsuri.secure
)
if self.wsuri.user_info:
headers["Authorization"] = build_authorization_basic(*self.wsuri.user_info)
if self.origin is not None:
headers["Origin"] = self.origin
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Key"] = self.key
headers["Sec-WebSocket-Version"] = "13"
if self.available_extensions is not None:
extensions_header = build_extension(
[
(extension_factory.name, extension_factory.get_request_params())
for extension_factory in self.available_extensions
]
)
headers["Sec-WebSocket-Extensions"] = extensions_header
if self.available_subprotocols is not None:
protocol_header = build_subprotocol(self.available_subprotocols)
headers["Sec-WebSocket-Protocol"] = protocol_header
headers["User-Agent"] = USER_AGENT
return Request(self.wsuri.resource_name, headers)
def process_response(self, response: Response) -> None:
"""
Check a handshake response.
Args:
response: WebSocket handshake response received from the server.
Raises:
InvalidHandshake: if the handshake response is invalid.
"""
if response.status_code != 101:
raise InvalidStatus(response)
headers = response.headers
connection: List[ConnectionOption] = sum(
[parse_connection(value) for value in headers.get_all("Connection")], []
)
if not any(value.lower() == "upgrade" for value in connection):
raise InvalidUpgrade(
"Connection", ", ".join(connection) if connection else None
)
upgrade: List[UpgradeProtocol] = sum(
[parse_upgrade(value) for value in headers.get_all("Upgrade")], []
)
# For compatibility with non-strict implementations, ignore case when
# checking the Upgrade header. It's supposed to be 'WebSocket'.
if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None)
try:
s_w_accept = headers["Sec-WebSocket-Accept"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Accept") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Accept",
"more than one Sec-WebSocket-Accept header found",
) from exc
if s_w_accept != accept_key(self.key):
raise InvalidHeaderValue("Sec-WebSocket-Accept", s_w_accept)
self.extensions = self.process_extensions(headers)
self.subprotocol = self.process_subprotocol(headers)
def process_extensions(self, headers: Headers) -> List[Extension]:
"""
Handle the Sec-WebSocket-Extensions HTTP response header.
Check that each extension is supported, as well as its parameters.
:rfc:`6455` leaves the rules up to the specification of each
extension.
To provide this level of flexibility, for each extension accepted by
the server, we check for a match with each extension available in the
client configuration. If no match is found, an exception is raised.
If several variants of the same extension are accepted by the server,
it may be configured several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
Args:
headers: WebSocket handshake response headers.
Returns:
List[Extension]: List of accepted extensions.
Raises:
InvalidHandshake: to abort the handshake.
"""
accepted_extensions: List[Extension] = []
extensions = headers.get_all("Sec-WebSocket-Extensions")
if extensions:
if self.available_extensions is None:
raise InvalidHandshake("no extensions supported")
parsed_extensions: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in extensions], []
)
for name, response_params in parsed_extensions:
for extension_factory in self.available_extensions:
# Skip non-matching extensions based on their name.
if extension_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
extension = extension_factory.process_response_params(
response_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the server sent. Fail the connection.
else:
raise NegotiationError(
f"Unsupported extension: "
f"name = {name}, params = {response_params}"
)
return accepted_extensions
def process_subprotocol(self, headers: Headers) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP response header.
If provided, check that it contains exactly one supported subprotocol.
Args:
headers: WebSocket handshake response headers.
Returns:
Optional[Subprotocol]: Subprotocol, if one was selected.
"""
subprotocol: Optional[Subprotocol] = None
subprotocols = headers.get_all("Sec-WebSocket-Protocol")
if subprotocols:
if self.available_subprotocols is None:
raise InvalidHandshake("no subprotocols supported")
parsed_subprotocols: Sequence[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in subprotocols], []
)
if len(parsed_subprotocols) > 1:
subprotocols_display = ", ".join(parsed_subprotocols)
raise InvalidHandshake(f"multiple subprotocols: {subprotocols_display}")
subprotocol = parsed_subprotocols[0]
if subprotocol not in self.available_subprotocols:
raise NegotiationError(f"unsupported subprotocol: {subprotocol}")
return subprotocol
def send_request(self, request: Request) -> None:
"""
Send a handshake request to the server.
Args:
request: WebSocket handshake request event.
"""
if self.debug:
self.logger.debug("> GET %s HTTP/1.1", request.path)
for key, value in request.headers.raw_items():
self.logger.debug("> %s: %s", key, value)
self.writes.append(request.serialize())
def parse(self) -> Generator[None, None, None]:
if self.state is CONNECTING:
response = yield from Response.parse(
self.reader.read_line,
self.reader.read_exact,
self.reader.read_to_eof,
)
if self.debug:
code, phrase = response.status_code, response.reason_phrase
self.logger.debug("< HTTP/1.1 %d %s", code, phrase)
for key, value in response.headers.raw_items():
self.logger.debug("< %s: %s", key, value)
if response.body is not None:
self.logger.debug("< [body] (%d bytes)", len(response.body))
try:
self.process_response(response)
except InvalidHandshake as exc:
response._exception = exc
self.handshake_exc = exc
self.parser = self.discard()
next(self.parser) # start coroutine
else:
assert self.state is CONNECTING
self.state = OPEN
finally:
self.events.append(response)
yield from super().parse()
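# Minimal sans-I/O client handshake (added sketch, assuming the data_to_send,
# receive_data, and events_received methods of the Connection base class;
# parse_uri comes from .uri and sock is a hypothetical connected socket):
# conn = ClientConnection(parse_uri("ws://example.com/"))
# conn.send_request(conn.connect())
# for data in conn.data_to_send():
#     sock.sendall(data)
# conn.receive_data(sock.recv(4096))
# [response] = conn.events_received()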
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/typing.py
from __future__ import annotations
import logging
from typing import List, NewType, Optional, Tuple, Union
__all__ = [
"Data",
"LoggerLike",
"Origin",
"Subprotocol",
"ExtensionName",
"ExtensionParameter",
]
# Public types used in the signature of public APIs
Data = Union[str, bytes]
"""Types supported in a WebSocket message:
:class:`str` for a Text_ frame, :class:`bytes` for a Binary_.
.. _Text: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
.. _Binary: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
"""
LoggerLike = Union[logging.Logger, logging.LoggerAdapter]
"""Types accepted where a :class:`~logging.Logger` is expected."""
Origin = NewType("Origin", str)
"""Value of a ``Origin`` header."""
Subprotocol = NewType("Subprotocol", str)
"""Subprotocol in a ``Sec-WebSocket-Protocol`` header."""
ExtensionName = NewType("ExtensionName", str)
"""Name of a WebSocket extension."""
ExtensionParameter = Tuple[str, Optional[str]]
"""Parameter of a WebSocket extension."""
# Private types
ExtensionHeader = Tuple[ExtensionName, List[ExtensionParameter]]
"""Extension in a ``Sec-WebSocket-Extensions`` header."""
ConnectionOption = NewType("ConnectionOption", str)
"""Connection option in a ``Connection`` header."""
UpgradeProtocol = NewType("UpgradeProtocol", str)
"""Upgrade protocol in an ``Upgrade`` header."""
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/__main__.py
from __future__ import annotations
import argparse
import asyncio
import os
import signal
import sys
import threading
from typing import Any, Set
from .exceptions import ConnectionClosed
from .frames import Close
from .legacy.client import connect
from .version import version as websockets_version
if sys.platform == "win32":
def win_enable_vt100() -> None:
"""
Enable VT-100 for console output on Windows.
See also https://bugs.python.org/issue29059.
"""
import ctypes
STD_OUTPUT_HANDLE = ctypes.c_uint(-11)
INVALID_HANDLE_VALUE = ctypes.c_uint(-1)
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x004
handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
if handle == INVALID_HANDLE_VALUE:
raise RuntimeError("unable to obtain stdout handle")
cur_mode = ctypes.c_uint()
if ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(cur_mode)) == 0:
raise RuntimeError("unable to query current console mode")
# ctypes ints lack support for the required bit-OR operation.
# Temporarily convert to Py int, do the OR and convert back.
py_int_mode = int.from_bytes(cur_mode, sys.byteorder)
new_mode = ctypes.c_uint(py_int_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
if ctypes.windll.kernel32.SetConsoleMode(handle, new_mode) == 0:
raise RuntimeError("unable to set console mode")
def exit_from_event_loop_thread(
loop: asyncio.AbstractEventLoop,
stop: asyncio.Future[None],
) -> None:
loop.stop()
if not stop.done():
# When exiting the thread that runs the event loop, raise
# KeyboardInterrupt in the main thread to exit the program.
if sys.platform == "win32":
ctrl_c = signal.CTRL_C_EVENT
else:
ctrl_c = signal.SIGINT
os.kill(os.getpid(), ctrl_c)
def print_during_input(string: str) -> None:
sys.stdout.write(
# Save cursor position
"\N{ESC}7"
# Add a new line
"\N{LINE FEED}"
# Move cursor up
"\N{ESC}[A"
# Insert blank line, scroll last line down
"\N{ESC}[L"
# Print string in the inserted blank line
f"{string}\N{LINE FEED}"
# Restore cursor position
"\N{ESC}8"
# Move cursor down
"\N{ESC}[B"
)
sys.stdout.flush()
def print_over_input(string: str) -> None:
sys.stdout.write(
# Move cursor to beginning of line
"\N{CARRIAGE RETURN}"
# Delete current line
"\N{ESC}[K"
# Print string
f"{string}\N{LINE FEED}"
)
sys.stdout.flush()
async def run_client(
uri: str,
loop: asyncio.AbstractEventLoop,
inputs: asyncio.Queue[str],
stop: asyncio.Future[None],
) -> None:
try:
websocket = await connect(uri)
except Exception as exc:
print_over_input(f"Failed to connect to {uri}: {exc}.")
exit_from_event_loop_thread(loop, stop)
return
else:
print_during_input(f"Connected to {uri}.")
try:
while True:
incoming: asyncio.Future[Any] = asyncio.create_task(websocket.recv())
outgoing: asyncio.Future[Any] = asyncio.create_task(inputs.get())
done: Set[asyncio.Future[Any]]
pending: Set[asyncio.Future[Any]]
done, pending = await asyncio.wait(
[incoming, outgoing, stop], return_when=asyncio.FIRST_COMPLETED
)
# Cancel pending tasks to avoid leaking them.
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
except ConnectionClosed:
break
else:
if isinstance(message, str):
print_during_input("< " + message)
else:
print_during_input("< (binary) " + message.hex())
if outgoing in done:
message = outgoing.result()
await websocket.send(message)
if stop in done:
break
finally:
await websocket.close()
assert websocket.close_code is not None and websocket.close_reason is not None
close_status = Close(websocket.close_code, websocket.close_reason)
print_over_input(f"Connection closed: {close_status}.")
exit_from_event_loop_thread(loop, stop)
def main() -> None:
# Parse command line arguments.
parser = argparse.ArgumentParser(
prog="python -m websockets",
description="Interactive WebSocket client.",
add_help=False,
)
group = parser.add_mutually_exclusive_group()
group.add_argument("--version", action="store_true")
group.add_argument("uri", metavar="<uri>", nargs="?")
args = parser.parse_args()
if args.version:
print(f"websockets {websockets_version}")
return
if args.uri is None:
parser.error("the following arguments are required: <uri>")
# If we're on Windows, enable VT100 terminal support.
if sys.platform == "win32":
try:
win_enable_vt100()
except RuntimeError as exc:
sys.stderr.write(
f"Unable to set terminal to VT100 mode. This is only "
f"supported since Win10 anniversary update. Expect "
f"weird symbols on the terminal.\nError: {exc}\n"
)
sys.stderr.flush()
try:
import readline # noqa
except ImportError: # Windows has no `readline` normally
pass
# Create an event loop that will run in a background thread.
loop = asyncio.new_event_loop()
# Due to zealous removal of the loop parameter in the Queue constructor,
# we need a factory coroutine to run in the freshly created event loop.
async def queue_factory() -> asyncio.Queue[str]:
return asyncio.Queue()
# Create a queue of user inputs. There's no need to limit its size.
inputs: asyncio.Queue[str] = loop.run_until_complete(queue_factory())
# Create a stop condition when receiving SIGINT or SIGTERM.
stop: asyncio.Future[None] = loop.create_future()
# Schedule the task that will manage the connection.
loop.create_task(run_client(args.uri, loop, inputs, stop))
# Start the event loop in a background thread.
thread = threading.Thread(target=loop.run_forever)
thread.start()
# Read from stdin in the main thread in order to receive signals.
try:
while True:
# Since there's no size limit, put_nowait is identical to put.
message = input("> ")
loop.call_soon_threadsafe(inputs.put_nowait, message)
except (KeyboardInterrupt, EOFError): # ^C, ^D
loop.call_soon_threadsafe(stop.set_result, None)
# Wait for the event loop to terminate.
thread.join()
    # For reasons unclear, even though the loop was stopped in the thread,
    # it may still think it's running here.
loop.close()
if __name__ == "__main__":
main()
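# Example session (a sketch; assumes a WebSocket echo server is listening
# at ws://localhost:8765/, which is a hypothetical address):
#
#     $ python -m websockets ws://localhost:8765/
#     Connected to ws://localhost:8765/.
#     > hello
#     < hello
#     Connection closed: 1000 (OK).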
| 7,255 | Python | 30.411255 | 86 | 0.604824 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/frames.py | from __future__ import annotations
import dataclasses
import enum
import io
import secrets
import struct
from typing import Callable, Generator, Optional, Sequence, Tuple
from . import exceptions, extensions
from .typing import Data
try:
from .speedups import apply_mask
except ImportError: # pragma: no cover
from .utils import apply_mask
__all__ = [
"Opcode",
"OP_CONT",
"OP_TEXT",
"OP_BINARY",
"OP_CLOSE",
"OP_PING",
"OP_PONG",
"DATA_OPCODES",
"CTRL_OPCODES",
"Frame",
"prepare_data",
"prepare_ctrl",
"Close",
]
class Opcode(enum.IntEnum):
"""Opcode values for WebSocket frames."""
CONT, TEXT, BINARY = 0x00, 0x01, 0x02
CLOSE, PING, PONG = 0x08, 0x09, 0x0A
OP_CONT = Opcode.CONT
OP_TEXT = Opcode.TEXT
OP_BINARY = Opcode.BINARY
OP_CLOSE = Opcode.CLOSE
OP_PING = Opcode.PING
OP_PONG = Opcode.PONG
DATA_OPCODES = OP_CONT, OP_TEXT, OP_BINARY
CTRL_OPCODES = OP_CLOSE, OP_PING, OP_PONG
# See https://www.iana.org/assignments/websocket/websocket.xhtml
CLOSE_CODES = {
1000: "OK",
1001: "going away",
1002: "protocol error",
1003: "unsupported type",
# 1004 is reserved
1005: "no status code [internal]",
1006: "connection closed abnormally [internal]",
1007: "invalid data",
1008: "policy violation",
1009: "message too big",
1010: "extension required",
1011: "unexpected error",
1012: "service restart",
1013: "try again later",
1014: "bad gateway",
1015: "TLS failure [internal]",
}
# Close codes that are allowed in a close frame.
# Using a set optimizes `code in EXTERNAL_CLOSE_CODES`.
EXTERNAL_CLOSE_CODES = {
1000,
1001,
1002,
1003,
1007,
1008,
1009,
1010,
1011,
1012,
1013,
1014,
}
OK_CLOSE_CODES = {1000, 1001}
BytesLike = bytes, bytearray, memoryview
@dataclasses.dataclass
class Frame:
"""
WebSocket frame.
Attributes:
opcode: Opcode.
data: Payload data.
fin: FIN bit.
rsv1: RSV1 bit.
rsv2: RSV2 bit.
rsv3: RSV3 bit.
Only these fields are needed. The MASK bit, payload length and masking-key
are handled on the fly when parsing and serializing frames.
"""
opcode: Opcode
data: bytes
fin: bool = True
rsv1: bool = False
rsv2: bool = False
rsv3: bool = False
def __str__(self) -> str:
"""
        Return a human-readable representation of a frame.
"""
coding = None
length = f"{len(self.data)} byte{'' if len(self.data) == 1 else 's'}"
non_final = "" if self.fin else "continued"
if self.opcode is OP_TEXT:
# Decoding only the beginning and the end is needlessly hard.
# Decode the entire payload then elide later if necessary.
data = repr(self.data.decode())
elif self.opcode is OP_BINARY:
# We'll show at most the first 16 bytes and the last 8 bytes.
# Encode just what we need, plus two dummy bytes to elide later.
binary = self.data
if len(binary) > 25:
binary = b"".join([binary[:16], b"\x00\x00", binary[-8:]])
data = " ".join(f"{byte:02x}" for byte in binary)
elif self.opcode is OP_CLOSE:
data = str(Close.parse(self.data))
elif self.data:
# We don't know if a Continuation frame contains text or binary.
# Ping and Pong frames could contain UTF-8.
# Attempt to decode as UTF-8 and display it as text; fallback to
# binary. If self.data is a memoryview, it has no decode() method,
# which raises AttributeError.
try:
data = repr(self.data.decode())
coding = "text"
except (UnicodeDecodeError, AttributeError):
binary = self.data
if len(binary) > 25:
binary = b"".join([binary[:16], b"\x00\x00", binary[-8:]])
data = " ".join(f"{byte:02x}" for byte in binary)
coding = "binary"
else:
data = "''"
if len(data) > 75:
data = data[:48] + "..." + data[-24:]
metadata = ", ".join(filter(None, [coding, length, non_final]))
return f"{self.opcode.name} {data} [{metadata}]"
@classmethod
def parse(
cls,
read_exact: Callable[[int], Generator[None, None, bytes]],
*,
mask: bool,
max_size: Optional[int] = None,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> Generator[None, None, Frame]:
"""
Parse a WebSocket frame.
This is a generator-based coroutine.
Args:
read_exact: generator-based coroutine that reads the requested
bytes or raises an exception if there isn't enough data.
            mask: whether the frame should be masked, i.e. whether the read
                happens on the server side.
max_size: maximum payload size in bytes.
extensions: list of extensions, applied in reverse order.
Raises:
PayloadTooBig: if the frame's payload size exceeds ``max_size``.
ProtocolError: if the frame contains incorrect values.
"""
# Read the header.
data = yield from read_exact(2)
head1, head2 = struct.unpack("!BB", data)
# While not Pythonic, this is marginally faster than calling bool().
fin = True if head1 & 0b10000000 else False
rsv1 = True if head1 & 0b01000000 else False
rsv2 = True if head1 & 0b00100000 else False
rsv3 = True if head1 & 0b00010000 else False
try:
opcode = Opcode(head1 & 0b00001111)
except ValueError as exc:
raise exceptions.ProtocolError("invalid opcode") from exc
if (True if head2 & 0b10000000 else False) != mask:
raise exceptions.ProtocolError("incorrect masking")
length = head2 & 0b01111111
if length == 126:
data = yield from read_exact(2)
(length,) = struct.unpack("!H", data)
elif length == 127:
data = yield from read_exact(8)
(length,) = struct.unpack("!Q", data)
if max_size is not None and length > max_size:
raise exceptions.PayloadTooBig(
f"over size limit ({length} > {max_size} bytes)"
)
if mask:
mask_bytes = yield from read_exact(4)
# Read the data.
data = yield from read_exact(length)
if mask:
data = apply_mask(data, mask_bytes)
frame = cls(opcode, data, fin, rsv1, rsv2, rsv3)
if extensions is None:
extensions = []
for extension in reversed(extensions):
frame = extension.decode(frame, max_size=max_size)
frame.check()
return frame
def serialize(
self,
*,
mask: bool,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> bytes:
"""
Serialize a WebSocket frame.
Args:
            mask: whether the frame should be masked, i.e. whether the write
                happens on the client side.
extensions: list of extensions, applied in order.
Raises:
ProtocolError: if the frame contains incorrect values.
"""
self.check()
if extensions is None:
extensions = []
for extension in extensions:
self = extension.encode(self)
output = io.BytesIO()
# Prepare the header.
head1 = (
(0b10000000 if self.fin else 0)
| (0b01000000 if self.rsv1 else 0)
| (0b00100000 if self.rsv2 else 0)
| (0b00010000 if self.rsv3 else 0)
| self.opcode
)
head2 = 0b10000000 if mask else 0
length = len(self.data)
if length < 126:
output.write(struct.pack("!BB", head1, head2 | length))
elif length < 65536:
output.write(struct.pack("!BBH", head1, head2 | 126, length))
else:
output.write(struct.pack("!BBQ", head1, head2 | 127, length))
if mask:
mask_bytes = secrets.token_bytes(4)
output.write(mask_bytes)
# Prepare the data.
if mask:
data = apply_mask(self.data, mask_bytes)
else:
data = self.data
output.write(data)
return output.getvalue()
def check(self) -> None:
"""
Check that reserved bits and opcode have acceptable values.
Raises:
ProtocolError: if a reserved bit or the opcode is invalid.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise exceptions.ProtocolError("reserved bits must be 0")
if self.opcode in CTRL_OPCODES:
if len(self.data) > 125:
raise exceptions.ProtocolError("control frame too long")
if not self.fin:
raise exceptions.ProtocolError("fragmented control frame")
def prepare_data(data: Data) -> Tuple[int, bytes]:
"""
Convert a string or byte-like object to an opcode and a bytes-like object.
This function is designed for data frames.
If ``data`` is a :class:`str`, return ``OP_TEXT`` and a :class:`bytes`
object encoding ``data`` in UTF-8.
If ``data`` is a bytes-like object, return ``OP_BINARY`` and a bytes-like
object.
Raises:
TypeError: if ``data`` doesn't have a supported type.
"""
if isinstance(data, str):
return OP_TEXT, data.encode("utf-8")
elif isinstance(data, BytesLike):
return OP_BINARY, data
else:
raise TypeError("data must be str or bytes-like")
def prepare_ctrl(data: Data) -> bytes:
"""
Convert a string or byte-like object to bytes.
This function is designed for ping and pong frames.
If ``data`` is a :class:`str`, return a :class:`bytes` object encoding
``data`` in UTF-8.
If ``data`` is a bytes-like object, return a :class:`bytes` object.
Raises:
TypeError: if ``data`` doesn't have a supported type.
"""
if isinstance(data, str):
return data.encode("utf-8")
elif isinstance(data, BytesLike):
return bytes(data)
else:
raise TypeError("data must be str or bytes-like")
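# For illustration, the two helpers above behave as follows (a sketch):
#
#     >>> prepare_data("hi")
#     (<Opcode.TEXT: 1>, b'hi')
#     >>> prepare_ctrl(bytearray(b"ping"))
#     b'ping'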
@dataclasses.dataclass
class Close:
"""
Code and reason for WebSocket close frames.
Attributes:
code: Close code.
reason: Close reason.
"""
code: int
reason: str
def __str__(self) -> str:
"""
        Return a human-readable representation of a close code and reason.
"""
if 3000 <= self.code < 4000:
explanation = "registered"
elif 4000 <= self.code < 5000:
explanation = "private use"
else:
explanation = CLOSE_CODES.get(self.code, "unknown")
result = f"{self.code} ({explanation})"
if self.reason:
result = f"{result} {self.reason}"
return result
@classmethod
def parse(cls, data: bytes) -> Close:
"""
Parse the payload of a close frame.
Args:
data: payload of the close frame.
Raises:
ProtocolError: if data is ill-formed.
UnicodeDecodeError: if the reason isn't valid UTF-8.
"""
if len(data) >= 2:
(code,) = struct.unpack("!H", data[:2])
reason = data[2:].decode("utf-8")
close = cls(code, reason)
close.check()
return close
elif len(data) == 0:
return cls(1005, "")
else:
raise exceptions.ProtocolError("close frame too short")
def serialize(self) -> bytes:
"""
Serialize the payload of a close frame.
"""
self.check()
return struct.pack("!H", self.code) + self.reason.encode("utf-8")
def check(self) -> None:
"""
Check that the close code has a valid value for a close frame.
Raises:
ProtocolError: if the close code is invalid.
"""
if not (self.code in EXTERNAL_CLOSE_CODES or 3000 <= self.code < 5000):
raise exceptions.ProtocolError("invalid status code")
| 12,381 | Python | 26.887387 | 79 | 0.569986 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/extensions/base.py | from __future__ import annotations
from typing import List, Optional, Sequence, Tuple
from .. import frames
from ..typing import ExtensionName, ExtensionParameter
__all__ = ["Extension", "ClientExtensionFactory", "ServerExtensionFactory"]
class Extension:
"""
Base class for extensions.
"""
name: ExtensionName
"""Extension identifier."""
def decode(
self,
frame: frames.Frame,
*,
max_size: Optional[int] = None,
) -> frames.Frame:
"""
Decode an incoming frame.
Args:
frame (Frame): incoming frame.
max_size: maximum payload size in bytes.
Returns:
Frame: Decoded frame.
Raises:
PayloadTooBig: if decoding the payload exceeds ``max_size``.
"""
def encode(self, frame: frames.Frame) -> frames.Frame:
"""
Encode an outgoing frame.
Args:
frame (Frame): outgoing frame.
Returns:
Frame: Encoded frame.
"""
class ClientExtensionFactory:
"""
Base class for client-side extension factories.
"""
name: ExtensionName
"""Extension identifier."""
def get_request_params(self) -> List[ExtensionParameter]:
"""
Build parameters to send to the server for this extension.
Returns:
List[ExtensionParameter]: Parameters to send to the server.
"""
def process_response_params(
self,
params: Sequence[ExtensionParameter],
accepted_extensions: Sequence[Extension],
) -> Extension:
"""
Process parameters received from the server.
Args:
params (Sequence[ExtensionParameter]): parameters received from
the server for this extension.
accepted_extensions (Sequence[Extension]): list of previously
accepted extensions.
Returns:
Extension: An extension instance.
Raises:
NegotiationError: if parameters aren't acceptable.
"""
class ServerExtensionFactory:
"""
Base class for server-side extension factories.
"""
name: ExtensionName
"""Extension identifier."""
def process_request_params(
self,
params: Sequence[ExtensionParameter],
accepted_extensions: Sequence[Extension],
) -> Tuple[List[ExtensionParameter], Extension]:
"""
Process parameters received from the client.
Args:
params (Sequence[ExtensionParameter]): parameters received from
the client for this extension.
accepted_extensions (Sequence[Extension]): list of previously
accepted extensions.
Returns:
Tuple[List[ExtensionParameter], Extension]: To accept the offer,
parameters to send to the client for this extension and an
extension instance.
Raises:
NegotiationError: to reject the offer, if parameters received from
the client aren't acceptable.
"""
| 3,101 | Python | 23.046511 | 78 | 0.597549 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/extensions/__init__.py | from .base import *
__all__ = ["Extension", "ClientExtensionFactory", "ServerExtensionFactory"]
| 98 | Python | 18.799996 | 75 | 0.714286 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/extensions/permessage_deflate.py | from __future__ import annotations
import dataclasses
import zlib
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from .. import exceptions, frames
from ..typing import ExtensionName, ExtensionParameter
from .base import ClientExtensionFactory, Extension, ServerExtensionFactory
__all__ = [
"PerMessageDeflate",
"ClientPerMessageDeflateFactory",
"enable_client_permessage_deflate",
"ServerPerMessageDeflateFactory",
"enable_server_permessage_deflate",
]
_EMPTY_UNCOMPRESSED_BLOCK = b"\x00\x00\xff\xff"
_MAX_WINDOW_BITS_VALUES = [str(bits) for bits in range(8, 16)]
class PerMessageDeflate(Extension):
"""
Per-Message Deflate extension.
"""
name = ExtensionName("permessage-deflate")
def __init__(
self,
remote_no_context_takeover: bool,
local_no_context_takeover: bool,
remote_max_window_bits: int,
local_max_window_bits: int,
compress_settings: Optional[Dict[Any, Any]] = None,
) -> None:
"""
Configure the Per-Message Deflate extension.
"""
if compress_settings is None:
compress_settings = {}
assert remote_no_context_takeover in [False, True]
assert local_no_context_takeover in [False, True]
assert 8 <= remote_max_window_bits <= 15
assert 8 <= local_max_window_bits <= 15
assert "wbits" not in compress_settings
self.remote_no_context_takeover = remote_no_context_takeover
self.local_no_context_takeover = local_no_context_takeover
self.remote_max_window_bits = remote_max_window_bits
self.local_max_window_bits = local_max_window_bits
self.compress_settings = compress_settings
if not self.remote_no_context_takeover:
self.decoder = zlib.decompressobj(wbits=-self.remote_max_window_bits)
if not self.local_no_context_takeover:
self.encoder = zlib.compressobj(
wbits=-self.local_max_window_bits, **self.compress_settings
)
        # To handle continuation frames properly, we must keep track of
        # whether the initial frame of the message was encoded.
self.decode_cont_data = False
# There's no need for self.encode_cont_data because we always encode
# outgoing frames, so it would always be True.
def __repr__(self) -> str:
return (
f"PerMessageDeflate("
f"remote_no_context_takeover={self.remote_no_context_takeover}, "
f"local_no_context_takeover={self.local_no_context_takeover}, "
f"remote_max_window_bits={self.remote_max_window_bits}, "
f"local_max_window_bits={self.local_max_window_bits})"
)
def decode(
self,
frame: frames.Frame,
*,
max_size: Optional[int] = None,
) -> frames.Frame:
"""
Decode an incoming frame.
"""
# Skip control frames.
if frame.opcode in frames.CTRL_OPCODES:
return frame
# Handle continuation data frames:
# - skip if the message isn't encoded
# - reset "decode continuation data" flag if it's a final frame
if frame.opcode is frames.OP_CONT:
if not self.decode_cont_data:
return frame
if frame.fin:
self.decode_cont_data = False
# Handle text and binary data frames:
# - skip if the message isn't encoded
# - unset the rsv1 flag on the first frame of a compressed message
# - set "decode continuation data" flag if it's a non-final frame
else:
if not frame.rsv1:
return frame
frame = dataclasses.replace(frame, rsv1=False)
if not frame.fin:
self.decode_cont_data = True
# Re-initialize per-message decoder.
if self.remote_no_context_takeover:
self.decoder = zlib.decompressobj(wbits=-self.remote_max_window_bits)
# Uncompress data. Protect against zip bombs by preventing zlib from
# decompressing more than max_length bytes (except when the limit is
# disabled with max_size = None).
data = frame.data
if frame.fin:
data += _EMPTY_UNCOMPRESSED_BLOCK
max_length = 0 if max_size is None else max_size
try:
data = self.decoder.decompress(data, max_length)
except zlib.error as exc:
raise exceptions.ProtocolError("decompression failed") from exc
if self.decoder.unconsumed_tail:
raise exceptions.PayloadTooBig(f"over size limit (? > {max_size} bytes)")
# Allow garbage collection of the decoder if it won't be reused.
if frame.fin and self.remote_no_context_takeover:
del self.decoder
return dataclasses.replace(frame, data=data)
def encode(self, frame: frames.Frame) -> frames.Frame:
"""
Encode an outgoing frame.
"""
# Skip control frames.
if frame.opcode in frames.CTRL_OPCODES:
return frame
# Since we always encode messages, there's no "encode continuation
# data" flag similar to "decode continuation data" at this time.
if frame.opcode is not frames.OP_CONT:
# Set the rsv1 flag on the first frame of a compressed message.
frame = dataclasses.replace(frame, rsv1=True)
            # Re-initialize per-message encoder.
if self.local_no_context_takeover:
self.encoder = zlib.compressobj(
wbits=-self.local_max_window_bits, **self.compress_settings
)
# Compress data.
data = self.encoder.compress(frame.data) + self.encoder.flush(zlib.Z_SYNC_FLUSH)
if frame.fin and data.endswith(_EMPTY_UNCOMPRESSED_BLOCK):
data = data[:-4]
# Allow garbage collection of the encoder if it won't be reused.
if frame.fin and self.local_no_context_takeover:
del self.encoder
return dataclasses.replace(frame, data=data)
def _build_parameters(
server_no_context_takeover: bool,
client_no_context_takeover: bool,
server_max_window_bits: Optional[int],
client_max_window_bits: Optional[Union[int, bool]],
) -> List[ExtensionParameter]:
"""
Build a list of ``(name, value)`` pairs for some compression parameters.
"""
params: List[ExtensionParameter] = []
if server_no_context_takeover:
params.append(("server_no_context_takeover", None))
if client_no_context_takeover:
params.append(("client_no_context_takeover", None))
if server_max_window_bits:
params.append(("server_max_window_bits", str(server_max_window_bits)))
if client_max_window_bits is True: # only in handshake requests
params.append(("client_max_window_bits", None))
elif client_max_window_bits:
params.append(("client_max_window_bits", str(client_max_window_bits)))
return params
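# For illustration: a client that caps its own window at 12 bits and disables
# its own context takeover produces this offer (a sketch):
#
#     >>> _build_parameters(False, True, None, 12)
#     [('client_no_context_takeover', None), ('client_max_window_bits', '12')]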
def _extract_parameters(
params: Sequence[ExtensionParameter], *, is_server: bool
) -> Tuple[bool, bool, Optional[int], Optional[Union[int, bool]]]:
"""
Extract compression parameters from a list of ``(name, value)`` pairs.
If ``is_server`` is :obj:`True`, ``client_max_window_bits`` may be
provided without a value. This is only allowed in handshake requests.
"""
server_no_context_takeover: bool = False
client_no_context_takeover: bool = False
server_max_window_bits: Optional[int] = None
client_max_window_bits: Optional[Union[int, bool]] = None
for name, value in params:
if name == "server_no_context_takeover":
if server_no_context_takeover:
raise exceptions.DuplicateParameter(name)
if value is None:
server_no_context_takeover = True
else:
raise exceptions.InvalidParameterValue(name, value)
elif name == "client_no_context_takeover":
if client_no_context_takeover:
raise exceptions.DuplicateParameter(name)
if value is None:
client_no_context_takeover = True
else:
raise exceptions.InvalidParameterValue(name, value)
elif name == "server_max_window_bits":
if server_max_window_bits is not None:
raise exceptions.DuplicateParameter(name)
if value in _MAX_WINDOW_BITS_VALUES:
server_max_window_bits = int(value)
else:
raise exceptions.InvalidParameterValue(name, value)
elif name == "client_max_window_bits":
if client_max_window_bits is not None:
raise exceptions.DuplicateParameter(name)
if is_server and value is None: # only in handshake requests
client_max_window_bits = True
elif value in _MAX_WINDOW_BITS_VALUES:
client_max_window_bits = int(value)
else:
raise exceptions.InvalidParameterValue(name, value)
else:
raise exceptions.InvalidParameterName(name)
return (
server_no_context_takeover,
client_no_context_takeover,
server_max_window_bits,
client_max_window_bits,
)
class ClientPerMessageDeflateFactory(ClientExtensionFactory):
"""
Client-side extension factory for the Per-Message Deflate extension.
Parameters behave as described in `section 7.1 of RFC 7692`_.
.. _section 7.1 of RFC 7692: https://www.rfc-editor.org/rfc/rfc7692.html#section-7.1
Set them to :obj:`True` to include them in the negotiation offer without a
value or to an integer value to include them with this value.
Args:
server_no_context_takeover: prevent server from using context takeover.
client_no_context_takeover: prevent client from using context takeover.
server_max_window_bits: maximum size of the server's LZ77 sliding window
in bits, between 8 and 15.
client_max_window_bits: maximum size of the client's LZ77 sliding window
in bits, between 8 and 15, or :obj:`True` to indicate support without
setting a limit.
compress_settings: additional keyword arguments for :func:`zlib.compressobj`,
excluding ``wbits``.
"""
name = ExtensionName("permessage-deflate")
def __init__(
self,
server_no_context_takeover: bool = False,
client_no_context_takeover: bool = False,
server_max_window_bits: Optional[int] = None,
client_max_window_bits: Optional[Union[int, bool]] = True,
compress_settings: Optional[Dict[str, Any]] = None,
) -> None:
"""
Configure the Per-Message Deflate extension factory.
"""
if not (server_max_window_bits is None or 8 <= server_max_window_bits <= 15):
raise ValueError("server_max_window_bits must be between 8 and 15")
if not (
client_max_window_bits is None
or client_max_window_bits is True
or 8 <= client_max_window_bits <= 15
):
raise ValueError("client_max_window_bits must be between 8 and 15")
if compress_settings is not None and "wbits" in compress_settings:
raise ValueError(
"compress_settings must not include wbits, "
"set client_max_window_bits instead"
)
self.server_no_context_takeover = server_no_context_takeover
self.client_no_context_takeover = client_no_context_takeover
self.server_max_window_bits = server_max_window_bits
self.client_max_window_bits = client_max_window_bits
self.compress_settings = compress_settings
def get_request_params(self) -> List[ExtensionParameter]:
"""
Build request parameters.
"""
return _build_parameters(
self.server_no_context_takeover,
self.client_no_context_takeover,
self.server_max_window_bits,
self.client_max_window_bits,
)
def process_response_params(
self,
params: Sequence[ExtensionParameter],
accepted_extensions: Sequence[Extension],
) -> PerMessageDeflate:
"""
Process response parameters.
Return an extension instance.
"""
if any(other.name == self.name for other in accepted_extensions):
raise exceptions.NegotiationError(f"received duplicate {self.name}")
# Request parameters are available in instance variables.
# Load response parameters in local variables.
(
server_no_context_takeover,
client_no_context_takeover,
server_max_window_bits,
client_max_window_bits,
) = _extract_parameters(params, is_server=False)
# After comparing the request and the response, the final
# configuration must be available in the local variables.
# server_no_context_takeover
#
# Req. Resp. Result
# ------ ------ --------------------------------------------------
# False False False
# False True True
# True False Error!
# True True True
if self.server_no_context_takeover:
if not server_no_context_takeover:
raise exceptions.NegotiationError("expected server_no_context_takeover")
# client_no_context_takeover
#
# Req. Resp. Result
# ------ ------ --------------------------------------------------
# False False False
# False True True
# True False True - must change value
# True True True
if self.client_no_context_takeover:
if not client_no_context_takeover:
client_no_context_takeover = True
# server_max_window_bits
# Req. Resp. Result
# ------ ------ --------------------------------------------------
# None None None
# None 8≤M≤15 M
# 8≤N≤15 None Error!
# 8≤N≤15 8≤M≤N M
# 8≤N≤15 N<M≤15 Error!
if self.server_max_window_bits is None:
pass
else:
if server_max_window_bits is None:
raise exceptions.NegotiationError("expected server_max_window_bits")
elif server_max_window_bits > self.server_max_window_bits:
raise exceptions.NegotiationError("unsupported server_max_window_bits")
# client_max_window_bits
# Req. Resp. Result
# ------ ------ --------------------------------------------------
# None None None
# None 8≤M≤15 Error!
# True None None
# True 8≤M≤15 M
# 8≤N≤15 None N - must change value
# 8≤N≤15 8≤M≤N M
# 8≤N≤15 N<M≤15 Error!
if self.client_max_window_bits is None:
if client_max_window_bits is not None:
raise exceptions.NegotiationError("unexpected client_max_window_bits")
elif self.client_max_window_bits is True:
pass
else:
if client_max_window_bits is None:
client_max_window_bits = self.client_max_window_bits
elif client_max_window_bits > self.client_max_window_bits:
raise exceptions.NegotiationError("unsupported client_max_window_bits")
return PerMessageDeflate(
server_no_context_takeover, # remote_no_context_takeover
client_no_context_takeover, # local_no_context_takeover
server_max_window_bits or 15, # remote_max_window_bits
client_max_window_bits or 15, # local_max_window_bits
self.compress_settings,
)
def enable_client_permessage_deflate(
extensions: Optional[Sequence[ClientExtensionFactory]],
) -> Sequence[ClientExtensionFactory]:
"""
Enable Per-Message Deflate with default settings in client extensions.
If the extension is already present, perhaps with non-default settings,
the configuration isn't changed.
"""
if extensions is None:
extensions = []
if not any(
extension_factory.name == ClientPerMessageDeflateFactory.name
for extension_factory in extensions
):
extensions = list(extensions) + [
ClientPerMessageDeflateFactory(
compress_settings={"memLevel": 5},
)
]
return extensions
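# A sketch of the offer produced by the default client factory above:
#
#     >>> factory = enable_client_permessage_deflate(None)[0]
#     >>> factory.get_request_params()
#     [('client_max_window_bits', None)]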
class ServerPerMessageDeflateFactory(ServerExtensionFactory):
"""
Server-side extension factory for the Per-Message Deflate extension.
Parameters behave as described in `section 7.1 of RFC 7692`_.
.. _section 7.1 of RFC 7692: https://www.rfc-editor.org/rfc/rfc7692.html#section-7.1
Set them to :obj:`True` to include them in the negotiation offer without a
value or to an integer value to include them with this value.
Args:
server_no_context_takeover: prevent server from using context takeover.
client_no_context_takeover: prevent client from using context takeover.
server_max_window_bits: maximum size of the server's LZ77 sliding window
in bits, between 8 and 15.
client_max_window_bits: maximum size of the client's LZ77 sliding window
in bits, between 8 and 15.
compress_settings: additional keyword arguments for :func:`zlib.compressobj`,
excluding ``wbits``.
require_client_max_window_bits: do not enable compression at all if
client doesn't advertise support for ``client_max_window_bits``;
the default behavior is to enable compression without enforcing
``client_max_window_bits``.
"""
name = ExtensionName("permessage-deflate")
def __init__(
self,
server_no_context_takeover: bool = False,
client_no_context_takeover: bool = False,
server_max_window_bits: Optional[int] = None,
client_max_window_bits: Optional[int] = None,
compress_settings: Optional[Dict[str, Any]] = None,
require_client_max_window_bits: bool = False,
) -> None:
"""
Configure the Per-Message Deflate extension factory.
"""
if not (server_max_window_bits is None or 8 <= server_max_window_bits <= 15):
raise ValueError("server_max_window_bits must be between 8 and 15")
if not (client_max_window_bits is None or 8 <= client_max_window_bits <= 15):
raise ValueError("client_max_window_bits must be between 8 and 15")
if compress_settings is not None and "wbits" in compress_settings:
raise ValueError(
"compress_settings must not include wbits, "
"set server_max_window_bits instead"
)
if client_max_window_bits is None and require_client_max_window_bits:
raise ValueError(
"require_client_max_window_bits is enabled, "
"but client_max_window_bits isn't configured"
)
self.server_no_context_takeover = server_no_context_takeover
self.client_no_context_takeover = client_no_context_takeover
self.server_max_window_bits = server_max_window_bits
self.client_max_window_bits = client_max_window_bits
self.compress_settings = compress_settings
self.require_client_max_window_bits = require_client_max_window_bits
def process_request_params(
self,
params: Sequence[ExtensionParameter],
accepted_extensions: Sequence[Extension],
) -> Tuple[List[ExtensionParameter], PerMessageDeflate]:
"""
Process request parameters.
Return response params and an extension instance.
"""
if any(other.name == self.name for other in accepted_extensions):
raise exceptions.NegotiationError(f"skipped duplicate {self.name}")
# Load request parameters in local variables.
(
server_no_context_takeover,
client_no_context_takeover,
server_max_window_bits,
client_max_window_bits,
) = _extract_parameters(params, is_server=True)
# Configuration parameters are available in instance variables.
# After comparing the request and the configuration, the response must
# be available in the local variables.
# server_no_context_takeover
#
# Config Req. Resp.
# ------ ------ --------------------------------------------------
# False False False
# False True True
# True False True - must change value to True
# True True True
if self.server_no_context_takeover:
if not server_no_context_takeover:
server_no_context_takeover = True
# client_no_context_takeover
#
# Config Req. Resp.
# ------ ------ --------------------------------------------------
# False False False
# False True True (or False)
# True False True - must change value to True
# True True True (or False)
if self.client_no_context_takeover:
if not client_no_context_takeover:
client_no_context_takeover = True
# server_max_window_bits
# Config Req. Resp.
# ------ ------ --------------------------------------------------
# None None None
# None 8≤M≤15 M
# 8≤N≤15 None N - must change value
# 8≤N≤15 8≤M≤N M
# 8≤N≤15 N<M≤15 N - must change value
if self.server_max_window_bits is None:
pass
else:
if server_max_window_bits is None:
server_max_window_bits = self.server_max_window_bits
elif server_max_window_bits > self.server_max_window_bits:
server_max_window_bits = self.server_max_window_bits
# client_max_window_bits
# Config Req. Resp.
# ------ ------ --------------------------------------------------
# None None None
# None True None - must change value
# None 8≤M≤15 M (or None)
# 8≤N≤15 None None or Error!
# 8≤N≤15 True N - must change value
# 8≤N≤15 8≤M≤N M (or None)
# 8≤N≤15 N<M≤15 N
if self.client_max_window_bits is None:
if client_max_window_bits is True:
client_max_window_bits = self.client_max_window_bits
else:
if client_max_window_bits is None:
if self.require_client_max_window_bits:
raise exceptions.NegotiationError("required client_max_window_bits")
elif client_max_window_bits is True:
client_max_window_bits = self.client_max_window_bits
elif self.client_max_window_bits < client_max_window_bits:
client_max_window_bits = self.client_max_window_bits
return (
_build_parameters(
server_no_context_takeover,
client_no_context_takeover,
server_max_window_bits,
client_max_window_bits,
),
PerMessageDeflate(
client_no_context_takeover, # remote_no_context_takeover
server_no_context_takeover, # local_no_context_takeover
client_max_window_bits or 15, # remote_max_window_bits
server_max_window_bits or 15, # local_max_window_bits
self.compress_settings,
),
)
def enable_server_permessage_deflate(
extensions: Optional[Sequence[ServerExtensionFactory]],
) -> Sequence[ServerExtensionFactory]:
"""
Enable Per-Message Deflate with default settings in server extensions.
If the extension is already present, perhaps with non-default settings,
the configuration isn't changed.
"""
if extensions is None:
extensions = []
if not any(
ext_factory.name == ServerPerMessageDeflateFactory.name
for ext_factory in extensions
):
extensions = list(extensions) + [
ServerPerMessageDeflateFactory(
server_max_window_bits=12,
client_max_window_bits=12,
compress_settings={"memLevel": 5},
)
]
return extensions
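# A minimal negotiation sketch using the default server factory above, fed
# with a typical client offer (hypothetical parameters, for illustration):
if __name__ == "__main__":  # illustration only
    factory = enable_server_permessage_deflate(None)[0]
    response_params, extension = factory.process_request_params(
        [("client_max_window_bits", None)], accepted_extensions=[]
    )
    print(response_params)  # both window sizes negotiated down to 12 bits
    print(extension)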
| 24,687 | Python | 36.293051 | 88 | 0.591283 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/http.py | from __future__ import annotations
import asyncio
import re
from typing import Tuple
from ..datastructures import Headers
from ..exceptions import SecurityError
__all__ = ["read_request", "read_response"]
MAX_HEADERS = 256
MAX_LINE = 4110
def d(value: bytes) -> str:
"""
Decode a bytestring for interpolating into an error message.
"""
return value.decode(errors="backslashreplace")
# See https://www.rfc-editor.org/rfc/rfc7230.html#appendix-B.
# Regex for validating header names.
_token_re = re.compile(rb"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+")
# Regex for validating header values.
# We don't attempt to support obsolete line folding.
# Include HTAB (\x09), SP (\x20), VCHAR (\x21-\x7e), obs-text (\x80-\xff).
# The ABNF is complicated because it attempts to express that optional
# whitespace is ignored. We strip whitespace and don't revalidate that.
# See also https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
_value_re = re.compile(rb"[\x09\x20-\x7e\x80-\xff]*")
async def read_request(stream: asyncio.StreamReader) -> Tuple[str, Headers]:
"""
Read an HTTP/1.1 GET request and return ``(path, headers)``.
``path`` isn't URL-decoded or validated in any way.
``path`` and ``headers`` are expected to contain only ASCII characters.
Other characters are represented with surrogate escapes.
:func:`read_request` doesn't attempt to read the request body because
WebSocket handshake requests don't have one. If the request contains a
body, it may be read from ``stream`` after this coroutine returns.
Args:
stream: input to read the request from
Raises:
EOFError: if the connection is closed without a full HTTP request
SecurityError: if the request exceeds a security limit
ValueError: if the request isn't well formatted
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.1.1
# Parsing is simple because fixed values are expected for method and
# version and because path isn't checked. Since WebSocket software tends
# to implement HTTP/1.1 strictly, there's little need for lenient parsing.
try:
request_line = await read_line(stream)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP request line") from exc
try:
method, raw_path, version = request_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP request line: {d(request_line)}") from None
if method != b"GET":
raise ValueError(f"unsupported HTTP method: {d(method)}")
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
path = raw_path.decode("ascii", "surrogateescape")
headers = await read_headers(stream)
return path, headers
async def read_response(stream: asyncio.StreamReader) -> Tuple[int, str, Headers]:
"""
Read an HTTP/1.1 response and return ``(status_code, reason, headers)``.
``reason`` and ``headers`` are expected to contain only ASCII characters.
Other characters are represented with surrogate escapes.
    :func:`read_response` doesn't attempt to read the response body because
WebSocket handshake responses don't have one. If the response contains a
body, it may be read from ``stream`` after this coroutine returns.
Args:
stream: input to read the response from
Raises:
EOFError: if the connection is closed without a full HTTP response
SecurityError: if the response exceeds a security limit
ValueError: if the response isn't well formatted
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.1.2
# As in read_request, parsing is simple because a fixed value is expected
# for version, status_code is a 3-digit number, and reason can be ignored.
try:
status_line = await read_line(stream)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP status line") from exc
try:
version, raw_status_code, raw_reason = status_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP status line: {d(status_line)}") from None
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
try:
status_code = int(raw_status_code)
except ValueError: # invalid literal for int() with base 10
raise ValueError(f"invalid HTTP status code: {d(raw_status_code)}") from None
if not 100 <= status_code < 1000:
raise ValueError(f"unsupported HTTP status code: {d(raw_status_code)}")
if not _value_re.fullmatch(raw_reason):
raise ValueError(f"invalid HTTP reason phrase: {d(raw_reason)}")
reason = raw_reason.decode()
headers = await read_headers(stream)
return status_code, reason, headers
async def read_headers(stream: asyncio.StreamReader) -> Headers:
"""
Read HTTP headers from ``stream``.
Non-ASCII characters are represented with surrogate escapes.
"""
# https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2
# We don't attempt to support obsolete line folding.
headers = Headers()
for _ in range(MAX_HEADERS + 1):
try:
line = await read_line(stream)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP headers") from exc
if line == b"":
break
try:
raw_name, raw_value = line.split(b":", 1)
except ValueError: # not enough values to unpack (expected 2, got 1)
raise ValueError(f"invalid HTTP header line: {d(line)}") from None
if not _token_re.fullmatch(raw_name):
raise ValueError(f"invalid HTTP header name: {d(raw_name)}")
raw_value = raw_value.strip(b" \t")
if not _value_re.fullmatch(raw_value):
raise ValueError(f"invalid HTTP header value: {d(raw_value)}")
name = raw_name.decode("ascii") # guaranteed to be ASCII at this point
value = raw_value.decode("ascii", "surrogateescape")
headers[name] = value
else:
raise SecurityError("too many HTTP headers")
return headers
async def read_line(stream: asyncio.StreamReader) -> bytes:
"""
Read a single line from ``stream``.
CRLF is stripped from the return value.
"""
# Security: this is bounded by the StreamReader's limit (default = 32 KiB).
line = await stream.readline()
# Security: this guarantees header values are small (hard-coded = 4 KiB)
if len(line) > MAX_LINE:
raise SecurityError("line too long")
# Not mandatory but safe - https://www.rfc-editor.org/rfc/rfc7230.html#section-3.5
if not line.endswith(b"\r\n"):
raise EOFError("line without CRLF")
return line[:-2]
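# A self-contained sketch of read_request() over an in-memory StreamReader
# (the request below is a hypothetical handshake, trimmed for brevity):
if __name__ == "__main__":  # illustration only
    async def _demo() -> None:
        stream = asyncio.StreamReader()
        stream.feed_data(
            b"GET /chat HTTP/1.1\r\n"
            b"Host: example.com\r\n"
            b"Upgrade: websocket\r\n"
            b"\r\n"
        )
        stream.feed_eof()
        path, headers = await read_request(stream)
        print(path)  # /chat
        print(headers["Host"])  # example.com

    asyncio.run(_demo())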
| 6,928 | Python | 33.30198 | 86 | 0.668736 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/handshake.py | from __future__ import annotations
import base64
import binascii
from typing import List
from ..datastructures import Headers, MultipleValuesError
from ..exceptions import InvalidHeader, InvalidHeaderValue, InvalidUpgrade
from ..headers import parse_connection, parse_upgrade
from ..typing import ConnectionOption, UpgradeProtocol
from ..utils import accept_key as accept, generate_key
__all__ = ["build_request", "check_request", "build_response", "check_response"]
def build_request(headers: Headers) -> str:
"""
Build a handshake request to send to the server.
Update request headers passed in argument.
Args:
headers: handshake request headers.
Returns:
str: ``key`` that must be passed to :func:`check_response`.
"""
key = generate_key()
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Key"] = key
headers["Sec-WebSocket-Version"] = "13"
return key
def check_request(headers: Headers) -> str:
"""
Check a handshake request received from the client.
This function doesn't verify that the request is an HTTP/1.1 or higher GET
request and doesn't perform ``Host`` and ``Origin`` checks. These controls
are usually performed earlier in the HTTP request handling code. They're
the responsibility of the caller.
Args:
headers: handshake request headers.
Returns:
str: ``key`` that must be passed to :func:`build_response`.
Raises:
InvalidHandshake: if the handshake request is invalid;
then the server must return 400 Bad Request error.
"""
connection: List[ConnectionOption] = sum(
[parse_connection(value) for value in headers.get_all("Connection")], []
)
if not any(value.lower() == "upgrade" for value in connection):
raise InvalidUpgrade("Connection", ", ".join(connection))
upgrade: List[UpgradeProtocol] = sum(
[parse_upgrade(value) for value in headers.get_all("Upgrade")], []
)
# For compatibility with non-strict implementations, ignore case when
# checking the Upgrade header. The RFC always uses "websocket", except
# in section 11.2. (IANA registration) where it uses "WebSocket".
if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
raise InvalidUpgrade("Upgrade", ", ".join(upgrade))
try:
s_w_key = headers["Sec-WebSocket-Key"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Key") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Key", "more than one Sec-WebSocket-Key header found"
) from exc
try:
raw_key = base64.b64decode(s_w_key.encode(), validate=True)
except binascii.Error as exc:
raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key) from exc
if len(raw_key) != 16:
raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key)
try:
s_w_version = headers["Sec-WebSocket-Version"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Version") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Version", "more than one Sec-WebSocket-Version header found"
) from exc
if s_w_version != "13":
raise InvalidHeaderValue("Sec-WebSocket-Version", s_w_version)
return s_w_key
def build_response(headers: Headers, key: str) -> None:
"""
Build a handshake response to send to the client.
Update response headers passed in argument.
Args:
headers: handshake response headers.
key: returned by :func:`check_request`.
"""
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept(key)
def check_response(headers: Headers, key: str) -> None:
"""
Check a handshake response received from the server.
This function doesn't verify that the response is an HTTP/1.1 or higher
response with a 101 status code. These controls are the responsibility of
the caller.
Args:
headers: handshake response headers.
key: returned by :func:`build_request`.
Raises:
InvalidHandshake: if the handshake response is invalid.
"""
connection: List[ConnectionOption] = sum(
[parse_connection(value) for value in headers.get_all("Connection")], []
)
if not any(value.lower() == "upgrade" for value in connection):
raise InvalidUpgrade("Connection", " ".join(connection))
upgrade: List[UpgradeProtocol] = sum(
[parse_upgrade(value) for value in headers.get_all("Upgrade")], []
)
# For compatibility with non-strict implementations, ignore case when
# checking the Upgrade header. The RFC always uses "websocket", except
# in section 11.2. (IANA registration) where it uses "WebSocket".
if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
raise InvalidUpgrade("Upgrade", ", ".join(upgrade))
try:
s_w_accept = headers["Sec-WebSocket-Accept"]
except KeyError as exc:
raise InvalidHeader("Sec-WebSocket-Accept") from exc
except MultipleValuesError as exc:
raise InvalidHeader(
"Sec-WebSocket-Accept", "more than one Sec-WebSocket-Accept header found"
) from exc
if s_w_accept != accept(key):
raise InvalidHeaderValue("Sec-WebSocket-Accept", s_w_accept)
| 5,476 | Python | 31.993976 | 87 | 0.666545 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/framing.py | from __future__ import annotations
import dataclasses
import struct
from typing import Any, Awaitable, Callable, NamedTuple, Optional, Sequence, Tuple
from .. import extensions, frames
from ..exceptions import PayloadTooBig, ProtocolError
try:
from ..speedups import apply_mask
except ImportError: # pragma: no cover
from ..utils import apply_mask
class Frame(NamedTuple):
fin: bool
opcode: frames.Opcode
data: bytes
rsv1: bool = False
rsv2: bool = False
rsv3: bool = False
@property
def new_frame(self) -> frames.Frame:
return frames.Frame(
self.opcode,
self.data,
self.fin,
self.rsv1,
self.rsv2,
self.rsv3,
)
def __str__(self) -> str:
return str(self.new_frame)
def check(self) -> None:
return self.new_frame.check()
@classmethod
async def read(
cls,
reader: Callable[[int], Awaitable[bytes]],
*,
mask: bool,
max_size: Optional[int] = None,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> Frame:
"""
Read a WebSocket frame.
Args:
reader: coroutine that reads exactly the requested number of
bytes, unless the end of file is reached.
            mask: whether the frame should be masked, i.e. whether the read
                happens on the server side.
max_size: maximum payload size in bytes.
extensions: list of extensions, applied in reverse order.
Raises:
PayloadTooBig: if the frame exceeds ``max_size``.
ProtocolError: if the frame contains incorrect values.
"""
# Read the header.
data = await reader(2)
head1, head2 = struct.unpack("!BB", data)
# While not Pythonic, this is marginally faster than calling bool().
fin = True if head1 & 0b10000000 else False
rsv1 = True if head1 & 0b01000000 else False
rsv2 = True if head1 & 0b00100000 else False
rsv3 = True if head1 & 0b00010000 else False
try:
opcode = frames.Opcode(head1 & 0b00001111)
except ValueError as exc:
raise ProtocolError("invalid opcode") from exc
if (True if head2 & 0b10000000 else False) != mask:
raise ProtocolError("incorrect masking")
length = head2 & 0b01111111
if length == 126:
data = await reader(2)
(length,) = struct.unpack("!H", data)
elif length == 127:
data = await reader(8)
(length,) = struct.unpack("!Q", data)
if max_size is not None and length > max_size:
raise PayloadTooBig(f"over size limit ({length} > {max_size} bytes)")
if mask:
mask_bits = await reader(4)
# Read the data.
data = await reader(length)
if mask:
data = apply_mask(data, mask_bits)
new_frame = frames.Frame(opcode, data, fin, rsv1, rsv2, rsv3)
if extensions is None:
extensions = []
for extension in reversed(extensions):
new_frame = extension.decode(new_frame, max_size=max_size)
new_frame.check()
return cls(
new_frame.fin,
new_frame.opcode,
new_frame.data,
new_frame.rsv1,
new_frame.rsv2,
new_frame.rsv3,
)
def write(
self,
write: Callable[[bytes], Any],
*,
mask: bool,
extensions: Optional[Sequence[extensions.Extension]] = None,
) -> None:
"""
Write a WebSocket frame.
        Args:
            write: function that writes bytes.
            mask: whether the frame should be masked, i.e. whether the write
                happens on the client side.
extensions: list of extensions, applied in order.
Raises:
ProtocolError: if the frame contains incorrect values.
"""
# The frame is written in a single call to write in order to prevent
# TCP fragmentation. See #68 for details. This also makes it safe to
# send frames concurrently from multiple coroutines.
write(self.new_frame.serialize(mask=mask, extensions=extensions))
# Backwards compatibility with previously documented public APIs
from ..frames import Close, prepare_ctrl as encode_data, prepare_data # noqa
def parse_close(data: bytes) -> Tuple[int, str]:
"""
Parse the payload from a close frame.
Returns:
Tuple[int, str]: close code and reason.
Raises:
ProtocolError: if data is ill-formed.
UnicodeDecodeError: if the reason isn't valid UTF-8.
"""
return dataclasses.astuple(Close.parse(data)) # type: ignore
def serialize_close(code: int, reason: str) -> bytes:
"""
Serialize the payload for a close frame.
"""
return Close(code, reason).serialize()
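# For illustration, the two helpers above are inverses of each other:
#
#     >>> serialize_close(1000, "bye")
#     b'\x03\xe8bye'
#     >>> parse_close(b"\x03\xe8bye")
#     (1000, 'bye')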
| 5,021 | Python | 27.697143 | 82 | 0.587931 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/server.py | from __future__ import annotations
import asyncio
import email.utils
import functools
import http
import inspect
import logging
import socket
import warnings
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Generator,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
from ..connection import State
from ..datastructures import Headers, HeadersLike, MultipleValuesError
from ..exceptions import (
AbortHandshake,
InvalidHandshake,
InvalidHeader,
InvalidMessage,
InvalidOrigin,
InvalidUpgrade,
NegotiationError,
)
from ..extensions import Extension, ServerExtensionFactory
from ..extensions.permessage_deflate import enable_server_permessage_deflate
from ..headers import (
build_extension,
parse_extension,
parse_subprotocol,
validate_subprotocols,
)
from ..http import USER_AGENT
from ..typing import ExtensionHeader, LoggerLike, Origin, Subprotocol
from .compatibility import loop_if_py_lt_38
from .handshake import build_response, check_request
from .http import read_request
from .protocol import WebSocketCommonProtocol
__all__ = ["serve", "unix_serve", "WebSocketServerProtocol", "WebSocketServer"]
HeadersLikeOrCallable = Union[HeadersLike, Callable[[str, Headers], HeadersLike]]
HTTPResponse = Tuple[http.HTTPStatus, HeadersLike, bytes]
class WebSocketServerProtocol(WebSocketCommonProtocol):
"""
WebSocket server connection.
:class:`WebSocketServerProtocol` provides :meth:`recv` and :meth:`send`
coroutines for receiving and sending messages.
It supports asynchronous iteration to receive messages::
async for message in websocket:
await process(message)
The iterator exits normally when the connection is closed with close code
1000 (OK) or 1001 (going away). It raises
a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection
is closed with any other code.
You may customize the opening handshake in a subclass by
overriding :meth:`process_request` or :meth:`select_subprotocol`.
Args:
ws_server: WebSocket server that created this connection.
See :func:`serve` for the documentation of ``ws_handler``, ``logger``, ``origins``,
``extensions``, ``subprotocols``, and ``extra_headers``.
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
"""
is_client = False
side = "server"
def __init__(
self,
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
ws_server: WebSocketServer,
*,
logger: Optional[LoggerLike] = None,
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
**kwargs: Any,
) -> None:
if logger is None:
logger = logging.getLogger("websockets.server")
super().__init__(logger=logger, **kwargs)
# For backwards compatibility with 6.0 or earlier.
if origins is not None and "" in origins:
warnings.warn("use None instead of '' in origins", DeprecationWarning)
origins = [None if origin == "" else origin for origin in origins]
# For backwards compatibility with 10.0 or earlier. Done here in
# addition to serve to trigger the deprecation warning on direct
# use of WebSocketServerProtocol.
self.ws_handler = remove_path_argument(ws_handler)
self.ws_server = ws_server
self.origins = origins
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.extra_headers = extra_headers
self._process_request = process_request
self._select_subprotocol = select_subprotocol
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Register connection and initialize a task to handle it.
"""
super().connection_made(transport)
# Register the connection with the server before creating the handler
# task. Registering at the beginning of the handler coroutine would
# create a race condition between the creation of the task, which
# schedules its execution, and the moment the handler starts running.
self.ws_server.register(self)
self.handler_task = self.loop.create_task(self.handler())
async def handler(self) -> None:
"""
Handle the lifecycle of a WebSocket connection.
Since this method doesn't have a caller able to handle exceptions, it
        attempts to log relevant ones and guarantees that the TCP connection is
closed before exiting.
"""
try:
try:
await self.handshake(
origins=self.origins,
available_extensions=self.available_extensions,
available_subprotocols=self.available_subprotocols,
extra_headers=self.extra_headers,
)
# Remove this branch when dropping support for Python < 3.8
# because CancelledError no longer inherits Exception.
except asyncio.CancelledError: # pragma: no cover
raise
except ConnectionError:
raise
except Exception as exc:
if isinstance(exc, AbortHandshake):
status, headers, body = exc.status, exc.headers, exc.body
elif isinstance(exc, InvalidOrigin):
if self.debug:
self.logger.debug("! invalid origin", exc_info=True)
status, headers, body = (
http.HTTPStatus.FORBIDDEN,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
elif isinstance(exc, InvalidUpgrade):
if self.debug:
self.logger.debug("! invalid upgrade", exc_info=True)
status, headers, body = (
http.HTTPStatus.UPGRADE_REQUIRED,
Headers([("Upgrade", "websocket")]),
(
f"Failed to open a WebSocket connection: {exc}.\n"
f"\n"
f"You cannot access a WebSocket server directly "
f"with a browser. You need a WebSocket client.\n"
).encode(),
)
elif isinstance(exc, InvalidHandshake):
if self.debug:
self.logger.debug("! invalid handshake", exc_info=True)
status, headers, body = (
http.HTTPStatus.BAD_REQUEST,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
else:
self.logger.error("opening handshake failed", exc_info=True)
status, headers, body = (
http.HTTPStatus.INTERNAL_SERVER_ERROR,
Headers(),
(
b"Failed to open a WebSocket connection.\n"
b"See server log for more information.\n"
),
)
headers.setdefault("Date", email.utils.formatdate(usegmt=True))
headers.setdefault("Server", USER_AGENT)
headers.setdefault("Content-Length", str(len(body)))
headers.setdefault("Content-Type", "text/plain")
headers.setdefault("Connection", "close")
self.write_http_response(status, headers, body)
self.logger.info(
"connection failed (%d %s)", status.value, status.phrase
)
await self.close_transport()
return
try:
await self.ws_handler(self)
except Exception:
self.logger.error("connection handler failed", exc_info=True)
if not self.closed:
self.fail_connection(1011)
raise
try:
await self.close()
except ConnectionError:
raise
except Exception:
self.logger.error("closing handshake failed", exc_info=True)
raise
except Exception:
# Last-ditch attempt to avoid leaking connections on errors.
try:
self.transport.close()
except Exception: # pragma: no cover
pass
finally:
# Unregister the connection with the server when the handler task
# terminates. Registration is tied to the lifecycle of the handler
# task because the server waits for tasks attached to registered
# connections before terminating.
self.ws_server.unregister(self)
self.logger.info("connection closed")
async def read_http_request(self) -> Tuple[str, Headers]:
"""
Read request line and headers from the HTTP request.
If the request contains a body, it may be read from ``self.reader``
after this coroutine returns.
Raises:
InvalidMessage: if the HTTP message is malformed or isn't an
HTTP/1.1 GET request.
"""
try:
path, headers = await read_request(self.reader)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as exc:
raise InvalidMessage("did not receive a valid HTTP request") from exc
if self.debug:
self.logger.debug("< GET %s HTTP/1.1", path)
for key, value in headers.raw_items():
self.logger.debug("< %s: %s", key, value)
self.path = path
self.request_headers = headers
return path, headers
def write_http_response(
self, status: http.HTTPStatus, headers: Headers, body: Optional[bytes] = None
) -> None:
"""
Write status line and headers to the HTTP response.
This method is also able to write a response body.
"""
self.response_headers = headers
if self.debug:
self.logger.debug("> HTTP/1.1 %d %s", status.value, status.phrase)
for key, value in headers.raw_items():
self.logger.debug("> %s: %s", key, value)
if body is not None:
self.logger.debug("> [body] (%d bytes)", len(body))
# Since the status line and headers only contain ASCII characters,
# we can keep this simple.
response = f"HTTP/1.1 {status.value} {status.phrase}\r\n"
response += str(headers)
self.transport.write(response.encode())
if body is not None:
self.transport.write(body)
async def process_request(
self, path: str, request_headers: Headers
) -> Optional[HTTPResponse]:
"""
Intercept the HTTP request and return an HTTP response if appropriate.
You may override this method in a :class:`WebSocketServerProtocol`
subclass, for example:
* to return an HTTP 200 OK response on a given path; then a load
balancer can use this path for a health check;
* to authenticate the request and return an HTTP 401 Unauthorized or
an HTTP 403 Forbidden response when authentication fails.
You may also override this method with the ``process_request``
argument of :func:`serve` and :class:`WebSocketServerProtocol`. This
is equivalent, except ``process_request`` won't have access to the
protocol instance, so it can't store information for later use.
:meth:`process_request` is expected to complete quickly. If it may run
for a long time, then it should await :meth:`wait_closed` and exit if
:meth:`wait_closed` completes, or else it could prevent the server
from shutting down.
Args:
path: request path, including optional query string.
request_headers: request headers.
Returns:
Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]: :obj:`None`
to continue the WebSocket handshake normally.
An HTTP response, represented by a 3-tuple of the response status,
headers, and body, to abort the WebSocket handshake and return
that HTTP response instead.
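For example, a minimal sketch of a health check endpoint; the
``/healthz`` path is a hypothetical choice, not something websockets
defines::

    async def health_check(path, request_headers):
        if path == "/healthz":
            return http.HTTPStatus.OK, [], b"OK\n"
        # Return None implicitly to continue the WebSocket handshake.

    serve(ws_handler, host, port, process_request=health_check)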
"""
if self._process_request is not None:
response = self._process_request(path, request_headers)
if isinstance(response, Awaitable):
return await response
else:
# For backwards compatibility with 7.0.
warnings.warn(
"declare process_request as a coroutine", DeprecationWarning
)
return response
return None
@staticmethod
def process_origin(
headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None
) -> Optional[Origin]:
"""
Handle the Origin HTTP request header.
Args:
headers: request headers.
origins: optional list of acceptable origins.
Raises:
InvalidOrigin: if the origin isn't acceptable.
"""
# "The user agent MUST NOT include more than one Origin header field"
# per https://www.rfc-editor.org/rfc/rfc6454.html#section-7.3.
try:
origin = cast(Optional[Origin], headers.get("Origin"))
except MultipleValuesError as exc:
raise InvalidHeader("Origin", "more than one Origin header found") from exc
if origins is not None:
if origin not in origins:
raise InvalidOrigin(origin)
return origin
@staticmethod
def process_extensions(
headers: Headers,
available_extensions: Optional[Sequence[ServerExtensionFactory]],
) -> Tuple[Optional[str], List[Extension]]:
"""
Handle the Sec-WebSocket-Extensions HTTP request header.
Accept or reject each extension proposed in the client request.
Negotiate parameters for accepted extensions.
Return the Sec-WebSocket-Extensions HTTP response header and the list
of accepted extensions.
:rfc:`6455` leaves the rules up to the specification of each
extension.
To provide this level of flexibility, for each extension proposed by
the client, we check for a match with each extension available in the
server configuration. If no match is found, the extension is ignored.
If several variants of the same extension are proposed by the client,
it may be accepted several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
This process doesn't allow the server to reorder extensions. It can
only select a subset of the extensions proposed by the client.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
Args:
headers: request headers.
available_extensions: optional list of supported extensions.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
response_header_value: Optional[str] = None
extension_headers: List[ExtensionHeader] = []
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values and available_extensions:
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, request_params in parsed_header_values:
for ext_factory in available_extensions:
# Skip non-matching extensions based on their name.
if ext_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
response_params, extension = ext_factory.process_request_params(
request_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
extension_headers.append((name, response_params))
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the client sent. The extension is declined.
# Serialize extension header.
if extension_headers:
response_header_value = build_extension(extension_headers)
return response_header_value, accepted_extensions
# Not @staticmethod because it calls self.select_subprotocol()
def process_subprotocol(
self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP request header.
Return the Sec-WebSocket-Protocol HTTP response header, which is the same
as the selected subprotocol.
Args:
headers: request headers.
available_subprotocols: optional list of supported subprotocols.
Raises:
InvalidHandshake: to abort the handshake with an HTTP 400 error.
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values and available_subprotocols:
parsed_header_values: List[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
subprotocol = self.select_subprotocol(
parsed_header_values, available_subprotocols
)
return subprotocol
def select_subprotocol(
self,
client_subprotocols: Sequence[Subprotocol],
server_subprotocols: Sequence[Subprotocol],
) -> Optional[Subprotocol]:
"""
Pick a subprotocol among those offered by the client.
If several subprotocols are supported by the client and the server,
the default implementation selects the preferred subprotocol by
giving equal value to the priorities of the client and the server.
If the client and the server have no subprotocol in common, the
connection proceeds without a subprotocol.
This is unlikely to be the most useful implementation in practice.
Many servers providing a subprotocol will require that the client
uses that subprotocol. Such rules can be implemented in a subclass.
You may also override this method with the ``select_subprotocol``
argument of :func:`serve` and :class:`WebSocketServerProtocol`.
Args:
client_subprotocols: list of subprotocols offered by the client.
server_subprotocols: list of subprotocols available on the server.
Returns:
Optional[Subprotocol]: Selected subprotocol.
:obj:`None` to continue without a subprotocol.
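For example, a minimal sketch of a subclass that insists on the
server's preferred subprotocol and otherwise proceeds without one;
this stricter policy is an assumption, not the default behavior::

    class StrictProtocol(WebSocketServerProtocol):
        def select_subprotocol(self, client_subprotocols, server_subprotocols):
            # Pick our first (most preferred) subprotocol if the client
            # offered it; else continue without a subprotocol.
            if server_subprotocols[0] in client_subprotocols:
                return server_subprotocols[0]
            return None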
"""
if self._select_subprotocol is not None:
return self._select_subprotocol(client_subprotocols, server_subprotocols)
subprotocols = set(client_subprotocols) & set(server_subprotocols)
if not subprotocols:
return None
priority = lambda p: (
client_subprotocols.index(p) + server_subprotocols.index(p)
)
return sorted(subprotocols, key=priority)[0]
async def handshake(
self,
origins: Optional[Sequence[Optional[Origin]]] = None,
available_extensions: Optional[Sequence[ServerExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
) -> str:
"""
Perform the server side of the opening handshake.
Args:
origins: list of acceptable values of the Origin HTTP header;
include :obj:`None` if the lack of an origin is acceptable.
available_extensions: list of supported extensions, in the order in
which they should be tried.
available_subprotocols: list of supported subprotocols, in order of
decreasing preference.
extra_headers: arbitrary HTTP headers to add to the response when
the handshake succeeds.
Returns:
str: path of the URI of the request.
Raises:
InvalidHandshake: if the handshake fails.
"""
path, request_headers = await self.read_http_request()
# Hook for customizing request handling, for example checking
# authentication or treating some paths as plain HTTP endpoints.
early_response_awaitable = self.process_request(path, request_headers)
if isinstance(early_response_awaitable, Awaitable):
early_response = await early_response_awaitable
else:
# For backwards compatibility with 7.0.
warnings.warn("declare process_request as a coroutine", DeprecationWarning)
early_response = early_response_awaitable
# The connection may drop while process_request is running.
if self.state is State.CLOSED:
raise self.connection_closed_exc() # pragma: no cover
# Change the response to a 503 error if the server is shutting down.
if not self.ws_server.is_serving():
early_response = (
http.HTTPStatus.SERVICE_UNAVAILABLE,
[],
b"Server is shutting down.\n",
)
if early_response is not None:
raise AbortHandshake(*early_response)
key = check_request(request_headers)
self.origin = self.process_origin(request_headers, origins)
extensions_header, self.extensions = self.process_extensions(
request_headers, available_extensions
)
protocol_header = self.subprotocol = self.process_subprotocol(
request_headers, available_subprotocols
)
response_headers = Headers()
build_response(response_headers, key)
if extensions_header is not None:
response_headers["Sec-WebSocket-Extensions"] = extensions_header
if protocol_header is not None:
response_headers["Sec-WebSocket-Protocol"] = protocol_header
if callable(extra_headers):
extra_headers = extra_headers(path, self.request_headers)
if extra_headers is not None:
response_headers.update(extra_headers)
response_headers.setdefault("Date", email.utils.formatdate(usegmt=True))
response_headers.setdefault("Server", USER_AGENT)
self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers)
self.logger.info("connection open")
self.connection_open()
return path
class WebSocketServer:
"""
WebSocket server returned by :func:`serve`.
This class provides the same interface as :class:`~asyncio.Server`,
notably the :meth:`~asyncio.Server.close`
and :meth:`~asyncio.Server.wait_closed` methods.
It keeps track of WebSocket connections in order to close them properly
when shutting down.
Args:
logger: logger for this server;
defaults to ``logging.getLogger("websockets.server")``;
see the :doc:`logging guide <../topics/logging>` for details.
"""
def __init__(self, logger: Optional[LoggerLike] = None):
if logger is None:
logger = logging.getLogger("websockets.server")
self.logger = logger
# Keep track of active connections.
self.websockets: Set[WebSocketServerProtocol] = set()
# Task responsible for closing the server and terminating connections.
self.close_task: Optional[asyncio.Task[None]] = None
# Completed when the server is closed and connections are terminated.
self.closed_waiter: asyncio.Future[None]
def wrap(self, server: asyncio.base_events.Server) -> None:
"""
Attach to a given :class:`~asyncio.Server`.
Since :meth:`~asyncio.loop.create_server` doesn't support injecting a
custom ``Server`` class, the easiest solution that doesn't rely on
private :mod:`asyncio` APIs is to:
- instantiate a :class:`WebSocketServer`
- give the protocol factory a reference to that instance
- call :meth:`~asyncio.loop.create_server` with the factory
- attach the resulting :class:`~asyncio.Server` with this method
"""
self.server = server
for sock in server.sockets:
if sock.family == socket.AF_INET:
name = "%s:%d" % sock.getsockname()
elif sock.family == socket.AF_INET6:
name = "[%s]:%d" % sock.getsockname()[:2]
elif sock.family == socket.AF_UNIX:
name = sock.getsockname()
# In the unlikely event that someone runs websockets over a
# protocol other than IP or Unix sockets, avoid crashing.
else: # pragma: no cover
name = str(sock.getsockname())
self.logger.info("server listening on %s", name)
# Initialized here because we need a reference to the event loop.
# This should be moved back to __init__ when dropping Python < 3.10.
self.closed_waiter = server.get_loop().create_future()
def register(self, protocol: WebSocketServerProtocol) -> None:
"""
Register a connection with this server.
"""
self.websockets.add(protocol)
def unregister(self, protocol: WebSocketServerProtocol) -> None:
"""
Unregister a connection with this server.
"""
self.websockets.remove(protocol)
def close(self) -> None:
"""
Close the server.
This method:
* closes the underlying :class:`~asyncio.Server`;
* rejects new WebSocket connections with an HTTP 503 (service
unavailable) error; this happens when the server accepted the TCP
connection but didn't complete the WebSocket opening handshake prior
to closing;
* closes open WebSocket connections with close code 1001 (going away).
:meth:`close` is idempotent.
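For example, the usual shutdown sequence::

    server.close()
    await server.wait_closed()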
"""
if self.close_task is None:
self.close_task = self.get_loop().create_task(self._close())
async def _close(self) -> None:
"""
Implementation of :meth:`close`.
This calls :meth:`~asyncio.Server.close` on the underlying
:class:`~asyncio.Server` object to stop accepting new connections and
then closes open connections with close code 1001.
"""
self.logger.info("server closing")
# Stop accepting new connections.
self.server.close()
# Wait until self.server.close() completes.
await self.server.wait_closed()
# Wait until all accepted connections reach connection_made() and call
# register(). See https://bugs.python.org/issue34852 for details.
await asyncio.sleep(0, **loop_if_py_lt_38(self.get_loop()))
# Close OPEN connections with status code 1001. Since the server was
# closed, handshake() closes OPENING connections with an HTTP 503
# error. Wait until all connections are closed.
close_tasks = [
asyncio.create_task(websocket.close(1001))
for websocket in self.websockets
if websocket.state is not State.CONNECTING
]
# asyncio.wait doesn't accept an empty first argument.
if close_tasks:
await asyncio.wait(
close_tasks,
**loop_if_py_lt_38(self.get_loop()),
)
# Wait until all connection handlers are complete.
# asyncio.wait doesn't accept an empty first argument.
if self.websockets:
await asyncio.wait(
[websocket.handler_task for websocket in self.websockets],
**loop_if_py_lt_38(self.get_loop()),
)
# Tell wait_closed() to return.
self.closed_waiter.set_result(None)
self.logger.info("server closed")
async def wait_closed(self) -> None:
"""
Wait until the server is closed.
When :meth:`wait_closed` returns, all TCP connections are closed and
all connection handlers have returned.
To ensure a fast shutdown, a connection handler should always be
awaiting at least one of:
* :meth:`~WebSocketServerProtocol.recv`: when the connection is closed,
it raises :exc:`~websockets.exceptions.ConnectionClosedOK`;
* :meth:`~WebSocketServerProtocol.wait_closed`: when the connection is
closed, it returns.
Then the connection handler is immediately notified of the shutdown;
it can clean up and exit.
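For example, a minimal sketch of a handler that follows this advice by
spending its time in :meth:`~WebSocketServerProtocol.recv`::

    async def handler(websocket):
        # Iterating awaits recv(), so the handler is notified as soon
        # as the connection is closed during shutdown.
        async for message in websocket:
            ...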
"""
await asyncio.shield(self.closed_waiter)
def get_loop(self) -> asyncio.AbstractEventLoop:
"""
See :meth:`asyncio.Server.get_loop`.
"""
return self.server.get_loop()
def is_serving(self) -> bool:
"""
See :meth:`asyncio.Server.is_serving`.
"""
return self.server.is_serving()
async def start_serving(self) -> None:
"""
See :meth:`asyncio.Server.start_serving`.
"""
await self.server.start_serving() # pragma: no cover
async def serve_forever(self) -> None:
"""
See :meth:`asyncio.Server.serve_forever`.
"""
await self.server.serve_forever() # pragma: no cover
@property
def sockets(self) -> Iterable[socket.socket]:
"""
See :attr:`asyncio.Server.sockets`.
"""
return self.server.sockets
async def __aenter__(self) -> WebSocketServer:
return self # pragma: no cover
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close() # pragma: no cover
await self.wait_closed() # pragma: no cover
class Serve:
"""
Start a WebSocket server listening on ``host`` and ``port``.
Whenever a client connects, the server creates a
:class:`WebSocketServerProtocol`, performs the opening handshake, and
delegates to the connection handler, ``ws_handler``.
The handler receives the :class:`WebSocketServerProtocol` and uses it to
send and receive messages.
Once the handler completes, either normally or with an exception, the
server performs the closing handshake and closes the connection.
Awaiting :func:`serve` yields a :class:`WebSocketServer`. This object
provides :meth:`~WebSocketServer.close` and
:meth:`~WebSocketServer.wait_closed` methods for shutting down the server.
:func:`serve` can be used as an asynchronous context manager::
stop = asyncio.Future() # set this future to exit the server
async with serve(...):
await stop
The server is shut down automatically when exiting the context.
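For example, a minimal echo server; ``localhost`` and ``8765`` are
placeholder values::

    async def echo(websocket):
        async for message in websocket:
            await websocket.send(message)

    async with serve(echo, "localhost", 8765):
        await asyncio.Future()  # run until canceled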
Args:
ws_handler: connection handler. It receives the WebSocket connection,
which is a :class:`WebSocketServerProtocol`, as an argument.
host: network interfaces the server is bound to;
see :meth:`~asyncio.loop.create_server` for details.
port: TCP port the server listens on;
see :meth:`~asyncio.loop.create_server` for details.
create_protocol: factory for the :class:`asyncio.Protocol` managing
the connection; defaults to :class:`WebSocketServerProtocol`; may
be set to a wrapper or a subclass to customize connection handling.
logger: logger for this server;
defaults to ``logging.getLogger("websockets.server")``;
see the :doc:`logging guide <../topics/logging>` for details.
compression: shortcut that enables the "permessage-deflate" extension
by default; may be set to :obj:`None` to disable compression;
see the :doc:`compression guide <../topics/compression>` for details.
origins: acceptable values of the ``Origin`` header; include
:obj:`None` in the list if the lack of an origin is acceptable.
This is useful for defending against Cross-Site WebSocket
Hijacking attacks.
extensions: list of supported extensions, in the order in which they
should be tried.
subprotocols: list of supported subprotocols, in order of decreasing
preference.
extra_headers (Union[HeadersLike, Callable[[str, Headers], HeadersLike]]):
arbitrary HTTP headers to add to the handshake response; this can be
a :data:`~websockets.datastructures.HeadersLike` or a callable
taking the request path and headers as arguments and returning
a :data:`~websockets.datastructures.HeadersLike`.
process_request (Optional[Callable[[str, Headers], \
Awaitable[Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]]]]):
intercept HTTP request before the opening handshake;
see :meth:`~WebSocketServerProtocol.process_request` for details.
select_subprotocol: select a subprotocol supported by the client;
see :meth:`~WebSocketServerProtocol.select_subprotocol` for details.
See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the
documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``.
Any other keyword arguments are passed to the event loop's
:meth:`~asyncio.loop.create_server` method.
For example:
* You can set ``ssl`` to a :class:`~ssl.SSLContext` to enable TLS.
* You can set ``sock`` to a :obj:`~socket.socket` that you created
outside of websockets.
Returns:
WebSocketServer: WebSocket server.
"""
def __init__(
self,
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
host: Optional[Union[str, Sequence[str]]] = None,
port: Optional[int] = None,
*,
create_protocol: Optional[Callable[[Any], WebSocketServerProtocol]] = None,
logger: Optional[LoggerLike] = None,
compression: Optional[str] = "deflate",
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2**20,
max_queue: Optional[int] = 2**5,
read_limit: int = 2**16,
write_limit: int = 2**16,
**kwargs: Any,
) -> None:
# Backwards compatibility: close_timeout used to be called timeout.
timeout: Optional[float] = kwargs.pop("timeout", None)
if timeout is None:
timeout = 10
else:
warnings.warn("rename timeout to close_timeout", DeprecationWarning)
# If both are specified, timeout is ignored.
if close_timeout is None:
close_timeout = timeout
# Backwards compatibility: create_protocol used to be called klass.
klass: Optional[Type[WebSocketServerProtocol]] = kwargs.pop("klass", None)
if klass is None:
klass = WebSocketServerProtocol
else:
warnings.warn("rename klass to create_protocol", DeprecationWarning)
# If both are specified, klass is ignored.
if create_protocol is None:
create_protocol = klass
# Backwards compatibility: recv() used to return None on closed connections
legacy_recv: bool = kwargs.pop("legacy_recv", False)
# Backwards compatibility: the loop parameter used to be supported.
_loop: Optional[asyncio.AbstractEventLoop] = kwargs.pop("loop", None)
if _loop is None:
loop = asyncio.get_event_loop()
else:
loop = _loop
warnings.warn("remove loop argument", DeprecationWarning)
ws_server = WebSocketServer(logger=logger)
secure = kwargs.get("ssl") is not None
if compression == "deflate":
extensions = enable_server_permessage_deflate(extensions)
elif compression is not None:
raise ValueError(f"unsupported compression: {compression}")
if subprotocols is not None:
validate_subprotocols(subprotocols)
factory = functools.partial(
create_protocol,
# For backwards compatibility with 10.0 or earlier. Done here in
# addition to WebSocketServerProtocol to trigger the deprecation
# warning once per serve() call rather than once per connection.
remove_path_argument(ws_handler),
ws_server,
host=host,
port=port,
secure=secure,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
max_size=max_size,
max_queue=max_queue,
read_limit=read_limit,
write_limit=write_limit,
loop=_loop,
legacy_recv=legacy_recv,
origins=origins,
extensions=extensions,
subprotocols=subprotocols,
extra_headers=extra_headers,
process_request=process_request,
select_subprotocol=select_subprotocol,
logger=logger,
)
if kwargs.pop("unix", False):
path: Optional[str] = kwargs.pop("path", None)
# unix_serve(path) must not specify host and port parameters.
assert host is None and port is None
create_server = functools.partial(
loop.create_unix_server, factory, path, **kwargs
)
else:
create_server = functools.partial(
loop.create_server, factory, host, port, **kwargs
)
# This is a coroutine function.
self._create_server = create_server
self.ws_server = ws_server
# async with serve(...)
async def __aenter__(self) -> WebSocketServer:
return await self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.ws_server.close()
await self.ws_server.wait_closed()
# await serve(...)
def __await__(self) -> Generator[Any, None, WebSocketServer]:
# Create a suitable iterator by calling __await__ on a coroutine.
return self.__await_impl__().__await__()
async def __await_impl__(self) -> WebSocketServer:
server = await self._create_server()
self.ws_server.wrap(server)
return self.ws_server
# yield from serve(...) - remove when dropping Python < 3.10
__iter__ = __await__
serve = Serve
def unix_serve(
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated
],
path: Optional[str] = None,
**kwargs: Any,
) -> Serve:
"""
Similar to :func:`serve`, but for listening on Unix sockets.
This function builds upon the event
loop's :meth:`~asyncio.loop.create_unix_server` method.
It is only available on Unix.
It's useful for deploying a server behind a reverse proxy such as nginx.
Args:
path: file system path to the Unix socket.
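For example, a minimal sketch with a hypothetical socket path::

    async with unix_serve(ws_handler, "/tmp/websockets.sock"):
        await asyncio.Future()  # run until canceled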
"""
return serve(ws_handler, path=path, unix=True, **kwargs)
def remove_path_argument(
ws_handler: Union[
Callable[[WebSocketServerProtocol], Awaitable[Any]],
Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
]
) -> Callable[[WebSocketServerProtocol], Awaitable[Any]]:
try:
inspect.signature(ws_handler).bind(None)
except TypeError:
try:
inspect.signature(ws_handler).bind(None, "")
except TypeError: # pragma: no cover
# ws_handler accepts neither one nor two arguments; leave it alone.
pass
else:
# ws_handler accepts two arguments; activate backwards compatibility.
# Enable deprecation warning and announce deprecation in 11.0.
# warnings.warn("remove second argument of ws_handler", DeprecationWarning)
async def _ws_handler(websocket: WebSocketServerProtocol) -> Any:
return await cast(
Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
ws_handler,
)(websocket, websocket.path)
return _ws_handler
return cast(
Callable[[WebSocketServerProtocol], Awaitable[Any]],
ws_handler,
)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/protocol.py
from __future__ import annotations
import asyncio
import codecs
import collections
import logging
import random
import ssl
import struct
import uuid
import warnings
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Deque,
Dict,
Iterable,
List,
Mapping,
Optional,
Union,
cast,
)
from ..connection import State
from ..datastructures import Headers
from ..exceptions import (
ConnectionClosed,
ConnectionClosedError,
ConnectionClosedOK,
InvalidState,
PayloadTooBig,
ProtocolError,
)
from ..extensions import Extension
from ..frames import (
OK_CLOSE_CODES,
OP_BINARY,
OP_CLOSE,
OP_CONT,
OP_PING,
OP_PONG,
OP_TEXT,
Close,
Opcode,
prepare_ctrl,
prepare_data,
)
from ..typing import Data, LoggerLike, Subprotocol
from .compatibility import loop_if_py_lt_38
from .framing import Frame
__all__ = ["WebSocketCommonProtocol", "broadcast"]
# In order to ensure consistency, the code always checks the current value of
# WebSocketCommonProtocol.state before assigning a new value and never yields
# between the check and the assignment.
class WebSocketCommonProtocol(asyncio.Protocol):
"""
WebSocket connection.
:class:`WebSocketCommonProtocol` provides APIs shared between WebSocket
servers and clients. You shouldn't use it directly. Instead, use
:class:`~websockets.client.WebSocketClientProtocol` or
:class:`~websockets.server.WebSocketServerProtocol`.
This documentation focuses on low-level details that aren't covered in the
documentation of :class:`~websockets.client.WebSocketClientProtocol` and
:class:`~websockets.server.WebSocketServerProtocol` for the sake of
simplicity.
Once the connection is open, a Ping_ frame is sent every ``ping_interval``
seconds. This serves as a keepalive. It helps keep the connection
open, especially in the presence of proxies with short timeouts on
inactive connections. Set ``ping_interval`` to :obj:`None` to disable
this behavior.
.. _Ping: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.5.2
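For example, a sketch disabling keepalive pings; it assumes the
protocol arguments are forwarded through
:func:`~websockets.server.serve`, and ``handler``, ``host``, and
``port`` are placeholders::

    serve(handler, host, port, ping_interval=None)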
If the corresponding Pong_ frame isn't received within ``ping_timeout``
seconds, the connection is considered unusable and is closed with code
1011. This ensures that the remote endpoint remains responsive. Set
``ping_timeout`` to :obj:`None` to disable this behavior.
.. _Pong: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.5.3
The ``close_timeout`` parameter defines a maximum wait time for completing
the closing handshake and terminating the TCP connection. For legacy
reasons, :meth:`close` completes in at most ``5 * close_timeout`` seconds
for clients and ``4 * close_timeout`` for servers.
See the discussion of :doc:`timeouts <../topics/timeouts>` for details.
``close_timeout`` needs to be a parameter of the protocol because
websockets usually calls :meth:`close` implicitly upon exit:
* on the client side, when :func:`~websockets.client.connect` is used as a
context manager;
* on the server side, when the connection handler terminates;
To apply a timeout to any other API, wrap it in :func:`~asyncio.wait_for`.
The ``max_size`` parameter enforces the maximum size for incoming messages
in bytes. The default value is 1 MiB. If a larger message is received,
:meth:`recv` will raise :exc:`~websockets.exceptions.ConnectionClosedError`
and the connection will be closed with code 1009.
The ``max_queue`` parameter sets the maximum length of the queue that
holds incoming messages. The default value is ``32``. Messages are added
to an in-memory queue when they're received; then :meth:`recv` pops from
that queue. In order to prevent excessive memory consumption when
messages are received faster than they can be processed, the queue must
be bounded. If the queue fills up, the protocol stops processing incoming
data until :meth:`recv` is called. In this situation, various receive
buffers (at least in :mod:`asyncio` and in the OS) will fill up, then the
TCP receive window will shrink, slowing down transmission to avoid packet
loss.
Since Python can use up to 4 bytes of memory to represent a single
character, each connection may use up to ``4 * max_size * max_queue``
bytes of memory to store incoming messages. By default, this is 128 MiB.
You may want to lower the limits, depending on your application's
requirements.
The ``read_limit`` argument sets the high-water limit of the buffer for
incoming bytes. The low-water limit is half the high-water limit. The
default value is 64 KiB, half of asyncio's default (based on the current
implementation of :class:`~asyncio.StreamReader`).
The ``write_limit`` argument sets the high-water limit of the buffer for
outgoing bytes. The low-water limit is a quarter of the high-water limit.
The default value is 64 KiB, equal to asyncio's default (based on the
current implementation of ``FlowControlMixin``).
See the discussion of :doc:`memory usage <../topics/memory>` for details.
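For example, with the hypothetical settings below, each connection
buffers at most ``4 * 2**16 * 2**2`` bytes of incoming messages,
i.e. 1 MiB instead of the default 128 MiB::

    serve(handler, host, port, max_size=2**16, max_queue=2**2)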
Args:
logger: logger for this connection;
defaults to ``logging.getLogger("websockets.protocol")``;
see the :doc:`logging guide <../topics/logging>` for details.
ping_interval: delay between keepalive pings in seconds;
:obj:`None` to disable keepalive pings.
ping_timeout: timeout for keepalive pings in seconds;
:obj:`None` to disable timeouts.
close_timeout: timeout for closing the connection in seconds;
for legacy reasons, the actual timeout is 4 or 5 times larger.
max_size: maximum size of incoming messages in bytes;
:obj:`None` to disable the limit.
max_queue: maximum number of incoming messages in the receive buffer;
:obj:`None` to disable the limit.
read_limit: high-water mark of read buffer in bytes.
write_limit: high-water mark of write buffer in bytes.
"""
# There are only two differences between the client-side and server-side
# behavior: masking the payload and closing the underlying TCP connection.
# Set is_client = True/False and side = "client"/"server" to pick a side.
is_client: bool
side: str = "undefined"
def __init__(
self,
*,
logger: Optional[LoggerLike] = None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2**20,
max_queue: Optional[int] = 2**5,
read_limit: int = 2**16,
write_limit: int = 2**16,
# The following arguments are kept only for backwards compatibility.
host: Optional[str] = None,
port: Optional[int] = None,
secure: Optional[bool] = None,
legacy_recv: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
timeout: Optional[float] = None,
) -> None:
if legacy_recv: # pragma: no cover
warnings.warn("legacy_recv is deprecated", DeprecationWarning)
# Backwards compatibility: close_timeout used to be called timeout.
if timeout is None:
timeout = 10
else:
warnings.warn("rename timeout to close_timeout", DeprecationWarning)
# If both are specified, timeout is ignored.
if close_timeout is None:
close_timeout = timeout
# Backwards compatibility: the loop parameter used to be supported.
if loop is None:
loop = asyncio.get_event_loop()
else:
warnings.warn("remove loop argument", DeprecationWarning)
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.close_timeout = close_timeout
self.max_size = max_size
self.max_queue = max_queue
self.read_limit = read_limit
self.write_limit = write_limit
# Unique identifier. For logs.
self.id: uuid.UUID = uuid.uuid4()
"""Unique identifier of the connection. Useful in logs."""
# Logger or LoggerAdapter for this connection.
if logger is None:
logger = logging.getLogger("websockets.protocol")
# https://github.com/python/typeshed/issues/5561
logger = cast(logging.Logger, logger)
self.logger: LoggerLike = logging.LoggerAdapter(logger, {"websocket": self})
"""Logger for this connection."""
# Track if DEBUG is enabled. Shortcut logging calls if it isn't.
self.debug = logger.isEnabledFor(logging.DEBUG)
self.loop = loop
self._host = host
self._port = port
self._secure = secure
self.legacy_recv = legacy_recv
# Configure read buffer limits. The high-water limit is defined by
# ``self.read_limit``. The ``limit`` argument controls the line length
# limit and half the buffer limit of :class:`~asyncio.StreamReader`.
# That's why it must be set to half of ``self.read_limit``.
self.reader = asyncio.StreamReader(limit=read_limit // 2, loop=loop)
# Copied from asyncio.FlowControlMixin
self._paused = False
self._drain_waiter: Optional[asyncio.Future[None]] = None
self._drain_lock = asyncio.Lock(**loop_if_py_lt_38(loop))
# This class implements the data transfer and closing handshake, which
# are shared between the client-side and the server-side.
# Subclasses implement the opening handshake and, on success, execute
# :meth:`connection_open` to change the state to OPEN.
self.state = State.CONNECTING
if self.debug:
self.logger.debug("= connection is CONNECTING")
# HTTP protocol parameters.
self.path: str
"""Path of the opening handshake request."""
self.request_headers: Headers
"""Opening handshake request headers."""
self.response_headers: Headers
"""Opening handshake response headers."""
# WebSocket protocol parameters.
self.extensions: List[Extension] = []
self.subprotocol: Optional[Subprotocol] = None
"""Subprotocol, if one was negotiated."""
# Close code and reason, set when a close frame is sent or received.
self.close_rcvd: Optional[Close] = None
self.close_sent: Optional[Close] = None
self.close_rcvd_then_sent: Optional[bool] = None
# Completed when the connection state becomes CLOSED. Translates the
# :meth:`connection_lost` callback to a :class:`~asyncio.Future`
# that can be awaited. (Other :class:`~asyncio.Protocol` callbacks are
# translated by ``self.stream_reader``).
self.connection_lost_waiter: asyncio.Future[None] = loop.create_future()
# Queue of received messages.
self.messages: Deque[Data] = collections.deque()
self._pop_message_waiter: Optional[asyncio.Future[None]] = None
self._put_message_waiter: Optional[asyncio.Future[None]] = None
# Protect sending fragmented messages.
self._fragmented_message_waiter: Optional[asyncio.Future[None]] = None
# Mapping of ping IDs to pong waiters, in chronological order.
self.pings: Dict[bytes, asyncio.Future[None]] = {}
# Task running the data transfer.
self.transfer_data_task: asyncio.Task[None]
# Exception that occurred during data transfer, if any.
self.transfer_data_exc: Optional[BaseException] = None
# Task sending keepalive pings.
self.keepalive_ping_task: asyncio.Task[None]
# Task closing the TCP connection.
self.close_connection_task: asyncio.Task[None]
# Copied from asyncio.FlowControlMixin
async def _drain_helper(self) -> None: # pragma: no cover
if self.connection_lost_waiter.done():
raise ConnectionResetError("Connection lost")
if not self._paused:
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
waiter = self.loop.create_future()
self._drain_waiter = waiter
await waiter
# Copied from asyncio.StreamWriter
async def _drain(self) -> None: # pragma: no cover
if self.reader is not None:
exc = self.reader.exception()
if exc is not None:
raise exc
if self.transport is not None:
if self.transport.is_closing():
# Yield to the event loop so connection_lost() may be
# called. Without this, _drain_helper() would return
# immediately, and code that calls
# write(...); yield from drain()
# in a loop would never call connection_lost(), so it
# would not see an error when the socket is closed.
await asyncio.sleep(0, **loop_if_py_lt_38(self.loop))
await self._drain_helper()
def connection_open(self) -> None:
"""
Callback when the WebSocket opening handshake completes.
Enter the OPEN state and start the data transfer phase.
"""
# 4.1. The WebSocket Connection is Established.
assert self.state is State.CONNECTING
self.state = State.OPEN
if self.debug:
self.logger.debug("= connection is OPEN")
# Start the task that receives incoming WebSocket messages.
self.transfer_data_task = self.loop.create_task(self.transfer_data())
# Start the task that sends pings at regular intervals.
self.keepalive_ping_task = self.loop.create_task(self.keepalive_ping())
# Start the task that eventually closes the TCP connection.
self.close_connection_task = self.loop.create_task(self.close_connection())
@property
def host(self) -> Optional[str]:
alternative = "remote_address" if self.is_client else "local_address"
warnings.warn(f"use {alternative}[0] instead of host", DeprecationWarning)
return self._host
@property
def port(self) -> Optional[int]:
alternative = "remote_address" if self.is_client else "local_address"
warnings.warn(f"use {alternative}[1] instead of port", DeprecationWarning)
return self._port
@property
def secure(self) -> Optional[bool]:
warnings.warn("don't use secure", DeprecationWarning)
return self._secure
# Public API
@property
def local_address(self) -> Any:
"""
Local address of the connection.
For IPv4 connections, this is a ``(host, port)`` tuple.
The format of the address depends on the address family;
see :meth:`~socket.socket.getsockname`.
:obj:`None` if the TCP connection isn't established yet.
"""
try:
transport = self.transport
except AttributeError:
return None
else:
return transport.get_extra_info("sockname")
@property
def remote_address(self) -> Any:
"""
Remote address of the connection.
For IPv4 connections, this is a ``(host, port)`` tuple.
The format of the address depends on the address family;
see :meth:`~socket.socket.getpeername`.
:obj:`None` if the TCP connection isn't established yet.
"""
try:
transport = self.transport
except AttributeError:
return None
else:
return transport.get_extra_info("peername")
@property
def open(self) -> bool:
"""
:obj:`True` when the connection is open; :obj:`False` otherwise.
This attribute may be used to detect disconnections. However, this
approach is discouraged per the EAFP_ principle. Instead, you should
handle :exc:`~websockets.exceptions.ConnectionClosed` exceptions.
.. _EAFP: https://docs.python.org/3/glossary.html#term-eafp
"""
return self.state is State.OPEN and not self.transfer_data_task.done()
@property
def closed(self) -> bool:
"""
:obj:`True` when the connection is closed; :obj:`False` otherwise.
Be aware that both :attr:`open` and :attr:`closed` are :obj:`False`
during the opening and closing sequences.
"""
return self.state is State.CLOSED
@property
def close_code(self) -> Optional[int]:
"""
WebSocket close code, defined in `section 7.1.5 of RFC 6455`_.
.. _section 7.1.5 of RFC 6455:
https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.5
:obj:`None` if the connection isn't closed yet.
"""
if self.state is not State.CLOSED:
return None
elif self.close_rcvd is None:
return 1006
else:
return self.close_rcvd.code
@property
def close_reason(self) -> Optional[str]:
"""
WebSocket close reason, defined in `section 7.1.6 of RFC 6455`_.
.. _section 7.1.6 of RFC 6455:
https://www.rfc-editor.org/rfc/rfc6455.html#section-7.1.6
:obj:`None` if the connection isn't closed yet.
"""
if self.state is not State.CLOSED:
return None
elif self.close_rcvd is None:
return ""
else:
return self.close_rcvd.reason
async def __aiter__(self) -> AsyncIterator[Data]:
"""
Iterate on incoming messages.
The iterator exits normally when the connection is closed with the
close code 1000 (OK) or 1001 (going away). It raises
a :exc:`~websockets.exceptions.ConnectionClosedError` exception when
the connection is closed with any other code.
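For example, a minimal consumer loop that also reports abnormal
closures::

    try:
        async for message in websocket:
            ...  # process message
    except ConnectionClosedError:
        ...  # closed with a code other than 1000 or 1001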
"""
try:
while True:
yield await self.recv()
except ConnectionClosedOK:
return
async def recv(self) -> Data:
"""
Receive the next message.
When the connection is closed, :meth:`recv` raises
:exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it
raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal
connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure. This is how you detect the end of the
message stream.
Canceling :meth:`recv` is safe. There's no risk of losing the next
message. The next invocation of :meth:`recv` will return it.
This makes it possible to enforce a timeout by wrapping :meth:`recv`
in :func:`~asyncio.wait_for`.
Returns:
Data: A string (:class:`str`) for a Text_ frame. A bytestring
(:class:`bytes`) for a Binary_ frame.
.. _Text: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
.. _Binary: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
Raises:
ConnectionClosed: when the connection is closed.
RuntimeError: if two coroutines call :meth:`recv` concurrently.
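For example, a sketch enforcing a receive timeout; the 10-second value
is an arbitrary choice::

    message = await asyncio.wait_for(websocket.recv(), timeout=10)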
"""
if self._pop_message_waiter is not None:
raise RuntimeError(
"cannot call recv while another coroutine "
"is already waiting for the next message"
)
# Don't await self.ensure_open() here:
# - messages could be available in the queue even if the connection
# is closed;
# - messages could be received before the closing frame even if the
# connection is closing.
# Wait until there's a message in the queue (if necessary) or the
# connection is closed.
while len(self.messages) <= 0:
pop_message_waiter: asyncio.Future[None] = self.loop.create_future()
self._pop_message_waiter = pop_message_waiter
try:
# If asyncio.wait() is canceled, it doesn't cancel
# pop_message_waiter and self.transfer_data_task.
await asyncio.wait(
[pop_message_waiter, self.transfer_data_task],
return_when=asyncio.FIRST_COMPLETED,
**loop_if_py_lt_38(self.loop),
)
finally:
self._pop_message_waiter = None
# If asyncio.wait(...) exited because self.transfer_data_task
# completed before receiving a new message, raise a suitable
# exception (or return None if legacy_recv is enabled).
if not pop_message_waiter.done():
if self.legacy_recv:
return None # type: ignore
else:
# Wait until the connection is closed to raise
# ConnectionClosed with the correct code and reason.
await self.ensure_open()
# Pop a message from the queue.
message = self.messages.popleft()
# Notify transfer_data().
if self._put_message_waiter is not None:
self._put_message_waiter.set_result(None)
self._put_message_waiter = None
return message
async def send(
self,
message: Union[Data, Iterable[Data], AsyncIterable[Data]],
) -> None:
"""
Send a message.
A string (:class:`str`) is sent as a Text_ frame. A bytestring or
bytes-like object (:class:`bytes`, :class:`bytearray`, or
:class:`memoryview`) is sent as a Binary_ frame.
.. _Text: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
.. _Binary: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
:meth:`send` also accepts an iterable or an asynchronous iterable of
strings, bytestrings, or bytes-like objects to enable fragmentation_.
Each item is treated as a message fragment and sent in its own frame.
All items must be of the same type, or else :meth:`send` will raise a
:exc:`TypeError` and the connection will be closed.
.. _fragmentation: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.4
:meth:`send` rejects dict-like objects because this is often an error.
(If you want to send the keys of a dict-like object as fragments, call
its :meth:`~dict.keys` method and pass the result to :meth:`send`.)
Canceling :meth:`send` is discouraged. Instead, you should close the
connection with :meth:`close`. Indeed, there are only two situations
where :meth:`send` may yield control to the event loop and then get
canceled; in both cases, :meth:`close` has the same effect and is
clearer:
1. The write buffer is full. If you don't want to wait until enough
data is sent, your only alternative is to close the connection.
:meth:`close` will likely time out then abort the TCP connection.
2. ``message`` is an asynchronous iterator that yields control.
Stopping in the middle of a fragmented message will cause a
protocol error and the connection will be closed.
When the connection is closed, :meth:`send` raises
:exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it
raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal
connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure.
Args:
message (Union[Data, Iterable[Data], AsyncIterable[Data]]): message
to send.
Raises:
ConnectionClosed: when the connection is closed.
TypeError: if ``message`` doesn't have a supported type.
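For example, a sketch of the supported input shapes::

    await websocket.send("Hello!")        # one Text frame
    await websocket.send(b"\x01\x02")     # one Binary frame
    await websocket.send(["Hel", "lo!"])  # fragmented Text message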
"""
await self.ensure_open()
# While sending a fragmented message, prevent sending other messages
# until all fragments are sent.
while self._fragmented_message_waiter is not None:
await asyncio.shield(self._fragmented_message_waiter)
# Unfragmented message -- this case must be handled first because
# strings and bytes-like objects are iterable.
if isinstance(message, (str, bytes, bytearray, memoryview)):
opcode, data = prepare_data(message)
await self.write_frame(True, opcode, data)
# Catch a common mistake -- passing a dict to send().
elif isinstance(message, Mapping):
raise TypeError("data is a dict-like object")
# Fragmented message -- regular iterator.
elif isinstance(message, Iterable):
# Work around https://github.com/python/mypy/issues/6227
message = cast(Iterable[Data], message)
iter_message = iter(message)
try:
message_chunk = next(iter_message)
except StopIteration:
return
opcode, data = prepare_data(message_chunk)
self._fragmented_message_waiter = asyncio.Future()
try:
# First fragment.
await self.write_frame(False, opcode, data)
# Other fragments.
for message_chunk in iter_message:
confirm_opcode, data = prepare_data(message_chunk)
if confirm_opcode != opcode:
raise TypeError("data contains inconsistent types")
await self.write_frame(False, OP_CONT, data)
# Final fragment.
await self.write_frame(True, OP_CONT, b"")
except (Exception, asyncio.CancelledError):
# We're half-way through a fragmented message and we can't
# complete it. This makes the connection unusable.
self.fail_connection(1011)
raise
finally:
self._fragmented_message_waiter.set_result(None)
self._fragmented_message_waiter = None
# Fragmented message -- asynchronous iterator
elif isinstance(message, AsyncIterable):
# aiter_message = aiter(message) without aiter
# https://github.com/python/mypy/issues/5738
aiter_message = type(message).__aiter__(message) # type: ignore
try:
# message_chunk = anext(aiter_message) without anext
# https://github.com/python/mypy/issues/5738
message_chunk = await type(aiter_message).__anext__( # type: ignore
aiter_message
)
except StopAsyncIteration:
return
opcode, data = prepare_data(message_chunk)
self._fragmented_message_waiter = asyncio.Future()
try:
# First fragment.
await self.write_frame(False, opcode, data)
# Other fragments.
# https://github.com/python/mypy/issues/5738
# coverage reports this code as not covered, but it is
# exercised by tests - changing it breaks the tests!
async for message_chunk in aiter_message: # type: ignore # pragma: no cover # noqa
confirm_opcode, data = prepare_data(message_chunk)
if confirm_opcode != opcode:
raise TypeError("data contains inconsistent types")
await self.write_frame(False, OP_CONT, data)
# Final fragment.
await self.write_frame(True, OP_CONT, b"")
except (Exception, asyncio.CancelledError):
# We're half-way through a fragmented message and we can't
# complete it. This makes the connection unusable.
self.fail_connection(1011)
raise
finally:
self._fragmented_message_waiter.set_result(None)
self._fragmented_message_waiter = None
else:
raise TypeError("data must be str, bytes-like, or iterable")
async def close(self, code: int = 1000, reason: str = "") -> None:
"""
Perform the closing handshake.
:meth:`close` waits for the other end to complete the handshake and
for the TCP connection to terminate. As a consequence, there's no need
to await :meth:`wait_closed` after :meth:`close`.
:meth:`close` is idempotent: it doesn't do anything once the
connection is closed.
Wrapping :func:`close` in :func:`~asyncio.create_task` is safe, given
that errors during connection termination aren't particularly useful.
Canceling :meth:`close` is discouraged. If it takes too long, you can
set a shorter ``close_timeout``. If you don't want to wait, let the
Python process exit, then the OS will take care of closing the TCP
connection.
Args:
code: WebSocket close code.
reason: WebSocket close reason.
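For example, closing with the "going away" code and a hypothetical
reason string::

    await websocket.close(code=1001, reason="server restarting")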
"""
try:
await asyncio.wait_for(
self.write_close_frame(Close(code, reason)),
self.close_timeout,
**loop_if_py_lt_38(self.loop),
)
except asyncio.TimeoutError:
# If the close frame cannot be sent because the send buffers
# are full, the closing handshake won't complete anyway.
# Fail the connection to shut down faster.
self.fail_connection()
# If no close frame is received within the timeout, wait_for() cancels
# the data transfer task and raises TimeoutError.
# If close() is called multiple times concurrently and one of these
# calls hits the timeout, the data transfer task will be canceled.
# Other calls will receive a CancelledError here.
try:
# If close() is canceled during the wait, self.transfer_data_task
# is canceled before the timeout elapses.
await asyncio.wait_for(
self.transfer_data_task,
self.close_timeout,
**loop_if_py_lt_38(self.loop),
)
except (asyncio.TimeoutError, asyncio.CancelledError):
pass
# Wait for the close connection task to close the TCP connection.
await asyncio.shield(self.close_connection_task)
async def wait_closed(self) -> None:
"""
Wait until the connection is closed.
This coroutine is identical to the :attr:`closed` attribute, except it
can be awaited.
This can make it easier to detect connection termination, regardless
of its cause, in tasks that interact with the WebSocket connection.
"""
await asyncio.shield(self.connection_lost_waiter)
async def ping(self, data: Optional[Data] = None) -> Awaitable[None]:
"""
Send a Ping_.
.. _Ping: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.5.2
A ping may serve as a keepalive or as a check that the remote endpoint
received all messages up to this point.
Canceling :meth:`ping` is discouraged. If :meth:`ping` doesn't return
immediately, it means the write buffer is full. If you don't want to
wait, you should close the connection.
Canceling the :class:`~asyncio.Future` returned by :meth:`ping` has no
effect.
Args:
data (Optional[Data]): payload of the ping; a string will be
encoded to UTF-8; or :obj:`None` to generate a payload
containing four random bytes.
Returns:
~asyncio.Future: A future that will be completed when the
corresponding pong is received. You can ignore it if you
don't intend to wait.
::
pong_waiter = await ws.ping()
await pong_waiter # only if you want to wait for the pong
Raises:
ConnectionClosed: when the connection is closed.
RuntimeError: if another ping was sent with the same data and
the corresponding pong wasn't received yet.
"""
await self.ensure_open()
if data is not None:
data = prepare_ctrl(data)
# Protect against duplicates if a payload is explicitly set.
if data in self.pings:
raise RuntimeError("already waiting for a pong with the same data")
# Generate a unique random payload otherwise.
while data is None or data in self.pings:
data = struct.pack("!I", random.getrandbits(32))
self.pings[data] = self.loop.create_future()
await self.write_frame(True, OP_PING, data)
return asyncio.shield(self.pings[data])
async def pong(self, data: Data = b"") -> None:
"""
Send a Pong_.
.. _Pong: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.5.3
An unsolicited pong may serve as a unidirectional heartbeat.
Canceling :meth:`pong` is discouraged. If :meth:`pong` doesn't return
immediately, it means the write buffer is full. If you don't want to
wait, you should close the connection.
Args:
data (Data): payload of the pong; a string will be encoded to
UTF-8.
Raises:
ConnectionClosed: when the connection is closed.
"""
await self.ensure_open()
data = prepare_ctrl(data)
await self.write_frame(True, OP_PONG, data)
# Private methods - no guarantees.
def connection_closed_exc(self) -> ConnectionClosed:
exc: ConnectionClosed
if (
self.close_rcvd is not None
and self.close_rcvd.code in OK_CLOSE_CODES
and self.close_sent is not None
and self.close_sent.code in OK_CLOSE_CODES
):
exc = ConnectionClosedOK(
self.close_rcvd,
self.close_sent,
self.close_rcvd_then_sent,
)
else:
exc = ConnectionClosedError(
self.close_rcvd,
self.close_sent,
self.close_rcvd_then_sent,
)
# Chain to the exception that terminated data transfer, if any.
exc.__cause__ = self.transfer_data_exc
return exc
async def ensure_open(self) -> None:
"""
Check that the WebSocket connection is open.
Raise :exc:`~websockets.exceptions.ConnectionClosed` if it isn't.
"""
# Handle cases from most common to least common for performance.
if self.state is State.OPEN:
# If self.transfer_data_task exited without a closing handshake,
# self.close_connection_task may be closing the connection, going
# straight from OPEN to CLOSED.
if self.transfer_data_task.done():
await asyncio.shield(self.close_connection_task)
raise self.connection_closed_exc()
else:
return
if self.state is State.CLOSED:
raise self.connection_closed_exc()
if self.state is State.CLOSING:
# If we started the closing handshake, wait for its completion to
# get the proper close code and reason. self.close_connection_task
# will complete within 4 or 5 * close_timeout after close(). The
# CLOSING state also occurs when failing the connection. In that
# case self.close_connection_task will complete even faster.
await asyncio.shield(self.close_connection_task)
raise self.connection_closed_exc()
# Control may only reach this point in buggy third-party subclasses.
assert self.state is State.CONNECTING
raise InvalidState("WebSocket connection isn't established yet")
async def transfer_data(self) -> None:
"""
Read incoming messages and put them in a queue.
This coroutine runs in a task until the closing handshake is started.
"""
try:
while True:
message = await self.read_message()
# Exit the loop when receiving a close frame.
if message is None:
break
# Wait until there's room in the queue (if necessary).
if self.max_queue is not None:
while len(self.messages) >= self.max_queue:
self._put_message_waiter = self.loop.create_future()
try:
await asyncio.shield(self._put_message_waiter)
finally:
self._put_message_waiter = None
# Put the message in the queue.
self.messages.append(message)
# Notify recv().
if self._pop_message_waiter is not None:
self._pop_message_waiter.set_result(None)
self._pop_message_waiter = None
except asyncio.CancelledError as exc:
self.transfer_data_exc = exc
# If fail_connection() cancels this task, avoid logging the error
# twice and failing the connection again.
raise
except ProtocolError as exc:
self.transfer_data_exc = exc
self.fail_connection(1002)
except (ConnectionError, TimeoutError, EOFError, ssl.SSLError) as exc:
# Reading data with self.reader.readexactly may raise:
# - most subclasses of ConnectionError if the TCP connection
# breaks, is reset, or is aborted;
# - TimeoutError if the TCP connection times out;
# - IncompleteReadError, a subclass of EOFError, if fewer
# bytes are available than requested;
# - ssl.SSLError if the other side infringes the TLS protocol.
self.transfer_data_exc = exc
self.fail_connection(1006)
except UnicodeDecodeError as exc:
self.transfer_data_exc = exc
self.fail_connection(1007)
except PayloadTooBig as exc:
self.transfer_data_exc = exc
self.fail_connection(1009)
except Exception as exc:
# This shouldn't happen often because exceptions expected under
# regular circumstances are handled above. If it does, consider
# catching and handling more exceptions.
self.logger.error("data transfer failed", exc_info=True)
self.transfer_data_exc = exc
self.fail_connection(1011)
async def read_message(self) -> Optional[Data]:
"""
Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return :obj:`None` when the closing handshake is started.
"""
frame = await self.read_data_frame(max_size=self.max_size)
# A close frame was received.
if frame is None:
return None
if frame.opcode == OP_TEXT:
text = True
elif frame.opcode == OP_BINARY:
text = False
else: # frame.opcode == OP_CONT
raise ProtocolError("unexpected opcode")
# Shortcut for the common case - no fragmentation
if frame.fin:
return frame.data.decode("utf-8") if text else frame.data
# 5.4. Fragmentation
chunks: List[Data] = []
max_size = self.max_size
if text:
decoder_factory = codecs.getincrementaldecoder("utf-8")
decoder = decoder_factory(errors="strict")
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(decoder.decode(frame.data, frame.fin))
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(decoder.decode(frame.data, frame.fin))
assert isinstance(max_size, int)
max_size -= len(frame.data)
else:
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(frame.data)
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(frame.data)
assert isinstance(max_size, int)
max_size -= len(frame.data)
append(frame)
while not frame.fin:
frame = await self.read_data_frame(max_size=max_size)
if frame is None:
raise ProtocolError("incomplete fragmented message")
if frame.opcode != OP_CONT:
raise ProtocolError("unexpected opcode")
append(frame)
return ("" if text else b"").join(chunks)
async def read_data_frame(self, max_size: Optional[int]) -> Optional[Frame]:
"""
Read a single data frame from the connection.
Process control frames received before the next data frame.
Return :obj:`None` if a close frame is encountered before any data frame.
"""
# 6.2. Receiving Data
while True:
frame = await self.read_frame(max_size)
# 5.5. Control Frames
if frame.opcode == OP_CLOSE:
# 7.1.5. The WebSocket Connection Close Code
# 7.1.6. The WebSocket Connection Close Reason
self.close_rcvd = Close.parse(frame.data)
if self.close_sent is not None:
self.close_rcvd_then_sent = False
try:
# Echo the original data instead of re-serializing it with
# Close.serialize() because that fails when the close frame
                    # is empty and Close.parse() synthesizes a 1005 close code.
await self.write_close_frame(self.close_rcvd, frame.data)
except ConnectionClosed:
# Connection closed before we could echo the close frame.
pass
return None
elif frame.opcode == OP_PING:
# Answer pings, unless connection is CLOSING.
if self.state is State.OPEN:
try:
await self.pong(frame.data)
except ConnectionClosed:
# Connection closed while draining write buffer.
pass
elif frame.opcode == OP_PONG:
if frame.data in self.pings:
# Sending a pong for only the most recent ping is legal.
# Acknowledge all previous pings too in that case.
ping_id = None
ping_ids = []
for ping_id, ping in self.pings.items():
ping_ids.append(ping_id)
if not ping.done():
ping.set_result(None)
if ping_id == frame.data:
break
else: # pragma: no cover
assert False, "ping_id is in self.pings"
# Remove acknowledged pings from self.pings.
for ping_id in ping_ids:
del self.pings[ping_id]
# 5.6. Data Frames
else:
return frame
async def read_frame(self, max_size: Optional[int]) -> Frame:
"""
Read a single frame from the connection.
"""
frame = await Frame.read(
self.reader.readexactly,
mask=not self.is_client,
max_size=max_size,
extensions=self.extensions,
)
if self.debug:
self.logger.debug("< %s", frame)
return frame
def write_frame_sync(self, fin: bool, opcode: int, data: bytes) -> None:
frame = Frame(fin, Opcode(opcode), data)
if self.debug:
self.logger.debug("> %s", frame)
frame.write(
self.transport.write,
mask=self.is_client,
extensions=self.extensions,
)
async def drain(self) -> None:
try:
# drain() cannot be called concurrently by multiple coroutines:
# http://bugs.python.org/issue29930. Remove this lock when no
            # version of Python where this bug exists is supported anymore.
async with self._drain_lock:
# Handle flow control automatically.
await self._drain()
except ConnectionError:
# Terminate the connection if the socket died.
self.fail_connection()
# Wait until the connection is closed to raise ConnectionClosed
# with the correct code and reason.
await self.ensure_open()
async def write_frame(
self, fin: bool, opcode: int, data: bytes, *, _state: int = State.OPEN
) -> None:
# Defensive assertion for protocol compliance.
if self.state is not _state: # pragma: no cover
raise InvalidState(
f"Cannot write to a WebSocket in the {self.state.name} state"
)
self.write_frame_sync(fin, opcode, data)
await self.drain()
async def write_close_frame(
self, close: Close, data: Optional[bytes] = None
) -> None:
"""
Write a close frame if and only if the connection state is OPEN.
This dedicated coroutine must be used for writing close frames to
ensure that at most one close frame is sent on a given connection.
"""
# Test and set the connection state before sending the close frame to
# avoid sending two frames in case of concurrent calls.
if self.state is State.OPEN:
# 7.1.3. The WebSocket Closing Handshake is Started
self.state = State.CLOSING
if self.debug:
self.logger.debug("= connection is CLOSING")
self.close_sent = close
if self.close_rcvd is not None:
self.close_rcvd_then_sent = True
if data is None:
data = close.serialize()
# 7.1.2. Start the WebSocket Closing Handshake
await self.write_frame(True, OP_CLOSE, data, _state=State.CLOSING)
async def keepalive_ping(self) -> None:
"""
Send a Ping frame and wait for a Pong frame at regular intervals.
This coroutine exits when the connection terminates and one of the
following happens:
- :meth:`ping` raises :exc:`ConnectionClosed`, or
- :meth:`close_connection` cancels :attr:`keepalive_ping_task`.
"""
if self.ping_interval is None:
return
try:
while True:
await asyncio.sleep(
self.ping_interval,
**loop_if_py_lt_38(self.loop),
)
# ping() raises CancelledError if the connection is closed,
# when close_connection() cancels self.keepalive_ping_task.
# ping() raises ConnectionClosed if the connection is lost,
# when connection_lost() calls abort_pings().
self.logger.debug("% sending keepalive ping")
pong_waiter = await self.ping()
if self.ping_timeout is not None:
try:
await asyncio.wait_for(
pong_waiter,
self.ping_timeout,
**loop_if_py_lt_38(self.loop),
)
self.logger.debug("% received keepalive pong")
except asyncio.TimeoutError:
if self.debug:
self.logger.debug("! timed out waiting for keepalive pong")
self.fail_connection(1011, "keepalive ping timeout")
break
# Remove this branch when dropping support for Python < 3.8
# because CancelledError no longer inherits Exception.
except asyncio.CancelledError:
raise
except ConnectionClosed:
pass
except Exception:
self.logger.error("keepalive ping failed", exc_info=True)
async def close_connection(self) -> None:
"""
7.1.1. Close the WebSocket Connection
When the opening handshake succeeds, :meth:`connection_open` starts
this coroutine in a task. It waits for the data transfer phase to
complete then it closes the TCP connection cleanly.
When the opening handshake fails, :meth:`fail_connection` does the
same. There's no data transfer phase in that case.
"""
try:
# Wait for the data transfer phase to complete.
if hasattr(self, "transfer_data_task"):
try:
await self.transfer_data_task
except asyncio.CancelledError:
pass
# Cancel the keepalive ping task.
if hasattr(self, "keepalive_ping_task"):
self.keepalive_ping_task.cancel()
# A client should wait for a TCP close from the server.
if self.is_client and hasattr(self, "transfer_data_task"):
if await self.wait_for_connection_lost():
# Coverage marks this line as a partially executed branch.
                    # I suspect a bug in coverage. Ignore it for now.
return # pragma: no cover
if self.debug:
self.logger.debug("! timed out waiting for TCP close")
# Half-close the TCP connection if possible (when there's no TLS).
if self.transport.can_write_eof():
if self.debug:
self.logger.debug("x half-closing TCP connection")
# write_eof() doesn't document which exceptions it raises.
# "[Errno 107] Transport endpoint is not connected" happens
# but it isn't completely clear under which circumstances.
# uvloop can raise RuntimeError here.
try:
self.transport.write_eof()
except (OSError, RuntimeError): # pragma: no cover
pass
if await self.wait_for_connection_lost():
# Coverage marks this line as a partially executed branch.
                    # I suspect a bug in coverage. Ignore it for now.
return # pragma: no cover
if self.debug:
self.logger.debug("! timed out waiting for TCP close")
finally:
# The try/finally ensures that the transport never remains open,
# even if this coroutine is canceled (for example).
await self.close_transport()
async def close_transport(self) -> None:
"""
Close the TCP connection.
"""
# If connection_lost() was called, the TCP connection is closed.
# However, if TLS is enabled, the transport still needs closing.
# Else asyncio complains: ResourceWarning: unclosed transport.
if self.connection_lost_waiter.done() and self.transport.is_closing():
return
# Close the TCP connection. Buffers are flushed asynchronously.
if self.debug:
self.logger.debug("x closing TCP connection")
self.transport.close()
if await self.wait_for_connection_lost():
return
if self.debug:
self.logger.debug("! timed out waiting for TCP close")
# Abort the TCP connection. Buffers are discarded.
if self.debug:
self.logger.debug("x aborting TCP connection")
self.transport.abort()
# connection_lost() is called quickly after aborting.
# Coverage marks this line as a partially executed branch.
        # I suspect a bug in coverage. Ignore it for now.
await self.wait_for_connection_lost() # pragma: no cover
async def wait_for_connection_lost(self) -> bool:
"""
Wait until the TCP connection is closed or ``self.close_timeout`` elapses.
Return :obj:`True` if the connection is closed and :obj:`False`
otherwise.
"""
if not self.connection_lost_waiter.done():
try:
await asyncio.wait_for(
asyncio.shield(self.connection_lost_waiter),
self.close_timeout,
**loop_if_py_lt_38(self.loop),
)
except asyncio.TimeoutError:
pass
# Re-check self.connection_lost_waiter.done() synchronously because
# connection_lost() could run between the moment the timeout occurs
# and the moment this coroutine resumes running.
return self.connection_lost_waiter.done()
def fail_connection(self, code: int = 1006, reason: str = "") -> None:
"""
7.1.7. Fail the WebSocket Connection
This requires:
1. Stopping all processing of incoming data, which means cancelling
:attr:`transfer_data_task`. The close code will be 1006 unless a
close frame was received earlier.
2. Sending a close frame with an appropriate code if the opening
handshake succeeded and the other side is likely to process it.
3. Closing the connection. :meth:`close_connection` takes care of
this once :attr:`transfer_data_task` exits after being canceled.
(The specification describes these steps in the opposite order.)
"""
if self.debug:
self.logger.debug("! failing connection with code %d", code)
# Cancel transfer_data_task if the opening handshake succeeded.
# cancel() is idempotent and ignored if the task is done already.
if hasattr(self, "transfer_data_task"):
self.transfer_data_task.cancel()
# Send a close frame when the state is OPEN (a close frame was already
# sent if it's CLOSING), except when failing the connection because of
# an error reading from or writing to the network.
# Don't send a close frame if the connection is broken.
if code != 1006 and self.state is State.OPEN:
close = Close(code, reason)
# Write the close frame without draining the write buffer.
# Keeping fail_connection() synchronous guarantees it can't
# get stuck and simplifies the implementation of the callers.
            # Not draining the write buffer is acceptable in this context.
# This duplicates a few lines of code from write_close_frame().
self.state = State.CLOSING
if self.debug:
self.logger.debug("= connection is CLOSING")
# If self.close_rcvd was set, the connection state would be
# CLOSING. Therefore self.close_rcvd isn't set and we don't
# have to set self.close_rcvd_then_sent.
assert self.close_rcvd is None
self.close_sent = close
self.write_frame_sync(True, OP_CLOSE, close.serialize())
# Start close_connection_task if the opening handshake didn't succeed.
if not hasattr(self, "close_connection_task"):
self.close_connection_task = self.loop.create_task(self.close_connection())
def abort_pings(self) -> None:
"""
Raise ConnectionClosed in pending keepalive pings.
They'll never receive a pong once the connection is closed.
"""
assert self.state is State.CLOSED
exc = self.connection_closed_exc()
for ping in self.pings.values():
ping.set_exception(exc)
# If the exception is never retrieved, it will be logged when ping
# is garbage-collected. This is confusing for users.
# Given that ping is done (with an exception), canceling it does
# nothing, but it prevents logging the exception.
ping.cancel()
# asyncio.Protocol methods
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Configure write buffer limits.
The high-water limit is defined by ``self.write_limit``.
The low-water limit currently defaults to ``self.write_limit // 4`` in
:meth:`~asyncio.WriteTransport.set_write_buffer_limits`, which should
be all right for reasonable use cases of this library.
This is the earliest point where we can get hold of the transport,
which means it's the best point for configuring it.
"""
transport = cast(asyncio.Transport, transport)
transport.set_write_buffer_limits(self.write_limit)
self.transport = transport
# Copied from asyncio.StreamReaderProtocol
self.reader.set_transport(transport)
def connection_lost(self, exc: Optional[Exception]) -> None:
"""
7.1.4. The WebSocket Connection is Closed.
"""
self.state = State.CLOSED
self.logger.debug("= connection is CLOSED")
self.abort_pings()
# If self.connection_lost_waiter isn't pending, that's a bug, because:
# - it's set only here in connection_lost() which is called only once;
# - it must never be canceled.
self.connection_lost_waiter.set_result(None)
if True: # pragma: no cover
# Copied from asyncio.StreamReaderProtocol
if self.reader is not None:
if exc is None:
self.reader.feed_eof()
else:
self.reader.set_exception(exc)
# Copied from asyncio.FlowControlMixin
# Wake up the writer if currently paused.
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
def pause_writing(self) -> None: # pragma: no cover
assert not self._paused
self._paused = True
def resume_writing(self) -> None: # pragma: no cover
assert self._paused
self._paused = False
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def data_received(self, data: bytes) -> None:
self.reader.feed_data(data)
def eof_received(self) -> None:
"""
Close the transport after receiving EOF.
The WebSocket protocol has its own closing handshake: endpoints close
the TCP or TLS connection after sending and receiving a close frame.
As a consequence, they never need to write after receiving EOF, so
there's no reason to keep the transport open by returning :obj:`True`.
Besides, that doesn't work on TLS connections.
"""
self.reader.feed_eof()
def broadcast(websockets: Iterable[WebSocketCommonProtocol], message: Data) -> None:
"""
Broadcast a message to several WebSocket connections.
A string (:class:`str`) is sent as a Text_ frame. A bytestring or
bytes-like object (:class:`bytes`, :class:`bytearray`, or
:class:`memoryview`) is sent as a Binary_ frame.
.. _Text: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
.. _Binary: https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
:func:`broadcast` pushes the message synchronously to all connections even
if their write buffers are overflowing. There's no backpressure.
    :func:`broadcast` silently skips connections that aren't open in order to
avoid errors on connections where the closing handshake is in progress.
If you broadcast messages faster than a connection can handle them,
messages will pile up in its write buffer until the connection times out.
Keep low values for ``ping_interval`` and ``ping_timeout`` to prevent
excessive memory usage by slow connections when you use :func:`broadcast`.
Unlike :meth:`~websockets.server.WebSocketServerProtocol.send`,
:func:`broadcast` doesn't support sending fragmented messages. Indeed,
fragmentation is useful for sending large messages without buffering
them in memory, while :func:`broadcast` buffers one copy per connection
as fast as possible.
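    For example, assuming ``CONNECTIONS`` is a set of open connections
    maintained elsewhere by the application (a hypothetical name)::
        broadcast(CONNECTIONS, "Hello!")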
Args:
websockets (Iterable[WebSocketCommonProtocol]): WebSocket connections
to which the message will be sent.
message (Data): message to send.
Raises:
RuntimeError: if a connection is busy sending a fragmented message.
TypeError: if ``message`` doesn't have a supported type.
"""
if not isinstance(message, (str, bytes, bytearray, memoryview)):
raise TypeError("data must be str or bytes-like")
opcode, data = prepare_data(message)
for websocket in websockets:
if websocket.state is not State.OPEN:
continue
if websocket._fragmented_message_waiter is not None:
raise RuntimeError("busy sending a fragmented message")
websocket.write_frame_sync(True, opcode, data)
| 61,825 | Python | 37.496887 | 101 | 0.602087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/websockets/legacy/auth.py | from __future__ import annotations
import functools
import hmac
import http
from typing import Any, Awaitable, Callable, Iterable, Optional, Tuple, Union, cast
from ..datastructures import Headers
from ..exceptions import InvalidHeader
from ..headers import build_www_authenticate_basic, parse_authorization_basic
from .server import HTTPResponse, WebSocketServerProtocol
__all__ = ["BasicAuthWebSocketServerProtocol", "basic_auth_protocol_factory"]
Credentials = Tuple[str, str]
def is_credentials(value: Any) -> bool:
try:
username, password = value
except (TypeError, ValueError):
return False
else:
return isinstance(username, str) and isinstance(password, str)
class BasicAuthWebSocketServerProtocol(WebSocketServerProtocol):
"""
WebSocket server protocol that enforces HTTP Basic Auth.
"""
realm: str = ""
"""
Scope of protection.
If provided, it should contain only ASCII characters because the
encoding of non-ASCII characters is undefined.
"""
username: Optional[str] = None
"""Username of the authenticated user."""
def __init__(
self,
*args: Any,
realm: Optional[str] = None,
check_credentials: Optional[Callable[[str, str], Awaitable[bool]]] = None,
**kwargs: Any,
) -> None:
if realm is not None:
self.realm = realm # shadow class attribute
self._check_credentials = check_credentials
super().__init__(*args, **kwargs)
async def check_credentials(self, username: str, password: str) -> bool:
"""
Check whether credentials are authorized.
This coroutine may be overridden in a subclass, for example to
authenticate against a database or an external service.
Args:
username: HTTP Basic Auth username.
password: HTTP Basic Auth password.
Returns:
bool: :obj:`True` if the handshake should continue;
            :obj:`False` if it should fail with an HTTP 401 error.
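        For example, a minimal sketch of an override with hard-coded
        credentials (names and values are illustrative only)::
            class MyProtocol(BasicAuthWebSocketServerProtocol):
                async def check_credentials(self, username, password):
                    return (
                        username == "admin"
                        and hmac.compare_digest(password, "letmein")
                    )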
"""
if self._check_credentials is not None:
return await self._check_credentials(username, password)
return False
async def process_request(
self,
path: str,
request_headers: Headers,
) -> Optional[HTTPResponse]:
"""
        Check HTTP Basic Auth and return an HTTP 401 response if needed.
"""
try:
authorization = request_headers["Authorization"]
except KeyError:
return (
http.HTTPStatus.UNAUTHORIZED,
[("WWW-Authenticate", build_www_authenticate_basic(self.realm))],
b"Missing credentials\n",
)
try:
username, password = parse_authorization_basic(authorization)
except InvalidHeader:
return (
http.HTTPStatus.UNAUTHORIZED,
[("WWW-Authenticate", build_www_authenticate_basic(self.realm))],
b"Unsupported credentials\n",
)
if not await self.check_credentials(username, password):
return (
http.HTTPStatus.UNAUTHORIZED,
[("WWW-Authenticate", build_www_authenticate_basic(self.realm))],
b"Invalid credentials\n",
)
self.username = username
return await super().process_request(path, request_headers)
def basic_auth_protocol_factory(
realm: Optional[str] = None,
credentials: Optional[Union[Credentials, Iterable[Credentials]]] = None,
check_credentials: Optional[Callable[[str, str], Awaitable[bool]]] = None,
create_protocol: Optional[Callable[[Any], BasicAuthWebSocketServerProtocol]] = None,
) -> Callable[[Any], BasicAuthWebSocketServerProtocol]:
"""
Protocol factory that enforces HTTP Basic Auth.
:func:`basic_auth_protocol_factory` is designed to integrate with
:func:`~websockets.server.serve` like this::
websockets.serve(
...,
create_protocol=websockets.basic_auth_protocol_factory(
realm="my dev server",
credentials=("hello", "iloveyou"),
)
)
Args:
realm: indicates the scope of protection. It should contain only ASCII
characters because the encoding of non-ASCII characters is
undefined. Refer to section 2.2 of :rfc:`7235` for details.
credentials: defines hard coded authorized credentials. It can be a
``(username, password)`` pair or a list of such pairs.
check_credentials: defines a coroutine that verifies credentials.
This coroutine receives ``username`` and ``password`` arguments
and returns a :class:`bool`. One of ``credentials`` or
``check_credentials`` must be provided but not both.
create_protocol: factory that creates the protocol. By default, this
is :class:`BasicAuthWebSocketServerProtocol`. It can be replaced
by a subclass.
Raises:
TypeError: if the ``credentials`` or ``check_credentials`` argument is
wrong.
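    For example, a hypothetical ``check_credentials`` coroutine backed by an
    application-defined ``user_store`` (not part of this module)::
        async def check_credentials(username: str, password: str) -> bool:
            return await user_store.verify(username, password)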
"""
if (credentials is None) == (check_credentials is None):
raise TypeError("provide either credentials or check_credentials")
if credentials is not None:
if is_credentials(credentials):
credentials_list = [cast(Credentials, credentials)]
elif isinstance(credentials, Iterable):
credentials_list = list(credentials)
if not all(is_credentials(item) for item in credentials_list):
raise TypeError(f"invalid credentials argument: {credentials}")
else:
raise TypeError(f"invalid credentials argument: {credentials}")
credentials_dict = dict(credentials_list)
async def check_credentials(username: str, password: str) -> bool:
try:
expected_password = credentials_dict[username]
except KeyError:
return False
return hmac.compare_digest(expected_password, password)
if create_protocol is None:
# Not sure why mypy cannot figure this out.
create_protocol = cast(
Callable[[Any], BasicAuthWebSocketServerProtocol],
BasicAuthWebSocketServerProtocol,
)
return functools.partial(
create_protocol,
realm=realm,
check_credentials=check_credentials,
)
| 6,477 | Python | 33.275132 | 88 | 0.628686 |